Change log library to spdlog, expose log levels to user ()

* Change from glog to spdlog

* Remove HA tests

* Remove logrotate log configuration

* Define custom main for unit gtests
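
The file diffs below apply this change mechanically: glog's stream-style macros become spdlog's fmt-style calls, and CHECK/DCHECK become MG_ASSERT/DMG_ASSERT from the new utils/logging.hpp. A minimal, self-contained sketch of the spdlog calling convention and of what the user-facing log level boils down to (illustrative only; the real wiring lives in the files changed by this commit):

#include <spdlog/spdlog.h>

int main() {
  // Runtime level, i.e. what a user-facing --log_level=WARNING ends up selecting.
  spdlog::set_level(spdlog::level::warn);
  // fmt-style formatting replaces glog's `LOG(WARNING) << ...` stream operators.
  spdlog::warn("Couldn't authenticate user '{}'", "alice");
  spdlog::info("suppressed: info is below the configured warn level");
  return 0;
}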
antonio2368 2021-01-21 15:47:56 +01:00 committed by Antonio Andelic
parent c0bd59bb09
commit 28413fd626
225 changed files with 2437 additions and 2685 deletions
config
libs
query_modules/louvain/test/unit
release
src

View File

@ -39,6 +39,10 @@ modifications:
value: "/var/log/memgraph/memgraph.log"
override: true
- name: "log_level"
value: "WARNING"
override: true
- name: "bolt_num_workers"
value: ""
override: false
@ -81,13 +85,7 @@ modifications:
undocumented:
- "flag_file"
- "log_file_mode"
- "log_link_basename"
- "log_prefix"
- "max_log_size"
- "min_log_level"
- "also_log_to_stderr"
- "help"
- "help_xml"
- "stderr_threshold"
- "stop_logging_if_full_disk"
- "version"

View File

@ -149,24 +149,6 @@ import_external_library(gflags STATIC
-DBUILD_gflags_nothreads_LIB=OFF
-DGFLAGS_NO_FILENAMES=${GFLAGS_NO_FILENAMES})
# Setup google logging after gflags (so that glog can use it).
set(GLOG_DISABLE_OPTIONS "0")
if ("${CMAKE_BUILD_TYPE}" MATCHES "^(R|r)(E|e)(L|l).+")
set(GLOG_DISABLE_OPTIONS "1")
endif()
# Setup google logging after gflags (so that glog can use it).
import_external_library(glog STATIC
${CMAKE_CURRENT_SOURCE_DIR}/glog/lib/libglog.a
${CMAKE_CURRENT_SOURCE_DIR}/glog/include
DEPENDS gflags-proj
CMAKE_ARGS -Dgflags_DIR=${CMAKE_CURRENT_SOURCE_DIR}/gflags/lib/cmake/gflags
-DBUILD_TESTING=OFF
-DGLOG_NO_FILENAMES=${GLOG_DISABLE_OPTIONS}
-DGLOG_NO_STACKTRACE=${GLOG_DISABLE_OPTIONS}
-DGLOG_NO_BUFFER_SETTINGS=${GLOG_DISABLE_OPTIONS}
-DGLOG_NO_TIME_PID_FILENAME=${GLOG_DISABLE_OPTIONS})
# Setup cppitertools
import_header_library(cppitertools ${CMAKE_CURRENT_SOURCE_DIR})
@ -222,3 +204,9 @@ import_external_library(mgclient STATIC
-DBUILD_TESTING=OFF)
find_package(OpenSSL REQUIRED)
target_link_libraries(mgclient INTERFACE ${OPENSSL_LIBRARIES})
# Setup spdlog
import_external_library(spdlog STATIC
${CMAKE_CURRENT_SOURCE_DIR}/spdlog/lib/libspdlog.a
${CMAKE_CURRENT_SOURCE_DIR}/spdlog/include
BUILD_COMMAND $(MAKE) spdlog)

View File

@ -59,10 +59,8 @@ cppitertools_ref="cb3635456bdb531121b82b4d2e3afc7ae1f56d47"
clone https://github.com/ryanhaining/cppitertools.git cppitertools $cppitertools_ref
# fmt
fmt_tag="7fa8f8fa48b0903deab5bb42e6760477173ac485" # v3.0.1
# Commit which fixes an issue when compiling with C++14 and higher.
fmt_cxx14_fix="b9aaa507fc49680d037fd84c043f747a395bce04"
clone https://github.com/fmtlib/fmt.git fmt $fmt_tag $fmt_cxx14_fix
fmt_tag="7bdf0628b1276379886c7f6dda2cef2b3b374f0b" # (2020-11-25)
clone https://github.com/fmtlib/fmt.git fmt $fmt_tag
# rapidcheck
rapidcheck_tag="7bc7d302191a4f3d0bf005692677126136e02f60" # (2020-05-04)
@ -76,10 +74,6 @@ clone https://github.com/google/benchmark.git benchmark $benchmark_tag
googletest_tag="ec44c6c1675c25b9827aacd08c02433cccde7780" # v1.8.0
clone https://github.com/google/googletest.git googletest $googletest_tag
# google logging
glog_tag="042a21657e79784226babab8b942f7bd0949635f" # custom version (v0.3.5+)
clone https://github.com/memgraph/glog.git glog $glog_tag
# google flags
gflags_tag="b37ceb03a0e56c9f15ce80409438a555f8a67b7c" # custom version (May 6, 2017)
clone https://github.com/memgraph/gflags.git gflags $gflags_tag
@ -124,3 +118,6 @@ sed -i 's/\${CMAKE_INSTALL_LIBDIR}/lib/' mgclient/src/CMakeLists.txt
# pymgclient
pymgclient_tag="4f85c179e56302d46a1e3e2cf43509db65f062b3" # (2021-01-15)
clone https://github.com/memgraph/pymgclient.git pymgclient $pymgclient_tag
spdlog_tag="46d418164dd4cd9822cf8ca62a116a3f71569241" # (2020-12-01)
clone https://github.com/gabime/spdlog spdlog $spdlog_tag

View File

@ -17,7 +17,7 @@ function(add_unit_test test_cpp)
# TODO: this is a temporary workaround the test build warnings
target_compile_options(${target_name} PRIVATE -Wno-comment -Wno-sign-compare
-Wno-unused-variable)
target_link_libraries(${target_name} glog gflags gtest gtest_main Threads::Threads
target_link_libraries(${target_name} spdlog gflags gtest gtest_main Threads::Threads
louvain-core louvain-test)
# register test
add_test(${target_name} ${exec_name})
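
The commit message mentions defining a custom main for unit gtests; the main itself is not part of this excerpt (and the louvain tests above still link gtest_main). A minimal sketch of what such an entry point typically looks like, assuming it only needs to initialize logging before running the tests:

#include <gtest/gtest.h>
#include <spdlog/spdlog.h>

// Hypothetical test entry point -- the one actually added by this commit is not shown here.
int main(int argc, char **argv) {
  ::testing::InitGoogleTest(&argc, argv);
  // Keep test output quiet unless something is genuinely wrong.
  spdlog::set_level(spdlog::level::warn);
  return RUN_ALL_TESTS();
}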

View File

@ -1,4 +1,3 @@
#include <glog/logging.h>
#include <gtest/gtest.h>
#include "data_structures/graph.hpp"
@ -10,8 +9,7 @@ bool CommunityCheck(const comdata::Graph &graph,
const std::vector<uint32_t> &c) {
if (graph.Size() != c.size()) return false;
for (uint32_t node_id = 0; node_id < graph.Size(); ++node_id)
if (graph.Community(node_id) != c[node_id])
return false;
if (graph.Community(node_id) != c[node_id]) return false;
return true;
}
@ -20,8 +18,7 @@ bool DegreeCheck(const comdata::Graph &graph,
const std::vector<uint32_t> &deg) {
if (graph.Size() != deg.size()) return false;
for (uint32_t node_id = 0; node_id < graph.Size(); ++node_id)
if (graph.Degree(node_id) != deg[node_id])
return false;
if (graph.Degree(node_id) != deg[node_id]) return false;
return true;
}
@ -127,10 +124,7 @@ TEST(Graph, Degrees) {
// Chain
// (0)--(1)--(2)--(3)--(4)
graph = BuildGraph(5, {{0, 1, 1},
{1, 2, 1},
{2, 3, 1},
{3, 4, 1}});
graph = BuildGraph(5, {{0, 1, 1}, {1, 2, 1}, {2, 3, 1}, {3, 4, 1}});
deg = {1, 2, 2, 2, 1};
ASSERT_TRUE(DegreeCheck(graph, deg));
@ -140,12 +134,8 @@ TEST(Graph, Degrees) {
// (1) (2)
// | / \
// (4) (5) (6)
graph = BuildGraph(7, {{0, 1, 1},
{0, 2, 1},
{0, 3, 1},
{1, 4, 1},
{2, 5, 1},
{2, 6, 1}});
graph = BuildGraph(
7, {{0, 1, 1}, {0, 2, 1}, {0, 3, 1}, {1, 4, 1}, {2, 5, 1}, {2, 6, 1}});
deg = {3, 2, 3, 1, 1, 1, 1};
ASSERT_TRUE(DegreeCheck(graph, deg));
@ -155,12 +145,12 @@ TEST(Graph, Degrees) {
// | \ | \
// (2)--(3)-(4)
graph = BuildGraph(5, {{0, 1, 1},
{0, 2, 1},
{0, 3, 1},
{1, 3, 1},
{1, 4, 1},
{2, 3, 1},
{3, 4, 1}});
{0, 2, 1},
{0, 3, 1},
{1, 3, 1},
{1, 4, 1},
{2, 3, 1},
{3, 4, 1}});
deg = {3, 3, 2, 4, 2};
ASSERT_TRUE(DegreeCheck(graph, deg));
@ -170,15 +160,15 @@ TEST(Graph, Degrees) {
// | \ | \
// (2*)--(3)-(4*)
graph = BuildGraph(5, {{0, 1, 1},
{0, 2, 1},
{0, 3, 1},
{1, 3, 1},
{1, 4, 1},
{2, 3, 1},
{3, 4, 1},
{1, 1, 1},
{2, 2, 2},
{4, 4, 4}});
{0, 2, 1},
{0, 3, 1},
{1, 3, 1},
{1, 4, 1},
{2, 3, 1},
{3, 4, 1},
{1, 1, 1},
{2, 2, 2},
{4, 4, 4}});
deg = {3, 4, 3, 4, 3};
ASSERT_TRUE(DegreeCheck(graph, deg));
@ -196,10 +186,7 @@ TEST(Graph, Weights) {
// Chain
// (0)--(1)--(2)--(3)--(4)
graph = BuildGraph(5, {{0, 1, 0.1},
{1, 2, 0.5},
{2, 3, 2.3},
{3, 4, 4.2}});
graph = BuildGraph(5, {{0, 1, 0.1}, {1, 2, 0.5}, {2, 3, 2.3}, {3, 4, 4.2}});
inc_w = {0.1, 0.6, 2.8, 6.5, 4.2};
ASSERT_TRUE(IncidentWeightCheck(graph, inc_w));
ASSERT_NEAR(graph.TotalWeight(), 7.1, 1e-6);
@ -211,11 +198,11 @@ TEST(Graph, Weights) {
// | / \
// (4) (5) (6)
graph = BuildGraph(7, {{0, 1, 1.3},
{0, 2, 0.2},
{0, 3, 1},
{1, 4, 3.2},
{2, 5, 4.2},
{2, 6, 0.7}});
{0, 2, 0.2},
{0, 3, 1},
{1, 4, 3.2},
{2, 5, 4.2},
{2, 6, 0.7}});
inc_w = {2.5, 4.5, 5.1, 1, 3.2, 4.2, 0.7};
ASSERT_TRUE(IncidentWeightCheck(graph, inc_w));
EXPECT_NEAR(graph.TotalWeight(), 10.6, 1e-6);
@ -226,12 +213,12 @@ TEST(Graph, Weights) {
// | \ | \
// (2)--(3)-(4)
graph = BuildGraph(5, {{0, 1, 0.1},
{0, 2, 0.2},
{0, 3, 0.3},
{1, 3, 0.4},
{1, 4, 0.5},
{2, 3, 0.6},
{3, 4, 0.7}});
{0, 2, 0.2},
{0, 3, 0.3},
{1, 3, 0.4},
{1, 4, 0.5},
{2, 3, 0.6},
{3, 4, 0.7}});
inc_w = {0.6, 1, 0.8, 2, 1.2};
ASSERT_TRUE(IncidentWeightCheck(graph, inc_w));
EXPECT_NEAR(graph.TotalWeight(), 2.8, 1e-6);
@ -242,15 +229,15 @@ TEST(Graph, Weights) {
// | \ | \
// (2*)--(3)-(4*)
graph = BuildGraph(5, {{0, 1, 0.1},
{0, 2, 0.2},
{0, 3, 0.3},
{1, 3, 0.4},
{1, 4, 0.5},
{2, 3, 0.6},
{3, 4, 0.7},
{1, 1, 0.8},
{2, 2, 0.9},
{4, 4, 1}});
{0, 2, 0.2},
{0, 3, 0.3},
{1, 3, 0.4},
{1, 4, 0.5},
{2, 3, 0.6},
{3, 4, 0.7},
{1, 1, 0.8},
{2, 2, 0.9},
{4, 4, 1}});
inc_w = {0.6, 1.8, 1.7, 2, 2.2};
ASSERT_TRUE(IncidentWeightCheck(graph, inc_w));
EXPECT_NEAR(graph.TotalWeight(), 5.5, 1e-6);
@ -267,10 +254,7 @@ TEST(Graph, Modularity) {
// Chain
// (0)--(1)--(2)--(3)--(4)
graph = BuildGraph(5, {{0, 1, 0.1},
{1, 2, 0.5},
{2, 3, 2.3},
{3, 4, 4.2}});
graph = BuildGraph(5, {{0, 1, 0.1}, {1, 2, 0.5}, {2, 3, 2.3}, {3, 4, 4.2}});
std::vector<uint32_t> c = {0, 1, 1, 2, 2};
SetCommunities(&graph, c);
EXPECT_NEAR(graph.Modularity(), 0.036798254314620096, 1e-6);
@ -282,11 +266,11 @@ TEST(Graph, Modularity) {
// | / \
// (4) (5) (6)
graph = BuildGraph(7, {{0, 1, 1.3},
{0, 2, 0.2},
{0, 3, 1},
{1, 4, 3.2},
{2, 5, 4.2},
{2, 6, 0.7}});
{0, 2, 0.2},
{0, 3, 1},
{1, 4, 3.2},
{2, 5, 4.2},
{2, 6, 0.7}});
c = {0, 0, 1, 0, 0, 1, 2};
SetCommunities(&graph, c);
EXPECT_NEAR(graph.Modularity(), 0.4424617301530794, 1e-6);
@ -297,12 +281,12 @@ TEST(Graph, Modularity) {
// | \ | \
// (2)--(3)-(4)
graph = BuildGraph(5, {{0, 1, 0.1},
{0, 2, 0.2},
{0, 3, 0.3},
{1, 3, 0.4},
{1, 4, 0.5},
{2, 3, 0.6},
{3, 4, 0.7}});
{0, 2, 0.2},
{0, 3, 0.3},
{1, 3, 0.4},
{1, 4, 0.5},
{2, 3, 0.6},
{3, 4, 0.7}});
c = {0, 1, 1, 1, 1};
SetCommunities(&graph, c);
EXPECT_NEAR(graph.Modularity(), -0.022959183673469507, 1e-6);
@ -313,15 +297,15 @@ TEST(Graph, Modularity) {
// | \ | \
// (2*)--(3)-(4*)
graph = BuildGraph(5, {{0, 1, 0.1},
{0, 2, 0.2},
{0, 3, 0.3},
{1, 3, 0.4},
{1, 4, 0.5},
{2, 3, 0.6},
{3, 4, 0.7},
{1, 1, 0.8},
{2, 2, 0.9},
{4, 4, 1}});
{0, 2, 0.2},
{0, 3, 0.3},
{1, 3, 0.4},
{1, 4, 0.5},
{2, 3, 0.6},
{3, 4, 0.7},
{1, 1, 0.8},
{2, 2, 0.9},
{4, 4, 1}});
c = {0, 0, 0, 0, 1};
SetCommunities(&graph, c);
EXPECT_NEAR(graph.Modularity(), 0.188842975206611, 1e-6);

View File

@ -1,13 +1,2 @@
# logrotate configuration for Memgraph Community
# see "man logrotate" for details
/var/log/memgraph/memgraph.log {
# rotate log files weekly
weekly
# keep 5 weeks worth of backlog
rotate 5
# send SIGUSR1 to notify memgraph to recreate logfile
postrotate
/usr/bin/killall -s SIGUSR1 memgraph
endscript
}

View File

@ -1,17 +1,6 @@
# logrotate configuration for Memgraph Enterprise
# see "man logrotate" for details
/var/log/memgraph/memgraph.log {
# rotate log files weekly
weekly
# keep 5 weeks worth of backlog
rotate 5
# send SIGUSR1 to notify memgraph to recreate logfile
postrotate
/usr/bin/killall -s SIGUSR1 memgraph
endscript
}
/var/lib/memgraph/durability/audit/audit.log {
# rotate log files daily
daily
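
With the logrotate snippets above removed, rotation of memgraph.log is no longer delegated to logrotate. One way this can be handled on the library side is spdlog's rotating file sink; this is only an illustration of that spdlog facility, not necessarily how Memgraph ends up configuring its sinks:

#include <spdlog/sinks/rotating_file_sink.h>
#include <spdlog/spdlog.h>

int main() {
  // Rotate after ~100 MiB per file, keep 5 rotated files around.
  auto logger = spdlog::rotating_logger_mt(
      "memgraph_log", "/var/log/memgraph/memgraph.log",
      1024 * 1024 * 100, 5);
  spdlog::set_default_logger(logger);
  spdlog::warn("rotation is handled by the sink itself, no SIGUSR1 needed");
  return 0;
}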

View File

@ -1,5 +1,5 @@
set(audit_src_files log.cpp)
add_library(mg-audit STATIC ${audit_src_files})
target_link_libraries(mg-audit json glog gflags fmt)
target_link_libraries(mg-audit json gflags fmt)
target_link_libraries(mg-audit mg-utils mg-storage-v2)

View File

@ -3,9 +3,9 @@
#include <chrono>
#include <fmt/format.h>
#include <glog/logging.h>
#include <json/json.hpp>
#include "utils/logging.hpp"
#include "utils/string.hpp"
namespace audit {
@ -55,7 +55,7 @@ Log::Log(const std::filesystem::path &storage_directory, int32_t buffer_size,
started_(false) {}
void Log::Start() {
CHECK(!started_) << "Trying to start an already started audit log!";
MG_ASSERT(!started_, "Trying to start an already started audit log!");
utils::EnsureDirOrDie(storage_directory_);
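
CHECK(cond) << msg becomes MG_ASSERT(cond, msg) here and throughout the rest of the diff. The macro itself lives in the new utils/logging.hpp, which is not included in this excerpt; as a rough idea of the shape such a wrapper can take (an assumption, not the actual implementation):

#include <cstdlib>

#include <spdlog/spdlog.h>

// Hypothetical stand-in for the MG_ASSERT defined in utils/logging.hpp.
#define MG_ASSERT(expr, ...)                              \
  do {                                                    \
    if (!(expr)) {                                        \
      /* The message, when given, starts with a string */ \
      /* literal, so concatenating the prefix is fine. */ \
      spdlog::critical("Assertion failed. " __VA_ARGS__); \
      std::abort();                                       \
    }                                                     \
  } while (false)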

View File

@ -7,7 +7,7 @@ set(auth_src_files
find_package(Seccomp REQUIRED)
add_library(mg-auth STATIC ${auth_src_files})
target_link_libraries(mg-auth json libbcrypt glog gflags fmt)
target_link_libraries(mg-auth json libbcrypt gflags fmt)
target_link_libraries(mg-auth mg-utils mg-kvstore)
target_link_libraries(mg-auth ${Seccomp_LIBRARIES})

View File

@ -6,10 +6,10 @@
#include <utility>
#include <fmt/format.h>
#include <glog/logging.h>
#include "auth/exceptions.hpp"
#include "utils/flag_validation.hpp"
#include "utils/logging.hpp"
#include "utils/string.hpp"
DEFINE_VALIDATED_string(
@ -94,15 +94,17 @@ std::optional<User> Auth::Authenticate(const std::string &username,
if (FLAGS_auth_module_create_missing_user) {
user = AddUser(username, password);
if (!user) {
LOG(WARNING) << "Couldn't authenticate user '" << username
<< "' using the auth module because the user already "
"exists as a role!";
spdlog::warn(
"Couldn't authenticate user '{}' using the auth module because "
"the user already exists as a role!",
username);
return std::nullopt;
}
} else {
LOG(WARNING)
<< "Couldn't authenticate user '" << username
<< "' using the auth module because the user doesn't exist!";
spdlog::warn(
"Couldn't authenticate user '{}' using the auth module because the "
"user doesn't exist!",
username);
return std::nullopt;
}
} else {
@ -115,17 +117,18 @@ std::optional<User> Auth::Authenticate(const std::string &username,
if (FLAGS_auth_module_create_missing_role) {
role = AddRole(rolename);
if (!role) {
LOG(WARNING)
<< "Couldn't authenticate user '" << username
<< "' using the auth module because the user's role '"
<< rolename << "' already exists as a user!";
spdlog::warn(
"Couldn't authenticate user '{}' using the auth module "
"because the user's role '{}' already exists as a user!",
username, rolename);
return std::nullopt;
}
SaveRole(*role);
} else {
LOG(WARNING) << "Couldn't authenticate user '" << username
<< "' using the auth module because the user's role '"
<< rolename << "' doesn't exist!";
spdlog::warn(
"Couldn't authenticate user '{}' using the auth module because "
"the user's role '{}' doesn't exist!",
username, rolename);
return std::nullopt;
}
}
@ -138,8 +141,16 @@ std::optional<User> Auth::Authenticate(const std::string &username,
return user;
} else {
auto user = GetUser(username);
if (!user) return std::nullopt;
if (!user->CheckPassword(password)) return std::nullopt;
if (!user) {
spdlog::warn(
"Couldn't authenticate user '{}' because the user doesn't exist",
username);
return std::nullopt;
}
if (!user->CheckPassword(password)) {
spdlog::warn("Couldn't authenticate user '{}'", username);
return std::nullopt;
}
return user;
}
}

View File

@ -24,7 +24,8 @@
#include <fmt/format.h>
#include <gflags/gflags.h>
#include <glog/logging.h>
#include "utils/logging.hpp"
namespace {
@ -361,13 +362,15 @@ bool Module::Startup() {
// Setup communication pipes.
if (pipe2(pipe_to_module_, O_CLOEXEC) != 0) {
LOG(ERROR) << "Couldn't create communication pipe from the database to "
"the auth module!";
spdlog::error(
"Couldn't create communication pipe from the database to "
"the auth module!");
return false;
}
if (pipe2(pipe_from_module_, O_CLOEXEC) != 0) {
LOG(ERROR) << "Couldn't create communication pipe from the auth module to "
"the database!";
spdlog::error(
"Couldn't create communication pipe from the auth module to "
"the database!");
close(pipe_to_module_[kPipeReadEnd]);
close(pipe_to_module_[kPipeWriteEnd]);
return false;
@ -384,7 +387,7 @@ bool Module::Startup() {
// Create the process.
pid_ = clone(Target, stack_top, CLONE_VFORK, target_arguments_.get());
if (pid_ == -1) {
LOG(ERROR) << "Couldn't start the auth module process!";
spdlog::error("Couldn't start the auth module process!");
close(pipe_to_module_[kPipeReadEnd]);
close(pipe_to_module_[kPipeWriteEnd]);
close(pipe_from_module_[kPipeReadEnd]);
@ -394,7 +397,7 @@ bool Module::Startup() {
// Check whether the process is still running.
if (waitpid(pid_, &status_, WNOHANG | WUNTRACED) != 0) {
LOG(ERROR) << "The auth module process couldn't be started!";
spdlog::error("The auth module process couldn't be started!");
return false;
}
@ -416,18 +419,18 @@ nlohmann::json Module::Call(const nlohmann::json &params,
// Put the request to the module process.
if (!PutData(pipe_to_module_[kPipeWriteEnd], params, timeout_millisec)) {
LOG(ERROR) << "Couldn't send data to the auth module process!";
spdlog::error("Couldn't send data to the auth module process!");
return {};
}
// Get the response from the module process.
auto ret = GetData(pipe_from_module_[kPipeReadEnd], timeout_millisec);
if (ret.is_null()) {
LOG(ERROR) << "Couldn't receive data from the auth module process!";
spdlog::error("Couldn't receive data from the auth module process!");
return {};
}
if (!ret.is_object()) {
LOG(ERROR) << "Data received from the auth module is of wrong type!";
spdlog::error("Data received from the auth module is of wrong type!");
return {};
}
return ret;
@ -441,7 +444,7 @@ void Module::Shutdown() {
// Try to terminate the process gracefully in `kTerminateTimeoutSec`.
std::this_thread::sleep_for(std::chrono::milliseconds(100));
for (int i = 0; i < kTerminateTimeoutSec * 10; ++i) {
LOG(INFO) << "Terminating the auth module process with pid " << pid_;
spdlog::info("Terminating the auth module process with pid {}", pid_);
kill(pid_, SIGTERM);
std::this_thread::sleep_for(std::chrono::milliseconds(100));
int ret = waitpid(pid_, &status_, WNOHANG | WUNTRACED);
@ -452,7 +455,7 @@ void Module::Shutdown() {
// If the process is still alive, kill it and wait for it to die.
if (waitpid(pid_, &status_, WNOHANG | WUNTRACED) == 0) {
LOG(WARNING) << "Killing the auth module process with pid " << pid_;
spdlog::warn("Killing the auth module process with pid {}", pid_);
kill(pid_, SIGKILL);
waitpid(pid_, &status_, 0);
}

View File

@ -7,7 +7,7 @@ set(communication_src_files
init.cpp)
add_library(mg-communication STATIC ${communication_src_files})
target_link_libraries(mg-communication Threads::Threads mg-utils mg-io fmt glog gflags)
target_link_libraries(mg-communication Threads::Threads mg-utils mg-io fmt gflags)
find_package(OpenSSL REQUIRED)
target_link_libraries(mg-communication ${OPENSSL_LIBRARIES})

View File

@ -1,7 +1,5 @@
#pragma once
#include <glog/logging.h>
#include "communication/bolt/v1/decoder/chunked_decoder_buffer.hpp"
#include "communication/bolt/v1/decoder/decoder.hpp"
#include "communication/bolt/v1/encoder/chunked_encoder_buffer.hpp"
@ -10,6 +8,7 @@
#include "communication/context.hpp"
#include "io/network/endpoint.hpp"
#include "utils/exceptions.hpp"
#include "utils/logging.hpp"
namespace communication::bolt {
@ -23,7 +22,7 @@ class ClientQueryException : public utils::BasicException {
ClientQueryException() : utils::BasicException("Couldn't execute query!") {}
template <class... Args>
ClientQueryException(const std::string &code, Args &&... args)
ClientQueryException(const std::string &code, Args &&...args)
: utils::BasicException(std::forward<Args>(args)...), code_(code) {}
const std::string &code() const { return code_; }
@ -88,22 +87,22 @@ class Client final {
}
if (!client_.Write(kPreamble, sizeof(kPreamble), true)) {
DLOG(ERROR) << "Couldn't send preamble!";
SPDLOG_ERROR("Couldn't send preamble!");
throw ServerCommunicationException();
}
for (int i = 0; i < 4; ++i) {
if (!client_.Write(kProtocol, sizeof(kProtocol), i != 3)) {
DLOG(ERROR) << "Couldn't send protocol version!";
SPDLOG_ERROR("Couldn't send protocol version!");
throw ServerCommunicationException();
}
}
if (!client_.Read(sizeof(kProtocol))) {
DLOG(ERROR) << "Couldn't get negotiated protocol version!";
SPDLOG_ERROR("Couldn't get negotiated protocol version!");
throw ServerCommunicationException();
}
if (memcmp(kProtocol, client_.GetData(), sizeof(kProtocol)) != 0) {
DLOG(ERROR) << "Server negotiated unsupported protocol version!";
SPDLOG_ERROR("Server negotiated unsupported protocol version!");
throw ClientFatalException(
"The server negotiated an usupported protocol version!");
}
@ -112,22 +111,22 @@ class Client final {
if (!encoder_.MessageInit(client_name, {{"scheme", "basic"},
{"principal", username},
{"credentials", password}})) {
DLOG(ERROR) << "Couldn't send init message!";
SPDLOG_ERROR("Couldn't send init message!");
throw ServerCommunicationException();
}
Signature signature;
Value metadata;
if (!ReadMessage(&signature, &metadata)) {
DLOG(ERROR) << "Couldn't read init message response!";
SPDLOG_ERROR("Couldn't read init message response!");
throw ServerCommunicationException();
}
if (signature != Signature::Success) {
DLOG(ERROR) << "Handshake failed!";
SPDLOG_ERROR("Handshake failed!");
throw ClientFatalException("Handshake with the server failed!");
}
DLOG(INFO) << "Metadata of init message response: " << metadata;
SPDLOG_INFO("Metadata of init message response: {}", metadata);
}
/// Function used to execute queries against the server. Before you can
@ -143,13 +142,13 @@ class Client final {
"You must first connect to the server before using the client!");
}
DLOG(INFO) << "Sending run message with statement: '" << query
<< "'; parameters: " << parameters;
SPDLOG_INFO("Sending run message with statement: '{}'; parameters: {}",
query, parameters);
encoder_.MessageRun(query, parameters);
encoder_.MessagePullAll();
DLOG(INFO) << "Reading run message response";
SPDLOG_INFO("Reading run message response");
Signature signature;
Value fields;
if (!ReadMessage(&signature, &fields)) {
@ -177,7 +176,7 @@ class Client final {
throw ServerMalformedDataException();
}
DLOG(INFO) << "Reading pull_all message response";
SPDLOG_INFO("Reading pull_all message response");
Marker marker;
Value metadata;
std::vector<std::vector<Value>> records;

View File

@ -5,8 +5,6 @@
#include <thread>
#include <vector>
#include <glog/logging.h>
#include "communication/bolt/client.hpp"
namespace communication::bolt {

View File

@ -6,7 +6,6 @@
#include <vector>
#include <fmt/format.h>
#include <glog/logging.h>
#include "communication/bolt/v1/constants.hpp"

View File

@ -2,12 +2,11 @@
#include <string>
#include <glog/logging.h>
#include "communication/bolt/v1/codes.hpp"
#include "communication/bolt/v1/value.hpp"
#include "utils/cast.hpp"
#include "utils/endian.hpp"
#include "utils/logging.hpp"
namespace communication::bolt {
@ -153,14 +152,14 @@ class Decoder {
private:
bool ReadNull(const Marker &marker, Value *data) {
DCHECK(marker == Marker::Null) << "Received invalid marker!";
DMG_ASSERT(marker == Marker::Null, "Received invalid marker!");
*data = Value();
return true;
}
bool ReadBool(const Marker &marker, Value *data) {
DCHECK(marker == Marker::False || marker == Marker::True)
<< "Received invalid marker!";
DMG_ASSERT(marker == Marker::False || marker == Marker::True,
"Received invalid marker!");
if (marker == Marker::False) {
*data = Value(false);
} else {
@ -208,7 +207,7 @@ class Decoder {
bool ReadDouble(const Marker marker, Value *data) {
uint64_t value;
double ret;
DCHECK(marker == Marker::Float64) << "Received invalid marker!";
DMG_ASSERT(marker == Marker::Float64, "Received invalid marker!");
if (!buffer_.Read(reinterpret_cast<uint8_t *>(&value), sizeof(value))) {
return false;
}
@ -264,14 +263,14 @@ class Decoder {
// `buffer_.Read(data->ValueString().data())`.
if (size < kMaxStackBuffer) {
if (!buffer_.Read(buffer, size)) {
DLOG(WARNING) << "[ReadString] Missing data!";
SPDLOG_WARN("[ReadString] Missing data!");
return false;
}
*data = Value(std::string(reinterpret_cast<char *>(buffer), size));
} else {
std::unique_ptr<uint8_t[]> ret(new uint8_t[size]);
if (!buffer_.Read(ret.get(), size)) {
DLOG(WARNING) << "[ReadString] Missing data!";
SPDLOG_WARN("[ReadString] Missing data!");
return false;
}
*data = Value(std::string(reinterpret_cast<char *>(ret.get()), size));

View File

@ -5,8 +5,6 @@
#include <memory>
#include <vector>
#include <glog/logging.h>
#include "communication/bolt/v1/constants.hpp"
namespace communication::bolt {

View File

@ -3,8 +3,6 @@
#include <optional>
#include <thread>
#include "glog/logging.h"
#include "communication/bolt/v1/constants.hpp"
#include "communication/bolt/v1/decoder/chunked_decoder_buffer.hpp"
#include "communication/bolt/v1/decoder/decoder.hpp"
@ -16,6 +14,7 @@
#include "communication/bolt/v1/states/handshake.hpp"
#include "communication/bolt/v1/states/init.hpp"
#include "utils/exceptions.hpp"
#include "utils/logging.hpp"
namespace communication::bolt {
@ -105,8 +104,8 @@ class Session {
// Receive the handshake.
if (input_stream_.size() < kHandshakeSize) {
DLOG(WARNING) << fmt::format("Received partial handshake of size {}",
input_stream_.size());
spdlog::trace("Received partial handshake of size {}",
input_stream_.size());
return;
}
state_ = StateHandshakeRun(*this);

View File

@ -1,13 +1,13 @@
#pragma once
#include <fmt/format.h>
#include <glog/logging.h>
#include "communication/bolt/v1/codes.hpp"
#include "communication/bolt/v1/state.hpp"
#include "communication/bolt/v1/value.hpp"
#include "utils/cast.hpp"
#include "utils/likely.hpp"
#include "utils/logging.hpp"
namespace communication::bolt {
@ -22,13 +22,13 @@ State StateErrorRun(TSession &session, State state) {
Marker marker;
Signature signature;
if (!session.decoder_.ReadMessageHeader(&signature, &marker)) {
DLOG(WARNING) << "Missing header data!";
spdlog::trace("Missing header data!");
return State::Close;
}
if (UNLIKELY(signature == Signature::Noop && session.version_.major == 4 &&
session.version_.minor == 1)) {
DLOG(INFO) << "Received NOOP message";
spdlog::trace("Received NOOP message");
return state;
}
@ -38,13 +38,13 @@ State StateErrorRun(TSession &session, State state) {
if ((session.version_.major == 1 && signature == Signature::AckFailure) ||
signature == Signature::Reset) {
if (signature == Signature::AckFailure) {
DLOG(INFO) << "AckFailure received";
spdlog::trace("AckFailure received");
} else {
DLOG(INFO) << "Reset received";
spdlog::trace("Reset received");
}
if (!session.encoder_.MessageSuccess()) {
DLOG(WARNING) << "Couldn't send success message!";
spdlog::trace("Couldn't send success message!");
return State::Close;
}
@ -54,7 +54,7 @@ State StateErrorRun(TSession &session, State state) {
}
// We got AckFailure get back to right state.
CHECK(state == State::Error) << "Shouldn't happen";
MG_ASSERT(state == State::Error, "Shouldn't happen");
return State::Idle;
} else {
uint8_t value = utils::UnderlyingCast(marker);
@ -62,8 +62,8 @@ State StateErrorRun(TSession &session, State state) {
// All bolt client messages have less than 15 parameters so if we receive
// anything than a TinyStruct it's an error.
if ((value & 0xF0) != utils::UnderlyingCast(Marker::TinyStruct)) {
DLOG(WARNING) << fmt::format(
"Expected TinyStruct marker, but received 0x{:02X}!", value);
spdlog::trace("Expected TinyStruct marker, but received 0x{:02X}!",
value);
return State::Close;
}
@ -72,15 +72,14 @@ State StateErrorRun(TSession &session, State state) {
Value dv;
for (int i = 0; i < value; ++i) {
if (!session.decoder_.ReadValue(&dv)) {
DLOG(WARNING) << fmt::format("Couldn't clean up parameter {} / {}!", i,
value);
spdlog::trace("Couldn't clean up parameter {} / {}!", i, value);
return State::Close;
}
}
// Ignore this message.
if (!session.encoder_.MessageIgnored()) {
DLOG(WARNING) << "Couldn't send ignored message!";
spdlog::trace("Couldn't send ignored message!");
return State::Close;
}

View File

@ -4,8 +4,6 @@
#include <new>
#include <string>
#include <glog/logging.h>
#include "communication/bolt/v1/codes.hpp"
#include "communication/bolt/v1/constants.hpp"
#include "communication/bolt/v1/exceptions.hpp"
@ -13,6 +11,7 @@
#include "communication/bolt/v1/value.hpp"
#include "communication/exceptions.hpp"
#include "utils/likely.hpp"
#include "utils/logging.hpp"
namespace communication::bolt {
@ -50,13 +49,13 @@ inline std::pair<std::string, std::string> ExceptionToErrorMessage(
if (dynamic_cast<const std::bad_alloc *>(&e)) {
// std::bad_alloc was thrown, God knows in which state is database ->
// terminate.
LOG(FATAL) << "Memgraph is out of memory";
LOG_FATAL("Memgraph is out of memory");
}
// All exceptions used in memgraph are derived from BasicException. Since
// we caught some other exception we don't know what is going on. Return
// DatabaseError, log real message and return generic string.
LOG(ERROR) << "Unknown exception occurred during query execution "
<< e.what();
spdlog::error("Unknown exception occurred during query execution {}",
e.what());
return {"Memgraph.DatabaseError.MemgraphError.MemgraphError",
"An unknown exception occurred, this is unexpected. Real message "
"should be in database logs."};
@ -64,16 +63,16 @@ inline std::pair<std::string, std::string> ExceptionToErrorMessage(
template <typename TSession>
inline State HandleFailure(TSession &session, const std::exception &e) {
DLOG(WARNING) << fmt::format("Error message: {}", e.what());
spdlog::trace("Error message: {}", e.what());
if (const auto *p = dynamic_cast<const utils::StacktraceException *>(&e)) {
DLOG(WARNING) << fmt::format("Error trace: {}", p->trace());
spdlog::trace("Error trace: {}", p->trace());
}
session.encoder_buffer_.Clear();
auto code_message = ExceptionToErrorMessage(e);
bool fail_sent = session.encoder_.MessageFailure(
{{"code", code_message.first}, {"message", code_message.second}});
if (!fail_sent) {
DLOG(WARNING) << "Couldn't send failure message!";
spdlog::trace("Couldn't send failure message!");
return State::Close;
}
return State::Error;
@ -87,27 +86,26 @@ State HandleRun(TSession &session, State state, Marker marker) {
const auto expected_marker =
session.version_.major == 1 ? Marker::TinyStruct2 : Marker::TinyStruct3;
if (marker != expected_marker) {
DLOG(WARNING) << fmt::format(
"Expected {} marker, but received 0x{:02X}!",
session.version_.major == 1 ? "TinyStruct2" : "TinyStruct3",
utils::UnderlyingCast(marker));
spdlog::trace("Expected {} marker, but received 0x{:02X}!",
session.version_.major == 1 ? "TinyStruct2" : "TinyStruct3",
utils::UnderlyingCast(marker));
return State::Close;
}
Value query, params, extra;
if (!session.decoder_.ReadValue(&query, Value::Type::String)) {
DLOG(WARNING) << "Couldn't read query string!";
spdlog::trace("Couldn't read query string!");
return State::Close;
}
if (!session.decoder_.ReadValue(&params, Value::Type::Map)) {
DLOG(WARNING) << "Couldn't read parameters!";
spdlog::trace("Couldn't read parameters!");
return State::Close;
}
if (session.version_.major == 4) {
if (!session.decoder_.ReadValue(&extra, Value::Type::Map)) {
DLOG(WARNING) << "Couldn't read extra field!";
spdlog::trace("Couldn't read extra field!");
}
}
@ -115,14 +113,14 @@ State HandleRun(TSession &session, State state, Marker marker) {
// Client could potentially recover if we move to error state, but there is
// no legitimate situation in which well working client would end up in this
// situation.
DLOG(WARNING) << "Unexpected RUN command!";
spdlog::trace("Unexpected RUN command!");
return State::Close;
}
DCHECK(!session.encoder_buffer_.HasData())
<< "There should be no data to write in this state";
DMG_ASSERT(!session.encoder_buffer_.HasData(),
"There should be no data to write in this state");
DLOG(INFO) << fmt::format("[Run] '{}'", query.ValueString());
spdlog::debug("[Run] '{}'", query.ValueString());
try {
// Interpret can throw.
@ -136,7 +134,7 @@ State HandleRun(TSession &session, State state, Marker marker) {
data.emplace("fields", std::move(vec));
// Send the header.
if (!session.encoder_.MessageSuccess(data)) {
DLOG(WARNING) << "Couldn't send query header!";
spdlog::trace("Couldn't send query header!");
return State::Close;
}
return State::Result;
@ -151,18 +149,17 @@ State HandlePullDiscard(TSession &session, State state, Marker marker) {
const auto expected_marker =
session.version_.major == 1 ? Marker::TinyStruct : Marker::TinyStruct1;
if (marker != expected_marker) {
DLOG(WARNING) << fmt::format(
"Expected {} marker, but received 0x{:02X}!",
session.version_.major == 1 ? "TinyStruct" : "TinyStruct1",
utils::UnderlyingCast(marker));
spdlog::trace("Expected {} marker, but received 0x{:02X}!",
session.version_.major == 1 ? "TinyStruct" : "TinyStruct1",
utils::UnderlyingCast(marker));
return State::Close;
}
if (state != State::Result) {
if constexpr (is_pull) {
DLOG(WARNING) << "Unexpected PULL!";
spdlog::trace("Unexpected PULL!");
} else {
DLOG(WARNING) << "Unexpected DISCARD!";
spdlog::trace("Unexpected DISCARD!");
}
// Same as `unexpected RUN` case.
return State::Close;
@ -175,7 +172,7 @@ State HandlePullDiscard(TSession &session, State state, Marker marker) {
if (session.version_.major == 4) {
Value extra;
if (!session.decoder_.ReadValue(&extra, Value::Type::Map)) {
DLOG(WARNING) << "Couldn't read extra field!";
spdlog::trace("Couldn't read extra field!");
}
const auto &extra_map = extra.ValueMap();
if (extra_map.count("n")) {
@ -202,7 +199,7 @@ State HandlePullDiscard(TSession &session, State state, Marker marker) {
}
if (!session.encoder_.MessageSuccess(summary)) {
DLOG(WARNING) << "Couldn't send query summary!";
spdlog::trace("Couldn't send query summary!");
return State::Close;
}
@ -239,9 +236,8 @@ State HandleReset(Session &session, State, Marker marker) {
// now this command only resets the session to a clean state. It
// does not IGNORE running and pending commands as it should.
if (marker != Marker::TinyStruct) {
DLOG(WARNING) << fmt::format(
"Expected TinyStruct marker, but received 0x{:02X}!",
utils::UnderlyingCast(marker));
spdlog::trace("Expected TinyStruct marker, but received 0x{:02X}!",
utils::UnderlyingCast(marker));
return State::Close;
}
@ -249,7 +245,7 @@ State HandleReset(Session &session, State, Marker marker) {
session.encoder_buffer_.Clear();
if (!session.encoder_.MessageSuccess()) {
DLOG(WARNING) << "Couldn't send success message!";
spdlog::trace("Couldn't send success message!");
return State::Close;
}
@ -261,33 +257,32 @@ State HandleReset(Session &session, State, Marker marker) {
template <typename Session>
State HandleBegin(Session &session, State state, Marker marker) {
if (session.version_.major == 1) {
DLOG(WARNING) << "BEGIN messsage not supported in Bolt v1!";
spdlog::trace("BEGIN messsage not supported in Bolt v1!");
return State::Close;
}
if (marker != Marker::TinyStruct1) {
DLOG(WARNING) << fmt::format(
"Expected TinyStruct1 marker, but received 0x{:02x}!",
utils::UnderlyingCast(marker));
spdlog::trace("Expected TinyStruct1 marker, but received 0x{:02x}!",
utils::UnderlyingCast(marker));
return State::Close;
}
Value extra;
if (!session.decoder_.ReadValue(&extra, Value::Type::Map)) {
DLOG(WARNING) << "Couldn't read extra fields!";
spdlog::trace("Couldn't read extra fields!");
return State::Close;
}
if (state != State::Idle) {
DLOG(WARNING) << "Unexpected BEGIN command!";
spdlog::trace("Unexpected BEGIN command!");
return State::Close;
}
DCHECK(!session.encoder_buffer_.HasData())
<< "There should be no data to write in this state";
DMG_ASSERT(!session.encoder_buffer_.HasData(),
"There should be no data to write in this state");
if (!session.encoder_.MessageSuccess({})) {
DLOG(WARNING) << "Couldn't send success message!";
spdlog::trace("Couldn't send success message!");
return State::Close;
}
@ -303,28 +298,27 @@ State HandleBegin(Session &session, State state, Marker marker) {
template <typename Session>
State HandleCommit(Session &session, State state, Marker marker) {
if (session.version_.major == 1) {
DLOG(WARNING) << "COMMIT messsage not supported in Bolt v1!";
spdlog::trace("COMMIT messsage not supported in Bolt v1!");
return State::Close;
}
if (marker != Marker::TinyStruct) {
DLOG(WARNING) << fmt::format(
"Expected TinyStruct marker, but received 0x{:02x}!",
utils::UnderlyingCast(marker));
spdlog::trace("Expected TinyStruct marker, but received 0x{:02x}!",
utils::UnderlyingCast(marker));
return State::Close;
}
if (state != State::Idle) {
DLOG(WARNING) << "Unexpected COMMIT command!";
spdlog::trace("Unexpected COMMIT command!");
return State::Close;
}
DCHECK(!session.encoder_buffer_.HasData())
<< "There should be no data to write in this state";
DMG_ASSERT(!session.encoder_buffer_.HasData(),
"There should be no data to write in this state");
try {
if (!session.encoder_.MessageSuccess({})) {
DLOG(WARNING) << "Couldn't send success message!";
spdlog::trace("Couldn't send success message!");
return State::Close;
}
session.CommitTransaction();
@ -337,28 +331,27 @@ State HandleCommit(Session &session, State state, Marker marker) {
template <typename Session>
State HandleRollback(Session &session, State state, Marker marker) {
if (session.version_.major == 1) {
DLOG(WARNING) << "ROLLBACK messsage not supported in Bolt v1!";
spdlog::trace("ROLLBACK messsage not supported in Bolt v1!");
return State::Close;
}
if (marker != Marker::TinyStruct) {
DLOG(WARNING) << fmt::format(
"Expected TinyStruct marker, but received 0x{:02x}!",
utils::UnderlyingCast(marker));
spdlog::trace("Expected TinyStruct marker, but received 0x{:02x}!",
utils::UnderlyingCast(marker));
return State::Close;
}
if (state != State::Idle) {
DLOG(WARNING) << "Unexpected ROLLBACK command!";
spdlog::trace("Unexpected ROLLBACK command!");
return State::Close;
}
DCHECK(!session.encoder_buffer_.HasData())
<< "There should be no data to write in this state";
DMG_ASSERT(!session.encoder_buffer_.HasData(),
"There should be no data to write in this state");
try {
if (!session.encoder_.MessageSuccess({})) {
DLOG(WARNING) << "Couldn't send success message!";
spdlog::trace("Couldn't send success message!");
return State::Close;
}
session.RollbackTransaction();
@ -379,13 +372,13 @@ State StateExecutingRun(Session &session, State state) {
Marker marker;
Signature signature;
if (!session.decoder_.ReadMessageHeader(&signature, &marker)) {
DLOG(WARNING) << "Missing header data!";
spdlog::trace("Missing header data!");
return State::Close;
}
if (UNLIKELY(signature == Signature::Noop && session.version_.major == 4 &&
session.version_.minor == 1)) {
DLOG(INFO) << "Received NOOP message";
spdlog::trace("Received NOOP message");
return state;
}
@ -406,8 +399,8 @@ State StateExecutingRun(Session &session, State state) {
} else if (signature == Signature::Goodbye && session.version_.major != 1) {
throw SessionClosedException("Closing connection.");
} else {
DLOG(WARNING) << fmt::format("Unrecognized signature received (0x{:02X})!",
utils::UnderlyingCast(signature));
spdlog::trace("Unrecognized signature received (0x{:02X})!",
utils::UnderlyingCast(signature));
return State::Close;
}
}

View File

@ -1,13 +1,12 @@
#pragma once
#include <glog/logging.h>
#include <fmt/format.h>
#include "communication/bolt/v1/codes.hpp"
#include "communication/bolt/v1/constants.hpp"
#include "communication/bolt/v1/state.hpp"
#include "utils/likely.hpp"
#include "utils/logging.hpp"
namespace communication::bolt {
@ -21,12 +20,12 @@ State StateHandshakeRun(TSession &session) {
auto precmp =
std::memcmp(session.input_stream_.data(), kPreamble, sizeof(kPreamble));
if (UNLIKELY(precmp != 0)) {
DLOG(WARNING) << "Received a wrong preamble!";
spdlog::trace("Received a wrong preamble!");
return State::Close;
}
DCHECK(session.input_stream_.size() >= kHandshakeSize)
<< "Wrong size of the handshake data!";
DMG_ASSERT(session.input_stream_.size() >= kHandshakeSize,
"Wrong size of the handshake data!");
auto dataPosition = session.input_stream_.data() + sizeof(kPreamble);
@ -53,17 +52,17 @@ State StateHandshakeRun(TSession &session) {
session.version_.minor = protocol[2];
session.version_.major = protocol[3];
if (!session.version_.major) {
DLOG(WARNING) << "Server doesn't support any of the requested versions!";
spdlog::trace("Server doesn't support any of the requested versions!");
return State::Close;
}
if (!session.output_stream_.Write(protocol, sizeof(protocol))) {
DLOG(WARNING) << "Couldn't write handshake response!";
spdlog::trace("Couldn't write handshake response!");
return State::Close;
}
DLOG(INFO) << fmt::format("Using version {}.{} of protocol",
session.version_.major, session.version_.minor);
spdlog::info("Using version {}.{} of protocol", session.version_.major,
session.version_.minor);
// Delete data from the input stream. It is guaranteed that there will more
// than, or equal to 20 bytes (kHandshakeSize) in the buffer.

View File

@ -1,13 +1,13 @@
#pragma once
#include <fmt/format.h>
#include <glog/logging.h>
#include "communication/bolt/v1/codes.hpp"
#include "communication/bolt/v1/state.hpp"
#include "communication/bolt/v1/value.hpp"
#include "communication/exceptions.hpp"
#include "utils/likely.hpp"
#include "utils/logging.hpp"
namespace communication::bolt {
@ -15,30 +15,29 @@ namespace detail {
template <typename TSession>
std::optional<Value> StateInitRunV1(TSession &session, const Marker marker) {
if (UNLIKELY(marker != Marker::TinyStruct2)) {
DLOG(WARNING) << fmt::format(
"Expected TinyStruct2 marker, but received 0x{:02X}!",
utils::UnderlyingCast(marker));
DLOG(WARNING) << "The client sent malformed data, but we are continuing "
"because the official Neo4j Java driver sends malformed "
"data. D'oh!";
spdlog::trace("Expected TinyStruct2 marker, but received 0x{:02X}!",
utils::UnderlyingCast(marker));
spdlog::trace(
"The client sent malformed data, but we are continuing "
"because the official Neo4j Java driver sends malformed "
"data. D'oh!");
// TODO: this should be uncommented when the Neo4j Java driver is fixed
// return State::Close;
}
Value client_name;
if (!session.decoder_.ReadValue(&client_name, Value::Type::String)) {
DLOG(WARNING) << "Couldn't read client name!";
spdlog::trace("Couldn't read client name!");
return std::nullopt;
}
Value metadata;
if (!session.decoder_.ReadValue(&metadata, Value::Type::Map)) {
DLOG(WARNING) << "Couldn't read metadata!";
spdlog::trace("Couldn't read metadata!");
return std::nullopt;
}
LOG(INFO) << fmt::format("Client connected '{}'", client_name.ValueString())
<< std::endl;
spdlog::info("Client connected '{}'", client_name.ValueString());
return metadata;
}
@ -46,31 +45,29 @@ std::optional<Value> StateInitRunV1(TSession &session, const Marker marker) {
template <typename TSession>
std::optional<Value> StateInitRunV4(TSession &session, const Marker marker) {
if (UNLIKELY(marker != Marker::TinyStruct1)) {
DLOG(WARNING) << fmt::format(
"Expected TinyStruct1 marker, but received 0x{:02X}!",
utils::UnderlyingCast(marker));
DLOG(WARNING) << "The client sent malformed data, but we are continuing "
"because the official Neo4j Java driver sends malformed "
"data. D'oh!";
spdlog::trace("Expected TinyStruct1 marker, but received 0x{:02X}!",
utils::UnderlyingCast(marker));
spdlog::trace(
"The client sent malformed data, but we are continuing "
"because the official Neo4j Java driver sends malformed "
"data. D'oh!");
// TODO: this should be uncommented when the Neo4j Java driver is fixed
// return State::Close;
}
Value metadata;
if (!session.decoder_.ReadValue(&metadata, Value::Type::Map)) {
DLOG(WARNING) << "Couldn't read metadata!";
spdlog::trace("Couldn't read metadata!");
return std::nullopt;
}
const auto &data = metadata.ValueMap();
if (!data.count("user_agent")) {
LOG(WARNING) << "The client didn't supply the user agent!";
spdlog::warn("The client didn't supply the user agent!");
return std::nullopt;
}
LOG(INFO) << fmt::format("Client connected '{}'",
data.at("user_agent").ValueString())
<< std::endl;
spdlog::info("Client connected '{}'", data.at("user_agent").ValueString());
return metadata;
}
@ -83,26 +80,25 @@ std::optional<Value> StateInitRunV4(TSession &session, const Marker marker) {
*/
template <typename Session>
State StateInitRun(Session &session) {
DCHECK(!session.encoder_buffer_.HasData())
<< "There should be no data to write in this state";
DMG_ASSERT(!session.encoder_buffer_.HasData(),
"There should be no data to write in this state");
Marker marker;
Signature signature;
if (!session.decoder_.ReadMessageHeader(&signature, &marker)) {
DLOG(WARNING) << "Missing header data!";
spdlog::trace("Missing header data!");
return State::Close;
}
if (UNLIKELY(signature == Signature::Noop && session.version_.major == 4 &&
session.version_.minor == 1)) {
DLOG(INFO) << "Received NOOP message";
SPDLOG_DEBUG("Received NOOP message");
return State::Init;
}
if (UNLIKELY(signature != Signature::Init)) {
DLOG(WARNING) << fmt::format(
"Expected Init signature, but received 0x{:02X}!",
utils::UnderlyingCast(signature));
spdlog::trace("Expected Init signature, but received 0x{:02X}!",
utils::UnderlyingCast(signature));
return State::Close;
}
@ -119,19 +115,19 @@ State StateInitRun(Session &session) {
std::string password;
auto &data = maybeMetadata->ValueMap();
if (!data.count("scheme")) {
LOG(WARNING) << "The client didn't supply authentication information!";
spdlog::warn("The client didn't supply authentication information!");
return State::Close;
}
if (data["scheme"].ValueString() == "basic") {
if (!data.count("principal") || !data.count("credentials")) {
LOG(WARNING) << "The client didn't supply authentication information!";
spdlog::warn("The client didn't supply authentication information!");
return State::Close;
}
username = data["principal"].ValueString();
password = data["credentials"].ValueString();
} else if (data["scheme"].ValueString() != "none") {
LOG(WARNING) << "Unsupported authentication scheme: "
<< data["scheme"].ValueString();
spdlog::warn("Unsupported authentication scheme: {}",
data["scheme"].ValueString());
return State::Close;
}
@ -140,7 +136,7 @@ State StateInitRun(Session &session) {
if (!session.encoder_.MessageFailure(
{{"code", "Memgraph.ClientError.Security.Unauthenticated"},
{"message", "Authentication failure"}})) {
DLOG(WARNING) << "Couldn't send failure message to the client!";
spdlog::trace("Couldn't send failure message to the client!");
}
// Throw an exception to indicate to the network stack that the session
// should be closed and cleaned up.
@ -160,7 +156,7 @@ State StateInitRun(Session &session) {
}
success_sent = session.encoder_.MessageSuccess(metadata);
if (!success_sent) {
DLOG(WARNING) << "Couldn't send success message to the client!";
spdlog::trace("Couldn't send success message to the client!");
return State::Close;
}
}

View File

@ -1,7 +1,7 @@
#include "glog/logging.h"
#include "communication/buffer.hpp"
#include "utils/logging.hpp"
namespace communication {
Buffer::Buffer()
@ -40,7 +40,7 @@ uint8_t *Buffer::data() { return data_.data(); }
size_t Buffer::size() const { return have_; }
void Buffer::Shift(size_t len) {
DCHECK(len <= have_) << "Tried to shift more data than the buffer has!";
DMG_ASSERT(len <= have_, "Tried to shift more data than the buffer has!");
if (len == have_) {
have_ = 0;
} else {
@ -50,15 +50,16 @@ void Buffer::Shift(size_t len) {
}
io::network::StreamBuffer Buffer::Allocate() {
DCHECK(data_.size() > have_) << "The buffer thinks that there is more data "
"in the buffer than there is underlying "
"storage space!";
DMG_ASSERT(data_.size() > have_,
"The buffer thinks that there is more data "
"in the buffer than there is underlying "
"storage space!");
return {data_.data() + have_, data_.size() - have_};
}
void Buffer::Written(size_t len) {
have_ += len;
DCHECK(have_ <= data_.size()) << "Written more than storage has space!";
DMG_ASSERT(have_ <= data_.size(), "Written more than storage has space!");
}
void Buffer::Resize(size_t len) {

View File

@ -1,7 +1,7 @@
#include <glog/logging.h>
#include "communication/client.hpp"
#include "communication/helpers.hpp"
#include "utils/logging.hpp"
namespace communication {
@ -32,7 +32,7 @@ bool Client::Connect(const io::network::Endpoint &endpoint) {
// Create a new SSL object that will be used for SSL communication.
ssl_ = SSL_new(context_->context());
if (ssl_ == nullptr) {
DLOG(ERROR) << "Couldn't create client SSL object!";
SPDLOG_ERROR("Couldn't create client SSL object!");
socket_.Close();
return false;
}
@ -43,7 +43,7 @@ bool Client::Connect(const io::network::Endpoint &endpoint) {
// handle that in our socket destructor).
bio_ = BIO_new_socket(socket_.fd(), BIO_NOCLOSE);
if (bio_ == nullptr) {
DLOG(ERROR) << "Couldn't create client BIO object!";
SPDLOG_ERROR("Couldn't create client BIO object!");
socket_.Close();
return false;
}
@ -59,7 +59,7 @@ bool Client::Connect(const io::network::Endpoint &endpoint) {
// Perform the TLS handshake.
auto ret = SSL_connect(ssl_);
if (ret != 1) {
DLOG(WARNING) << "Couldn't connect to SSL server: " << SslGetLastError();
SPDLOG_WARN("Couldn't connect to SSL server: {}", SslGetLastError());
socket_.Close();
return false;
}
@ -114,7 +114,7 @@ bool Client::Read(size_t len, bool exactly_len) {
continue;
} else {
// This is a fatal error.
DLOG(ERROR) << "Received an unexpected SSL error: " << err;
SPDLOG_ERROR("Received an unexpected SSL error: {}", err);
return false;
}
} else if (got == 0) {

View File

@ -1,7 +1,7 @@
#include <glog/logging.h>
#include "communication/context.hpp"
#include "utils/logging.hpp"
namespace communication {
ClientContext::ClientContext(bool use_ssl) : use_ssl_(use_ssl), ctx_(nullptr) {
@ -11,7 +11,7 @@ ClientContext::ClientContext(bool use_ssl) : use_ssl_(use_ssl), ctx_(nullptr) {
#else
ctx_ = SSL_CTX_new(TLS_client_method());
#endif
CHECK(ctx_ != nullptr) << "Couldn't create client SSL_CTX object!";
MG_ASSERT(ctx_ != nullptr, "Couldn't create client SSL_CTX object!");
// Disable legacy SSL support. Other options can be seen here:
// https://www.openssl.org/docs/man1.0.2/ssl/SSL_CTX_set_options.html
@ -23,12 +23,12 @@ ClientContext::ClientContext(const std::string &key_file,
const std::string &cert_file)
: ClientContext(true) {
if (key_file != "" && cert_file != "") {
CHECK(SSL_CTX_use_certificate_file(ctx_, cert_file.c_str(),
SSL_FILETYPE_PEM) == 1)
<< "Couldn't load client certificate from file: " << cert_file;
CHECK(SSL_CTX_use_PrivateKey_file(ctx_, key_file.c_str(),
SSL_FILETYPE_PEM) == 1)
<< "Couldn't load client private key from file: " << key_file;
MG_ASSERT(SSL_CTX_use_certificate_file(ctx_, cert_file.c_str(),
SSL_FILETYPE_PEM) == 1,
"Couldn't load client certificate from file: {}", cert_file);
MG_ASSERT(SSL_CTX_use_PrivateKey_file(ctx_, key_file.c_str(),
SSL_FILETYPE_PEM) == 1,
"Couldn't load client private key from file: ", key_file);
}
}
@ -81,12 +81,12 @@ ServerContext::ServerContext(const std::string &key_file,
{
// TODO (mferencevic): add support for encrypted private keys
// TODO (mferencevic): add certificate revocation list (CRL)
CHECK(SSL_CTX_use_certificate_file(ctx_, cert_file.c_str(),
SSL_FILETYPE_PEM) == 1)
<< "Couldn't load server certificate from file: " << cert_file;
CHECK(SSL_CTX_use_PrivateKey_file(ctx_, key_file.c_str(), SSL_FILETYPE_PEM) ==
1)
<< "Couldn't load server private key from file: " << key_file;
MG_ASSERT(SSL_CTX_use_certificate_file(ctx_, cert_file.c_str(),
SSL_FILETYPE_PEM) == 1,
"Couldn't load server certificate from file: {}", cert_file);
MG_ASSERT(SSL_CTX_use_PrivateKey_file(ctx_, key_file.c_str(),
SSL_FILETYPE_PEM) == 1,
"Couldn't load server private key from file: {}", key_file);
// Disable legacy SSL support. Other options can be seen here:
// https://www.openssl.org/docs/man1.0.2/ssl/SSL_CTX_set_options.html
@ -94,14 +94,15 @@ ServerContext::ServerContext(const std::string &key_file,
if (ca_file != "") {
// Load the certificate authority file.
CHECK(SSL_CTX_load_verify_locations(ctx_, ca_file.c_str(), nullptr) == 1)
<< "Couldn't load certificate authority from file: " << ca_file;
MG_ASSERT(
SSL_CTX_load_verify_locations(ctx_, ca_file.c_str(), nullptr) == 1,
"Couldn't load certificate authority from file: {}", ca_file);
if (verify_peer) {
// Add the CA to list of accepted CAs that is sent to the client.
STACK_OF(X509_NAME) *ca_names = SSL_load_client_CA_file(ca_file.c_str());
CHECK(ca_names != nullptr)
<< "Couldn't load certificate authority from file: " << ca_file;
MG_ASSERT(ca_names != nullptr,
"Couldn't load certificate authority from file: {}", ca_file);
// `ca_names` doesn' need to be free'd because we pass it to
// `SSL_CTX_set_client_CA_list`:
// https://mta.openssl.org/pipermail/openssl-users/2015-May/001363.html

View File

@ -1,12 +1,11 @@
#include "init.hpp"
#include <glog/logging.h>
#include <openssl/bio.h>
#include <openssl/err.h>
#include <openssl/ssl.h>
#include <thread>
#include "utils/logging.hpp"
#include "utils/signals.hpp"
#include "utils/spin_lock.hpp"
@ -59,7 +58,8 @@ SSLInit::SSLInit() {
ERR_load_crypto_strings();
// Ignore SIGPIPE.
CHECK(utils::SignalIgnore(utils::Signal::Pipe)) << "Couldn't ignore SIGPIPE!";
MG_ASSERT(utils::SignalIgnore(utils::Signal::Pipe),
"Couldn't ignore SIGPIPE!");
SetupThreading();
}

View File

@ -8,11 +8,11 @@
#include <thread>
#include <gflags/gflags.h>
#include <glog/logging.h>
#include "communication/session.hpp"
#include "io/network/epoll.hpp"
#include "io/network/socket.hpp"
#include "utils/logging.hpp"
#include "utils/signals.hpp"
#include "utils/spin_lock.hpp"
#include "utils/thread.hpp"
@ -55,9 +55,9 @@ class Listener final {
for (auto &thread : worker_threads_) {
if (thread.joinable()) worker_alive = true;
}
CHECK(!alive_ && !worker_alive && !timeout_thread_.joinable())
<< "You should call Shutdown and AwaitShutdown on "
"communication::Listener!";
MG_ASSERT(!alive_ && !worker_alive && !timeout_thread_.joinable(),
"You should call Shutdown and AwaitShutdown on "
"communication::Listener!");
}
Listener(const Listener &) = delete;
@ -94,11 +94,10 @@ class Listener final {
* This function starts the listener
*/
void Start() {
CHECK(!alive_) << "The listener is already started!";
MG_ASSERT(!alive_, "The listener is already started!");
alive_.store(true);
std::cout << "Starting " << workers_count_ << " " << service_name_
<< " workers" << std::endl;
spdlog::info("Starting {} {} workers", workers_count_, service_name_);
std::string service_name(service_name_);
for (size_t i = 0; i < workers_count_; ++i) {
@ -118,8 +117,8 @@ class Listener final {
std::lock_guard<utils::SpinLock> guard(lock_);
for (auto &session : sessions_) {
if (session->TimedOut()) {
LOG(WARNING) << service_name << " session associated with "
<< session->socket().endpoint() << " timed out.";
spdlog::warn("{} session associated with {} timed out",
service_name, session->socket().endpoint());
// Here we shutdown the socket to terminate any leftover
// blocking `Write` calls and to signal an event that the
// session is closed. Session cleanup will be done in the event
@ -193,20 +192,20 @@ class Listener final {
;
} else if (event.events & EPOLLRDHUP) {
// The client closed the connection.
LOG(INFO) << service_name_ << " client " << session.socket().endpoint()
<< " closed the connection.";
spdlog::info("{} client {} closed the connection.", service_name_,
session.socket().endpoint());
CloseSession(session);
} else if (!(event.events & EPOLLIN) ||
event.events & (EPOLLHUP | EPOLLERR)) {
// There was an error on the server side.
LOG(ERROR) << "Error occured in " << service_name_
<< " session associated with " << session.socket().endpoint();
spdlog::error("Error occured in {} session associated with {}",
service_name_, session.socket().endpoint());
CloseSession(session);
} else {
// Unhandled epoll event.
LOG(ERROR) << "Unhandled event occured in " << service_name_
<< " session associated with " << session.socket().endpoint()
<< " events: " << event.events;
spdlog::error(
"Unhandled event occured in {} session associated with {} events: {}",
service_name_, session.socket().endpoint(), event.events);
CloseSession(session);
}
}
@ -221,16 +220,17 @@ class Listener final {
return false;
}
} catch (const SessionClosedException &e) {
LOG(INFO) << service_name_ << " client " << session.socket().endpoint()
<< " closed the connection.";
spdlog::info("{} client {} closed the connection.", service_name_,
session.socket().endpoint());
CloseSession(session);
return false;
} catch (const std::exception &e) {
// Catch all exceptions.
LOG(ERROR) << "Exception was thrown while processing event in "
<< service_name_ << " session associated with "
<< session.socket().endpoint()
<< " with message: " << e.what();
spdlog::error(
"Exception was thrown while processing event in {} session "
"associated with {}",
service_name_, session.socket().endpoint());
spdlog::debug("Exception message: {}", e.what());
CloseSession(session);
return false;
}
@ -248,8 +248,8 @@ class Listener final {
auto it = std::find_if(sessions_.begin(), sessions_.end(),
[&](const auto &l) { return l.get() == &session; });
CHECK(it != sessions_.end())
<< "Trying to remove session that is not found in sessions!";
MG_ASSERT(it != sessions_.end(),
"Trying to remove session that is not found in sessions!");
int i = it - sessions_.begin();
swap(sessions_[i], sessions_.back());

View File

@ -2,8 +2,6 @@
#include <map>
#include "glog/logging.h"
#include "communication/bolt/v1/value.hpp"
#include "glue/communication.hpp"
#include "query/typed_value.hpp"
@ -38,7 +36,7 @@ class ResultStreamFaker {
bvalues.reserve(values.size());
for (const auto &value : values) {
auto maybe_value = glue::ToBoltValue(value, *store_, storage::View::NEW);
CHECK(maybe_value.HasValue());
MG_ASSERT(maybe_value.HasValue());
bvalues.push_back(std::move(*maybe_value));
}
results_.push_back(std::move(bvalues));
@ -54,7 +52,7 @@ class ResultStreamFaker {
for (const auto &item : summary) {
auto maybe_value =
glue::ToBoltValue(item.second, *store_, storage::View::NEW);
CHECK(maybe_value.HasValue());
MG_ASSERT(maybe_value.HasValue());
bsummary.insert({item.first, std::move(*maybe_value)});
}
summary_ = std::move(bsummary);

View File

@ -8,11 +8,11 @@
#include <vector>
#include <fmt/format.h>
#include <glog/logging.h>
#include "communication/init.hpp"
#include "communication/listener.hpp"
#include "io/network/socket.hpp"
#include "utils/logging.hpp"
#include "utils/thread.hpp"
namespace communication {
@ -57,9 +57,10 @@ class Server final {
service_name_(service_name) {}
~Server() {
CHECK(!alive_ && !thread_.joinable()) << "You should call Shutdown and "
"AwaitShutdown on "
"communication::Server!";
MG_ASSERT(!alive_ && !thread_.joinable(),
"You should call Shutdown and "
"AwaitShutdown on "
"communication::Server!");
}
Server(const Server &) = delete;
@ -68,23 +69,24 @@ class Server final {
Server &operator=(Server &&) = delete;
const auto &endpoint() const {
CHECK(alive_) << "You can't get the server endpoint when it's not running!";
MG_ASSERT(alive_,
"You can't get the server endpoint when it's not running!");
return socket_.endpoint();
}
/// Starts the server
bool Start() {
CHECK(!alive_) << "The server was already started!";
MG_ASSERT(!alive_, "The server was already started!");
alive_.store(true);
if (!socket_.Bind(endpoint_)) {
LOG(ERROR) << "Cannot bind to socket on " << endpoint_;
spdlog::error("Cannot bind to socket on {}", endpoint_);
alive_.store(false);
return false;
}
socket_.SetTimeout(1, 0);
if (!socket_.Listen(1024)) {
LOG(ERROR) << "Cannot listen on socket!";
spdlog::error("Cannot listen on socket {}", endpoint_);
alive_.store(false);
return false;
}
@ -95,16 +97,14 @@ class Server final {
thread_ = std::thread([this, service_name]() {
utils::ThreadSetName(fmt::format("{} server", service_name));
std::cout << service_name_ << " server is fully armed and operational"
<< std::endl;
std::cout << service_name_ << " listening on " << socket_.endpoint()
<< std::endl;
spdlog::info("{} server is fully armed and operational", service_name_);
spdlog::info("{} listening on {}", service_name_, socket_.endpoint());
while (alive_) {
AcceptConnection();
}
std::cout << service_name << " shutting down..." << std::endl;
spdlog::info("{} shutting down...", service_name_);
});
return true;
@ -138,8 +138,8 @@ class Server final {
// Connection is not available anymore or configuration failed.
return;
}
LOG(INFO) << "Accepted a " << service_name_ << " connection from "
<< s->endpoint();
spdlog::info("Accepted a {} connection from {}", service_name_,
s->endpoint());
listener_.AddConnection(std::move(*s));
}
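spdlog interpolates arguments through fmt-style {} placeholders rather than operator<< chaining, so calls such as spdlog::info("{} listening on {}", ..., socket_.endpoint()) rely on the endpoint type being formattable. Below is a standalone sketch with a made-up Endpoint type (the real io::network::Endpoint is not reproduced here); with the fmt/spdlog versions pinned by this change, including spdlog/fmt/ostr.h makes any type with a stream operator<< loggable, while much newer fmt releases additionally require an fmt::ostream_formatter specialization:

#include <cstdint>
#include <ostream>
#include <string>

#include <spdlog/spdlog.h>
#include <spdlog/fmt/ostr.h>  // route {} formatting through operator<<

// Made-up endpoint type for illustration only.
struct Endpoint {
  std::string address;
  uint16_t port;
};

std::ostream &operator<<(std::ostream &os, const Endpoint &e) {
  return os << e.address << ":" << e.port;
}

int main() {
  Endpoint endpoint{"127.0.0.1", 7687};
  spdlog::info("Bolt listening on {}", endpoint);  // prints "... 127.0.0.1:7687"
}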

View File

@ -7,8 +7,6 @@
#include <mutex>
#include <thread>
#include <glog/logging.h>
#include <openssl/bio.h>
#include <openssl/err.h>
#include <openssl/ssl.h>
@ -19,6 +17,7 @@
#include "communication/helpers.hpp"
#include "io/network/socket.hpp"
#include "io/network/stream_buffer.hpp"
#include "utils/logging.hpp"
#include "utils/on_scope_exit.hpp"
#include "utils/spin_lock.hpp"
@ -97,14 +96,14 @@ class Session final {
if (context->use_ssl()) {
// Create a new SSL object that will be used for SSL communication.
ssl_ = SSL_new(context->context());
CHECK(ssl_ != nullptr) << "Couldn't create server SSL object!";
MG_ASSERT(ssl_ != nullptr, "Couldn't create server SSL object!");
// Create a new BIO (block I/O) SSL object so that OpenSSL can communicate
// using our socket. We specify `BIO_NOCLOSE` to indicate to OpenSSL that
// it doesn't need to close the socket when destructing all objects (we
// handle that in our socket destructor).
bio_ = BIO_new_socket(socket_.fd(), BIO_NOCLOSE);
CHECK(bio_ != nullptr) << "Couldn't create server BIO object!";
MG_ASSERT(bio_ != nullptr, "Couldn't create server BIO object!");
// Connect the BIO object to the SSL object so that OpenSSL knows which
// stream it should use for communication. We use the same object for both
@ -189,6 +188,10 @@ class Session final {
throw utils::BasicException(SslGetLastError());
} else {
// This is a fatal error.
spdlog::error(
"An unknown error occured while processing SSL message."
" Please make sure that you have SSL properly configured on "
"the server and the client.");
throw utils::BasicException(SslGetLastError());
}
} else if (len == 0) {

View File

@ -7,8 +7,7 @@
#include <thread>
#include <utility>
#include "glog/logging.h"
#include "utils/logging.hpp"
#include "utils/spin_lock.hpp"
/**
@ -38,7 +37,7 @@ class RingBuffer {
* available, there are no order-of-entrance guarantees.
*/
template <typename... TArgs>
void emplace(TArgs &&... args) {
void emplace(TArgs &&...args) {
while (true) {
{
std::lock_guard<utils::SpinLock> guard(lock_);
@ -50,8 +49,8 @@ class RingBuffer {
}
}
// Log a warning approximately once per second if buffer is full.
DLOG_EVERY_N(WARNING, 4000) << "RingBuffer full: worker waiting";
SPDLOG_WARN("RingBuffer full: worker waiting");
// Sleep time determined using tests/benchmark/ring_buffer.cpp
std::this_thread::sleep_for(std::chrono::microseconds(250));
}
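SPDLOG_WARN used here is the macro form of spdlog::warn. Unlike the function call, it can be compiled out entirely by defining SPDLOG_ACTIVE_LEVEL before including spdlog, which is one way to keep a hot path like this cheap in release builds. A minimal illustration (the level chosen is only an example, not project policy):

// With the compile-time level set to "error", SPDLOG_WARN expands to a no-op.
#define SPDLOG_ACTIVE_LEVEL SPDLOG_LEVEL_ERROR
#include <spdlog/spdlog.h>

int main() {
  SPDLOG_WARN("RingBuffer full: worker waiting");  // compiled out
  SPDLOG_ERROR("still emitted at the error level");
}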

View File

@ -5,7 +5,8 @@
#include <vector>
#include <gflags/gflags.h>
#include <glog/logging.h>
#include "utils/logging.hpp"
/// Reads the memgraph configuration files.
///
@ -23,9 +24,10 @@ inline void LoadConfig(const std::string &product_name) {
auto memgraph_config = getenv("MEMGRAPH_CONFIG");
if (memgraph_config != nullptr) {
auto path = fs::path(memgraph_config);
CHECK(fs::exists(path))
<< "MEMGRAPH_CONFIG environment variable set to nonexisting path: "
<< path.generic_string();
MG_ASSERT(
fs::exists(path),
"MEMGRAPH_CONFIG environment variable set to nonexisting path: {}",
path.generic_string());
configs.emplace_back(path);
}
}

View File

@ -5,4 +5,4 @@ set(io_src_files
network/utils.cpp)
add_library(mg-io STATIC ${io_src_files})
target_link_libraries(mg-io stdc++fs Threads::Threads fmt glog mg-utils)
target_link_libraries(mg-io stdc++fs Threads::Threads fmt mg-utils)

View File

@ -4,10 +4,9 @@
#include <algorithm>
#include "glog/logging.h"
#include "io/network/endpoint.hpp"
#include "io/network/network_error.hpp"
#include "utils/logging.hpp"
#include "utils/string.hpp"
namespace io::network {
@ -57,15 +56,15 @@ Endpoint::ParseSocketOrIpAddress(
try {
int_port = utils::ParseInt(parts[1]);
} catch (utils::BasicException &e) {
LOG(ERROR) << "Invalid port number: " << parts[1];
spdlog::error("Invalid port number: {}", parts[1]);
return std::nullopt;
}
if (int_port < 0) {
LOG(ERROR) << "Port number must be a positive integer!";
spdlog::error("Port number must be a positive integer!");
return std::nullopt;
}
if (int_port > std::numeric_limits<uint16_t>::max()) {
LOG(ERROR) << "Port number exceeded maximum possible size!";
spdlog::error("Port number exceeded maximum possible size!");
return std::nullopt;
}

View File

@ -2,13 +2,13 @@
#include <errno.h>
#include <fmt/format.h>
#include <glog/logging.h>
#include <malloc.h>
#include <sys/epoll.h>
#include "io/network/socket.hpp"
#include "utils/exceptions.hpp"
#include "utils/likely.hpp"
#include "utils/logging.hpp"
namespace io::network {
@ -26,8 +26,8 @@ class Epoll {
// epoll_create1 returns an error if there is a logical error in our code
// (for example invalid flags) or if there is an irrecoverable error. In both
// cases it is best to terminate.
CHECK(epoll_fd_ != -1) << "Error on epoll create: (" << errno << ") "
<< strerror(errno);
MG_ASSERT(epoll_fd_ != -1, "Error on epoll create: ({}) {}", errno,
strerror(errno));
}
/**
@ -49,8 +49,8 @@ class Epoll {
// that case we could return an error and close the connection. Reaching the
// system limit in a normally working memgraph is extremely unlikely,
// so it is correct to terminate even in that case.
CHECK(!status) << "Error on epoll " << (modify ? "modify" : "add") << ": ("
<< errno << ") " << strerror(errno);
MG_ASSERT(!status, "Error on epoll {}: ({}) {}",
(modify ? "modify" : "add"), errno, strerror(errno));
}
/**
@ -76,8 +76,8 @@ class Epoll {
// that case we could return an error and close the connection. Reaching the
// system limit in a normally working memgraph is extremely unlikely,
// so it is correct to terminate even in that case.
CHECK(!status) << "Error on epoll delete: (" << errno << ") "
<< strerror(errno);
MG_ASSERT(!status, "Error on epoll delete: ({}) {}", errno,
strerror(errno));
}
/**
@ -91,8 +91,8 @@ class Epoll {
int Wait(Event *events, int max_events, int timeout) {
auto num_events = epoll_wait(epoll_fd_, events, max_events, timeout);
// If this check fails, there was a logical error in our code.
CHECK(num_events != -1 || errno == EINTR)
<< "Error on epoll wait: (" << errno << ") " << strerror(errno);
MG_ASSERT(num_events != -1 || errno == EINTR,
"Error on epoll wait: ({}) {}", errno, strerror(errno));
// num_events can be -1 if errno was EINTR (epoll_wait interrupted by signal
// handler). We treat that as no events, so we return 0.
return num_events == -1 ? 0 : num_events;
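For readers unfamiliar with the underlying API, the wrapper above is a thin layer over epoll_create1, epoll_ctl and epoll_wait. A bare-bones, self-contained illustration of those calls outside the wrapper (the file descriptor and timeout are arbitrary example values):

#include <cerrno>
#include <cstdio>
#include <cstring>

#include <sys/epoll.h>
#include <unistd.h>

int main() {
  // Create the epoll instance; -1 signals an unrecoverable setup error.
  int epoll_fd = epoll_create1(0);
  if (epoll_fd == -1) {
    std::fprintf(stderr, "epoll_create1: (%d) %s\n", errno, std::strerror(errno));
    return 1;
  }

  // Register stdin for read readiness (works for ttys and pipes; regular
  // files are not pollable).
  epoll_event ev{};
  ev.events = EPOLLIN;
  ev.data.fd = STDIN_FILENO;
  if (epoll_ctl(epoll_fd, EPOLL_CTL_ADD, STDIN_FILENO, &ev) == -1) {
    std::fprintf(stderr, "epoll_ctl: (%d) %s\n", errno, std::strerror(errno));
    return 1;
  }

  // Wait up to 100 ms; EINTR is treated as "no events", as in the wrapper.
  epoll_event events[8];
  int num_events = epoll_wait(epoll_fd, events, 8, 100);
  if (num_events == -1 && errno != EINTR) {
    std::fprintf(stderr, "epoll_wait: (%d) %s\n", errno, std::strerror(errno));
    return 1;
  }
  std::printf("ready events: %d\n", num_events == -1 ? 0 : num_events);
  close(epoll_fd);
}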

View File

@ -17,10 +17,9 @@
#include <sys/types.h>
#include <unistd.h>
#include "glog/logging.h"
#include "io/network/addrinfo.hpp"
#include "utils/likely.hpp"
#include "utils/logging.hpp"
namespace io::network {
@ -129,37 +128,40 @@ bool Socket::Bind(const Endpoint &endpoint) {
void Socket::SetNonBlocking() {
int flags = fcntl(socket_, F_GETFL, 0);
CHECK(flags != -1) << "Can't get socket mode";
MG_ASSERT(flags != -1, "Can't get socket mode");
flags |= O_NONBLOCK;
CHECK(fcntl(socket_, F_SETFL, flags) != -1) << "Can't set socket nonblocking";
MG_ASSERT(fcntl(socket_, F_SETFL, flags) != -1,
"Can't set socket nonblocking");
}
void Socket::SetKeepAlive() {
int optval = 1;
socklen_t optlen = sizeof(optval);
CHECK(!setsockopt(socket_, SOL_SOCKET, SO_KEEPALIVE, &optval, optlen))
<< "Can't set socket keep alive";
MG_ASSERT(!setsockopt(socket_, SOL_SOCKET, SO_KEEPALIVE, &optval, optlen),
"Can't set socket keep alive");
optval = 20; // wait 20s before sending keep-alive packets
CHECK(!setsockopt(socket_, SOL_TCP, TCP_KEEPIDLE, (void *)&optval, optlen))
<< "Can't set socket keep alive";
MG_ASSERT(
!setsockopt(socket_, SOL_TCP, TCP_KEEPIDLE, (void *)&optval, optlen),
"Can't set socket keep alive");
optval = 4; // 4 keep-alive packets must fail to close
CHECK(!setsockopt(socket_, SOL_TCP, TCP_KEEPCNT, (void *)&optval, optlen))
<< "Can't set socket keep alive";
MG_ASSERT(!setsockopt(socket_, SOL_TCP, TCP_KEEPCNT, (void *)&optval, optlen),
"Can't set socket keep alive");
optval = 15; // send keep-alive packets every 15s
CHECK(!setsockopt(socket_, SOL_TCP, TCP_KEEPINTVL, (void *)&optval, optlen))
<< "Can't set socket keep alive";
MG_ASSERT(
!setsockopt(socket_, SOL_TCP, TCP_KEEPINTVL, (void *)&optval, optlen),
"Can't set socket keep alive");
}
void Socket::SetNoDelay() {
int optval = 1;
socklen_t optlen = sizeof(optval);
CHECK(!setsockopt(socket_, SOL_TCP, TCP_NODELAY, (void *)&optval, optlen))
<< "Can't set socket no delay";
MG_ASSERT(!setsockopt(socket_, SOL_TCP, TCP_NODELAY, (void *)&optval, optlen),
"Can't set socket no delay");
}
void Socket::SetTimeout(long sec, long usec) {
@ -167,18 +169,18 @@ void Socket::SetTimeout(long sec, long usec) {
tv.tv_sec = sec;
tv.tv_usec = usec;
CHECK(!setsockopt(socket_, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv)))
<< "Can't set socket timeout";
MG_ASSERT(!setsockopt(socket_, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv)),
"Can't set socket timeout");
CHECK(!setsockopt(socket_, SOL_SOCKET, SO_SNDTIMEO, &tv, sizeof(tv)))
<< "Can't set socket timeout";
MG_ASSERT(!setsockopt(socket_, SOL_SOCKET, SO_SNDTIMEO, &tv, sizeof(tv)),
"Can't set socket timeout");
}
int Socket::ErrorStatus() const {
int optval;
socklen_t optlen = sizeof(optval);
auto status = getsockopt(socket_, SOL_SOCKET, SO_ERROR, &optval, &optlen);
CHECK(!status) << "getsockopt failed";
MG_ASSERT(!status, "getsockopt failed");
return optval;
}

View File

@ -8,10 +8,10 @@
#include <cstring>
#include <string>
#include "glog/logging.h"
#include "io/network/socket.hpp"
#include "utils/logging.hpp"
namespace io::network {
/// Resolves hostname to ip, if already an ip, just returns it
@ -23,10 +23,10 @@ std::string ResolveHostname(std::string hostname) {
int addr_result;
addrinfo *servinfo;
CHECK((addr_result =
getaddrinfo(hostname.c_str(), NULL, &hints, &servinfo)) == 0)
<< "Error with getaddrinfo:" << gai_strerror(addr_result);
CHECK(servinfo) << "Could not resolve address: " << hostname;
MG_ASSERT((addr_result =
getaddrinfo(hostname.c_str(), NULL, &hints, &servinfo)) == 0,
"Error with getaddrinfo: {}", gai_strerror(addr_result));
MG_ASSERT(servinfo, "Could not resolve address: {}", hostname);
std::string address;
if (servinfo->ai_family == AF_INET) {
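For context, the getaddrinfo result used above must eventually be released with freeaddrinfo. A standalone version of the same resolution pattern, with the hostname and output handling simplified for the example:

#include <cstdio>

#include <arpa/inet.h>
#include <netdb.h>
#include <netinet/in.h>

int main() {
  addrinfo hints{};
  hints.ai_family = AF_UNSPEC;
  hints.ai_socktype = SOCK_STREAM;

  addrinfo *servinfo = nullptr;
  int addr_result = getaddrinfo("localhost", nullptr, &hints, &servinfo);
  if (addr_result != 0) {
    std::fprintf(stderr, "Error with getaddrinfo: %s\n", gai_strerror(addr_result));
    return 1;
  }

  // Convert the first result back to a printable address.
  char buf[INET6_ADDRSTRLEN] = {0};
  if (servinfo->ai_family == AF_INET) {
    auto *addr = reinterpret_cast<sockaddr_in *>(servinfo->ai_addr);
    inet_ntop(AF_INET, &addr->sin_addr, buf, sizeof(buf));
  } else {
    auto *addr = reinterpret_cast<sockaddr_in6 *>(servinfo->ai_addr);
    inet_ntop(AF_INET6, &addr->sin6_addr, buf, sizeof(buf));
  }
  std::printf("resolved: %s\n", buf);

  freeaddrinfo(servinfo);
}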

View File

@ -1,6 +1,6 @@
# STATIC library used to store key-value pairs
add_library(mg-kvstore STATIC kvstore.cpp)
target_link_libraries(mg-kvstore stdc++fs mg-utils rocksdb bzip2 zlib glog gflags)
target_link_libraries(mg-kvstore stdc++fs mg-utils rocksdb bzip2 zlib gflags)
# STATIC library for dummy key-value storage
# add_library(mg-kvstore-dummy STATIC kvstore_dummy.cpp)

View File

@ -1,8 +1,7 @@
#include "kvstore/kvstore.hpp"
#include <glog/logging.h>
#include "utils/file.hpp"
#include "utils/logging.hpp"
namespace kvstore {
@ -13,41 +12,42 @@ KVStore::KVStore(std::filesystem::path storage) {}
KVStore::~KVStore() {}
bool KVStore::Put(const std::string &key, const std::string &value) {
CHECK(false)
<< "Unsupported operation (KVStore::Put) -- this is a dummy kvstore";
LOG_FATAL("Unsupported operation (KVStore::Put) -- this is a dummy kvstore");
}
bool KVStore::PutMultiple(const std::map<std::string, std::string> &items) {
CHECK(false) << "Unsupported operation (KVStore::PutMultiple) -- this is a "
"dummy kvstore";
LOG_FATAL(
"Unsupported operation (KVStore::PutMultiple) -- this is a "
"dummy kvstore");
}
std::optional<std::string> KVStore::Get(const std::string &key) const noexcept {
CHECK(false)
<< "Unsupported operation (KVStore::Get) -- this is a dummy kvstore";
LOG_FATAL("Unsupported operation (KVStore::Get) -- this is a dummy kvstore");
}
bool KVStore::Delete(const std::string &key) {
CHECK(false)
<< "Unsupported operation (KVStore::Delete) -- this is a dummy kvstore";
LOG_FATAL(
"Unsupported operation (KVStore::Delete) -- this is a dummy kvstore");
}
bool KVStore::DeleteMultiple(const std::vector<std::string> &keys) {
CHECK(false) << "Unsupported operation (KVStore::DeleteMultiple) -- this is "
"a dummy kvstore";
LOG_FATAL(
"Unsupported operation (KVStore::DeleteMultiple) -- this is "
"a dummy kvstore");
}
bool KVStore::DeletePrefix(const std::string &prefix) {
CHECK(false) << "Unsupported operation (KVStore::DeletePrefix) -- this is a "
"dummy kvstore";
LOG_FATAL(
"Unsupported operation (KVStore::DeletePrefix) -- this is a "
"dummy kvstore");
}
bool KVStore::PutAndDeleteMultiple(
const std::map<std::string, std::string> &items,
const std::vector<std::string> &keys) {
CHECK(false)
<< "Unsupported operation (KVStore::PutAndDeleteMultiple) -- this is a "
"dummy kvstore";
LOG_FATAL(
"Unsupported operation (KVStore::PutAndDeleteMultiple) -- this is a "
"dummy kvstore");
}
// iterator
@ -70,8 +70,9 @@ KVStore::iterator &KVStore::iterator::operator=(KVStore::iterator &&other) {
}
KVStore::iterator &KVStore::iterator::operator++() {
CHECK(false) << "Unsupported operation (&KVStore::iterator::operator++) -- "
"this is a dummy kvstore";
LOG_FATAL(
"Unsupported operation (&KVStore::iterator::operator++) -- "
"this is a dummy kvstore");
}
bool KVStore::iterator::operator==(const iterator &other) const { return true; }
@ -81,13 +82,15 @@ bool KVStore::iterator::operator!=(const iterator &other) const {
}
KVStore::iterator::reference KVStore::iterator::operator*() {
CHECK(false) << "Unsupported operation (KVStore::iterator::operator*)-- this "
"is a dummy kvstore";
LOG_FATAL(
"Unsupported operation (KVStore::iterator::operator*)-- this "
"is a dummy kvstore");
}
KVStore::iterator::pointer KVStore::iterator::operator->() {
CHECK(false) << "Unsupported operation (KVStore::iterator::operator->) -- "
"this is a dummy kvstore";
LOG_FATAL(
"Unsupported operation (KVStore::iterator::operator->) -- "
"this is a dummy kvstore");
}
void KVStore::iterator::SetInvalid() {}
@ -98,8 +101,9 @@ size_t KVStore::Size(const std::string &prefix) { return 0; }
bool KVStore::CompactRange(const std::string &begin_prefix,
const std::string &end_prefix) {
CHECK(false) << "Unsupported operation (KVStore::Compact) -- this is a "
"dummy kvstore";
LOG_FATAL(
"Unsupported operation (KVStore::Compact) -- this is a "
"dummy kvstore");
}
} // namespace kvstore
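Several of these dummy kvstore functions have a non-void return type but no return statement after LOG_FATAL, which only compiles cleanly if LOG_FATAL never returns. A hypothetical, simplified stand-in illustrating that property (the real utils/logging.hpp helper may be implemented quite differently):

#include <cstdlib>
#include <string>

#include <spdlog/spdlog.h>

// Hypothetical never-returning fatal logger: log at critical level, then abort.
[[noreturn]] void FatalLog(const std::string &msg) {
  spdlog::critical(msg);
  std::abort();
}

bool DummyPut(const std::string & /*key*/, const std::string & /*value*/) {
  // No return statement needed: the compiler knows FatalLog does not return.
  FatalLog("Unsupported operation (Put) -- this is a dummy kvstore");
}

int main() { return 0; }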

View File

@ -12,8 +12,11 @@
#include <string>
#include <thread>
#include <fmt/format.h>
#include <gflags/gflags.h>
#include <glog/logging.h>
#include <spdlog/common.h>
#include <spdlog/sinks/daily_file_sink.h>
#include <spdlog/sinks/stdout_color_sinks.h>
#include "communication/bolt/v1/constants.hpp"
#include "helpers.hpp"
@ -28,6 +31,7 @@
#include "telemetry/telemetry.hpp"
#include "utils/file.hpp"
#include "utils/flag_validation.hpp"
#include "utils/logging.hpp"
#include "utils/signals.hpp"
#include "utils/string.hpp"
#include "utils/sysinfo/memory.hpp"
@ -87,7 +91,6 @@ DEFINE_string(bolt_server_name_for_init, "",
// `mg_import_csv`. If you change it, make sure to change it there as well.
DEFINE_string(data_directory, "mg_data",
"Path to directory in which to save all permanent data.");
DEFINE_string(log_file, "", "Path to where the log should be stored.");
DEFINE_HIDDEN_string(
log_link_basename, "",
"Basename used for symlink creation to the last log file.");
@ -162,6 +165,93 @@ DEFINE_VALIDATED_string(
return false;
});
// Logging flags
DEFINE_bool(also_log_to_stderr, false,
"Log messages go to stderr in addition to logfiles");
DEFINE_string(log_file, "", "Path to where the log should be stored.");
namespace {
constexpr std::array log_level_mappings{
std::pair{"TRACE", spdlog::level::trace},
std::pair{"DEBUG", spdlog::level::debug},
std::pair{"INFO", spdlog::level::info},
std::pair{"WARNING", spdlog::level::warn},
std::pair{"ERROR", spdlog::level::err},
std::pair{"CRITICAL", spdlog::level::critical}};
std::string GetAllowedLogLevelsString() {
std::vector<std::string> allowed_log_levels;
allowed_log_levels.reserve(log_level_mappings.size());
std::transform(log_level_mappings.cbegin(), log_level_mappings.cend(),
std::back_inserter(allowed_log_levels),
[](const auto &mapping) { return mapping.first; });
return utils::Join(allowed_log_levels, ", ");
}
const std::string log_level_help_string = fmt::format(
"Minimum log level. Allowed values: {}", GetAllowedLogLevelsString());
} // namespace
DEFINE_VALIDATED_string(log_level, "WARNING", log_level_help_string.c_str(), {
if (value.empty()) {
std::cout << "Log level cannot be empty." << std::endl;
return false;
}
if (std::find_if(log_level_mappings.cbegin(), log_level_mappings.cend(),
[&](const auto &mapping) {
return mapping.first == value;
}) == log_level_mappings.cend()) {
std::cout << "Invalid value for log level. Allowed values: "
<< GetAllowedLogLevelsString() << std::endl;
return false;
}
return true;
});
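DEFINE_VALIDATED_string comes from utils/flag_validation.hpp and is not expanded in this diff. For reference, roughly the same check can be expressed with stock gflags using DEFINE_validator; the sketch below approximates the validation logic only, not the actual macro expansion:

#include <algorithm>
#include <array>
#include <string>

#include <gflags/gflags.h>

DEFINE_string(log_level, "WARNING", "Minimum log level.");

// Same allowed-values check as above, written as a plain gflags validator.
static bool ValidateLogLevel(const char * /*flagname*/, const std::string &value) {
  constexpr std::array allowed{"TRACE",   "DEBUG", "INFO",
                               "WARNING", "ERROR", "CRITICAL"};
  return std::find(allowed.begin(), allowed.end(), value) != allowed.end();
}
DEFINE_validator(log_level, &ValidateLogLevel);

int main(int argc, char **argv) {
  gflags::ParseCommandLineFlags(&argc, &argv, true);
  return 0;
}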
namespace {
void ParseLogLevel() {
const auto mapping_iter = std::find_if(
log_level_mappings.cbegin(), log_level_mappings.cend(),
[](const auto &mapping) { return mapping.first == FLAGS_log_level; });
MG_ASSERT(mapping_iter != log_level_mappings.cend(), "Invalid log level");
spdlog::set_level(mapping_iter->second);
}
// 5 weeks * 7 days
constexpr auto log_retention_count = 35;
void ConfigureLogging() {
std::vector<spdlog::sink_ptr> loggers;
if (FLAGS_also_log_to_stderr) {
loggers.emplace_back(
std::make_shared<spdlog::sinks::stderr_color_sink_mt>());
}
if (!FLAGS_log_file.empty()) {
// get local time
time_t current_time;
struct tm *local_time{nullptr};
time(&current_time);
local_time = localtime(&current_time);
loggers.emplace_back(std::make_shared<spdlog::sinks::daily_file_sink_mt>(
FLAGS_log_file, local_time->tm_hour, local_time->tm_min, false,
log_retention_count));
}
spdlog::set_default_logger(std::make_shared<spdlog::logger>(
"memgraph_log", loggers.begin(), loggers.end()));
spdlog::flush_on(spdlog::level::trace);
ParseLogLevel();
}
} // namespace
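Putting the pieces above together outside of memgraph: a self-contained sketch of the same sink composition (stderr plus a daily-rotating file keeping 35 files) with a runtime level, using only documented spdlog API. The file path and level here are example values, not memgraph defaults:

#include <memory>
#include <vector>

#include <spdlog/sinks/daily_file_sink.h>
#include <spdlog/sinks/stdout_color_sinks.h>
#include <spdlog/spdlog.h>

int main() {
  std::vector<spdlog::sink_ptr> sinks;
  sinks.emplace_back(std::make_shared<spdlog::sinks::stderr_color_sink_mt>());
  // Rotate at 00:00 and keep at most 35 files, i.e. five weeks of daily logs.
  sinks.emplace_back(std::make_shared<spdlog::sinks::daily_file_sink_mt>(
      "/tmp/example.log", 0, 0, false, 35));

  spdlog::set_default_logger(std::make_shared<spdlog::logger>(
      "example_log", sinks.begin(), sinks.end()));
  spdlog::flush_on(spdlog::level::trace);
  spdlog::set_level(spdlog::level::warn);  // analogous to --log-level=WARNING

  spdlog::info("hidden at the WARNING level");
  spdlog::warn("goes to stderr and /tmp/example.log");
}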
/// Encapsulates Dbms and Interpreter that are passed through the network server
/// and worker to the session.
#ifdef MG_ENTERPRISE
@ -821,18 +911,12 @@ void InitSignalHandlers(const std::function<void()> &shutdown_fun) {
shutdown_fun();
};
CHECK(utils::SignalHandler::RegisterHandler(utils::Signal::Terminate,
shutdown, block_shutdown_signals))
<< "Unable to register SIGTERM handler!";
CHECK(utils::SignalHandler::RegisterHandler(utils::Signal::Interupt, shutdown,
block_shutdown_signals))
<< "Unable to register SIGINT handler!";
// Setup SIGUSR1 to be used for reopening log files, when e.g. logrotate
// rotates our logs.
CHECK(utils::SignalHandler::RegisterHandler(utils::Signal::User1, []() {
google::CloseLogDestination(google::INFO);
})) << "Unable to register SIGUSR1 handler!";
MG_ASSERT(utils::SignalHandler::RegisterHandler(
utils::Signal::Terminate, shutdown, block_shutdown_signals),
"Unable to register SIGTERM handler!");
MG_ASSERT(utils::SignalHandler::RegisterHandler(
utils::Signal::Interupt, shutdown, block_shutdown_signals),
"Unable to register SIGINT handler!");
}
int main(int argc, char **argv) {
@ -844,16 +928,14 @@ int main(int argc, char **argv) {
LoadConfig("memgraph");
gflags::ParseCommandLineFlags(&argc, &argv, true);
google::InitGoogleLogging(argv[0]);
google::SetLogDestination(google::INFO, FLAGS_log_file.c_str());
google::SetLogSymlink(google::INFO, FLAGS_log_link_basename.c_str());
ConfigureLogging();
// Unhandled exception handler init.
std::set_terminate(&utils::TerminateHandler);
// Initialize Python
auto *program_name = Py_DecodeLocale(argv[0], nullptr);
CHECK(program_name);
MG_ASSERT(program_name);
// Set program name, so Python can find its way to runtime libraries relative
// to executable.
Py_SetProgramName(program_name);
@ -870,16 +952,16 @@ int main(int argc, char **argv) {
auto gil = py::EnsureGIL();
auto maybe_exc = py::AppendToSysPath(py_support_dir.c_str());
if (maybe_exc) {
LOG(ERROR) << "Unable to load support for embedded Python: "
<< *maybe_exc;
spdlog::error("Unable to load support for embedded Python: {}",
*maybe_exc);
}
} else {
LOG(ERROR)
<< "Unable to load support for embedded Python: missing directory "
<< py_support_dir;
spdlog::error(
"Unable to load support for embedded Python: missing directory {}",
py_support_dir);
}
} catch (const std::filesystem::filesystem_error &e) {
LOG(ERROR) << "Unable to load support for embedded Python: " << e.what();
spdlog::error("Unable to load support for embedded Python: {}", e.what());
}
// Initialize the communication library.
@ -896,14 +978,15 @@ int main(int argc, char **argv) {
mem_log_scheduler.Run("Memory warning", std::chrono::seconds(3), [] {
auto free_ram = utils::sysinfo::AvailableMemoryKilobytes();
if (free_ram && *free_ram / 1024 < FLAGS_memory_warning_threshold)
LOG(WARNING) << "Running out of available RAM, only "
<< *free_ram / 1024 << " MB left.";
spdlog::warn("Running out of available RAM, only {} MB left",
*free_ram / 1024);
});
} else {
// Kernel version for the `MemAvailable` value is from: man procfs
LOG(WARNING) << "You have an older kernel version (<3.14) or the /proc "
"filesystem isn't available so remaining memory warnings "
"won't be available.";
spdlog::warn(
"You have an older kernel version (<3.14) or the /proc "
"filesystem isn't available so remaining memory warnings "
"won't be available.");
}
}
@ -936,9 +1019,10 @@ int main(int argc, char **argv) {
}
// Setup SIGUSR2 to be used for reopening audit log files, when e.g. logrotate
// rotates our audit logs.
CHECK(utils::SignalHandler::RegisterHandler(
utils::Signal::User2, [&audit_log]() { audit_log.ReopenLog(); }))
<< "Unable to register SIGUSR2 handler!";
MG_ASSERT(
utils::SignalHandler::RegisterHandler(
utils::Signal::User2, [&audit_log]() { audit_log.ReopenLog(); }),
"Unable to register SIGUSR2 handler!");
// End enterprise features initialization
#endif
@ -957,12 +1041,14 @@ int main(int argc, char **argv) {
.wal_file_flush_every_n_tx = FLAGS_storage_wal_file_flush_every_n_tx,
.snapshot_on_exit = FLAGS_storage_snapshot_on_exit}};
if (FLAGS_storage_snapshot_interval_sec == 0) {
LOG_IF(FATAL, FLAGS_storage_wal_enabled)
<< "In order to use write-ahead-logging you must enable "
"periodic snapshots by setting the snapshot interval to a "
"value larger than 0!";
db_config.durability.snapshot_wal_mode =
storage::Config::Durability::SnapshotWalMode::DISABLED;
if (FLAGS_storage_wal_enabled) {
LOG_FATAL(
"In order to use write-ahead-logging you must enable "
"periodic snapshots by setting the snapshot interval to a "
"value larger than 0!");
db_config.durability.snapshot_wal_mode =
storage::Config::Durability::SnapshotWalMode::DISABLED;
}
} else {
if (FLAGS_storage_wal_enabled) {
db_config.durability.snapshot_wal_mode = storage::Config::Durability::
@ -1002,9 +1088,9 @@ int main(int argc, char **argv) {
if (!FLAGS_bolt_key_file.empty() && !FLAGS_bolt_cert_file.empty()) {
context = ServerContext(FLAGS_bolt_key_file, FLAGS_bolt_cert_file);
service_name = "BoltS";
std::cout << "Using secure Bolt connection (with SSL)" << std::endl;
spdlog::info("Using secure Bolt connection (with SSL)");
} else {
std::cout << "Using non-secure Bolt connection (without SSL)" << std::endl;
spdlog::warn("Using non-secure Bolt connection (without SSL)");
}
ServerT server({FLAGS_bolt_address, static_cast<uint16_t>(FLAGS_bolt_port)},
@ -1025,16 +1111,17 @@ int main(int argc, char **argv) {
// Handler for regular termination signals
auto shutdown = [&server, &interpreter_context] {
// Server needs to be shutdown first and then the database. This prevents a
// race condition when a transaction is accepted during server shutdown.
// Server needs to be shutdown first and then the database. This prevents
// a race condition when a transaction is accepted during server shutdown.
server.Shutdown();
// After the server is notified to stop accepting and processing connections
// we tell the execution engine to stop processing all pending queries.
// After the server is notified to stop accepting and processing
// connections we tell the execution engine to stop processing all pending
// queries.
query::Shutdown(&interpreter_context);
};
InitSignalHandlers(shutdown);
CHECK(server.Start()) << "Couldn't start the Bolt server!";
MG_ASSERT(server.Start(), "Couldn't start the Bolt server!");
server.AwaitShutdown();
query::procedure::gModuleRegistry.UnloadAllModules();

View File

@ -1,5 +1,4 @@
#include <gflags/gflags.h>
#include <glog/logging.h>
#include <algorithm>
#include <cstdio>
@ -12,6 +11,7 @@
#include "helpers.hpp"
#include "storage/v2/storage.hpp"
#include "utils/exceptions.hpp"
#include "utils/logging.hpp"
#include "utils/string.hpp"
#include "utils/timer.hpp"
#include "version.hpp"
@ -110,7 +110,7 @@ std::vector<std::string> ParseRepeatedFlag(const std::string &flagname,
value = argv[++i];
else if (!maybe_value.empty() && maybe_value.front() == '=')
value = maybe_value.substr(1);
CHECK(!value.empty()) << "The argument '" << flagname << "' is required";
MG_ASSERT(!value.empty(), "The argument '{}' is required", flagname);
values.push_back(value);
}
}
@ -419,7 +419,7 @@ std::string GetIdSpace(const std::string &type) {
"Expected the ID field to look like '[START_|END_]ID[(<id_space>)]', "
"but got '{}' instead",
type);
CHECK(res.size() == 4) << "Invalid regex match result!";
MG_ASSERT(res.size() == 4, "Invalid regex match result!");
return res[3];
}
@ -444,7 +444,7 @@ void ProcessNodeRow(storage::Storage *store, const std::vector<Field> &fields,
auto it = node_id_map->find(node_id);
if (it != node_id_map->end()) {
if (FLAGS_skip_duplicate_nodes) {
LOG(WARNING) << "Skipping duplicate node with ID '" << node_id << "'";
spdlog::warn("Skipping duplicate node with ID '{}'", node_id);
return;
} else {
throw LoadException("Node with ID '{}' already exists", node_id);
@ -500,7 +500,7 @@ void ProcessNodes(storage::Storage *store, const std::string &nodes_path,
std::unordered_map<NodeId, storage::Gid> *node_id_map,
const std::vector<std::string> &additional_labels) {
std::ifstream nodes_file(nodes_path);
CHECK(nodes_file) << "Unable to open '" << nodes_path << "'";
MG_ASSERT(nodes_file, "Unable to open '{}'", nodes_path);
uint64_t row_number = 1;
try {
if (!*header) {
@ -524,8 +524,8 @@ void ProcessNodes(storage::Storage *store, const std::string &nodes_path,
row_number += lines_count;
}
} catch (const LoadException &e) {
LOG(FATAL) << "Couldn't process row " << row_number << " of '" << nodes_path
<< "' because of: " << e.what();
LOG_FATAL("Couldn't process row {} of '{}' because of: {}", row_number,
nodes_path, e.what());
}
}
@ -551,8 +551,7 @@ void ProcessRelationshipsRow(
auto it = node_id_map.find(node_id);
if (it == node_id_map.end()) {
if (FLAGS_skip_bad_relationships) {
LOG(WARNING) << "Skipping bad relationship with START_ID '" << node_id
<< "'";
spdlog::warn("Skipping bad relationship with START_ID '{}'", node_id);
return;
} else {
throw LoadException("Node with ID '{}' does not exist", node_id);
@ -569,8 +568,7 @@ void ProcessRelationshipsRow(
auto it = node_id_map.find(node_id);
if (it == node_id_map.end()) {
if (FLAGS_skip_bad_relationships) {
LOG(WARNING) << "Skipping bad relationship with END_ID '" << node_id
<< "'";
spdlog::warn("Skipping bad relationship with END_ID '{}'", node_id);
return;
} else {
throw LoadException("Node with ID '{}' does not exist", node_id);
@ -629,7 +627,7 @@ void ProcessRelationships(
std::optional<std::vector<Field>> *header,
const std::unordered_map<NodeId, storage::Gid> &node_id_map) {
std::ifstream relationships_file(relationships_path);
CHECK(relationships_file) << "Unable to open '" << relationships_path << "'";
MG_ASSERT(relationships_file, "Unable to open '{}'", relationships_path);
uint64_t row_number = 1;
try {
if (!*header) {
@ -654,8 +652,8 @@ void ProcessRelationships(
row_number += lines_count;
}
} catch (const LoadException &e) {
LOG(FATAL) << "Couldn't process row " << row_number << " of '"
<< relationships_path << "' because of: " << e.what();
LOG_FATAL("Couldn't process row {} of '{}' because of: {}", row_number,
relationships_path, e.what());
}
}
@ -724,9 +722,8 @@ int main(int argc, char *argv[]) {
// overwrite the config.
LoadConfig("mg_import_csv");
gflags::ParseCommandLineFlags(&argc, &argv, true);
google::InitGoogleLogging(argv[0]);
CHECK(!nodes.empty()) << "The --nodes flag is required!";
MG_ASSERT(!nodes.empty(), "The --nodes flag is required!");
{
std::string upper = utils::ToUpperCase(utils::Trim(FLAGS_id_type));
@ -750,7 +747,7 @@ int main(int argc, char *argv[]) {
auto [files, additional_labels] = ParseNodesArgument(value);
std::optional<std::vector<Field>> header;
for (const auto &nodes_file : files) {
LOG(INFO) << "Loading " << nodes_file;
spdlog::info("Loading {}", nodes_file);
ProcessNodes(&store, nodes_file, &header, &node_id_map,
additional_labels);
}
@ -761,14 +758,14 @@ int main(int argc, char *argv[]) {
auto [files, type] = ParseRelationshipsArgument(value);
std::optional<std::vector<Field>> header;
for (const auto &relationships_file : files) {
LOG(INFO) << "Loading " << relationships_file;
spdlog::info("Loading {}", relationships_file);
ProcessRelationships(&store, relationships_file, type, &header,
node_id_map);
}
}
double load_sec = load_timer.Elapsed().count();
LOG(INFO) << "Loaded all data in " << fmt::format("{:.3f}", load_sec) << " s";
spdlog::info("Loaded all data in {:.3f}s", load_sec);
// The snapshot will be created in the storage destructor.

View File

@ -16,7 +16,7 @@
#error "Minimum supported Python API is 3.5"
#endif
#include <glog/logging.h>
#include "utils/logging.hpp"
namespace py {
@ -160,7 +160,7 @@ class [[nodiscard]] Object final {
/// Returned Object is nullptr if an error occurred.
/// @sa FetchError
template <class... TArgs>
Object Call(const TArgs &... args) const {
Object Call(const TArgs &...args) const {
return Object(PyObject_CallFunctionObjArgs(
ptr_, static_cast<PyObject *>(args)..., nullptr));
}
@ -180,7 +180,7 @@ class [[nodiscard]] Object final {
/// Returned Object is nullptr if an error occurred.
/// @sa FetchError
template <class... TArgs>
Object CallMethod(std::string_view meth_name, const TArgs &... args) const {
Object CallMethod(std::string_view meth_name, const TArgs &...args) const {
Object name(
PyUnicode_FromStringAndSize(meth_name.data(), meth_name.size()));
return Object(PyObject_CallMethodObjArgs(
@ -214,9 +214,9 @@ struct [[nodiscard]] ExceptionInfo final {
bool skip_first_line = false) {
if (!exc_info.type) return "";
Object traceback_mod(PyImport_ImportModule("traceback"));
CHECK(traceback_mod);
MG_ASSERT(traceback_mod);
Object format_exception_fn(traceback_mod.GetAttr("format_exception"));
CHECK(format_exception_fn);
MG_ASSERT(format_exception_fn);
Object traceback_root(exc_info.traceback);
if (skip_first_line && traceback_root) {
traceback_root = traceback_root.GetAttr("tb_next");
@ -224,7 +224,7 @@ struct [[nodiscard]] ExceptionInfo final {
auto list = format_exception_fn.Call(
exc_info.type, exc_info.value ? exc_info.value.Ptr() : Py_None,
traceback_root ? traceback_root.Ptr() : Py_None);
CHECK(list);
MG_ASSERT(list);
std::stringstream ss;
auto len = PyList_GET_SIZE(list.Ptr());
for (Py_ssize_t i = 0; i < len; ++i) {
@ -269,9 +269,9 @@ inline void RestoreError(ExceptionInfo exc_info) {
/// ExceptionInfo is returned if an error occurred.
[[nodiscard]] inline std::optional<ExceptionInfo> AppendToSysPath(
const char *dir) {
CHECK(dir);
MG_ASSERT(dir);
auto *py_path = PySys_GetObject("path");
CHECK(py_path);
MG_ASSERT(py_path);
py::Object import_dir(PyUnicode_FromString(dir));
if (!import_dir) return py::FetchError();
int import_dir_in_path = PySequence_Contains(py_path, import_dir.Ptr());

View File

@ -42,7 +42,7 @@ bool TypedValueCompare(const TypedValue &a, const TypedValue &b) {
throw QueryRuntimeException(
"Comparison is not defined for values of type {}.", a.type());
default:
LOG(FATAL) << "Unhandled comparison for types";
LOG_FATAL("Unhandled comparison for types");
}
}

View File

@ -4,8 +4,6 @@
#include <cstdint>
#include <string>
#include <glog/logging.h>
#include "query/db_accessor.hpp"
#include "query/exceptions.hpp"
#include "query/frontend/ast/ast.hpp"
@ -13,6 +11,7 @@
#include "query/typed_value.hpp"
#include "storage/v2/id_types.hpp"
#include "storage/v2/view.hpp"
#include "utils/logging.hpp"
namespace query {
@ -36,8 +35,8 @@ class TypedValueVectorCompare final {
const std::vector<TypedValue, TAllocator> &c2) const {
// ordering is invalid if there are more elements in the collections
// than there are in the ordering_ vector
CHECK(c1.size() <= ordering_.size() && c2.size() <= ordering_.size())
<< "Collections contain more elements then there are orderings";
MG_ASSERT(c1.size() <= ordering_.size() && c2.size() <= ordering_.size(),
"Collections contain more elements then there are orderings");
auto c1_it = c1.begin();
auto c2_it = c2.begin();

View File

@ -8,14 +8,13 @@
#include <utility>
#include <vector>
#include <glog/logging.h>
#include "query/db_accessor.hpp"
#include "query/exceptions.hpp"
#include "query/stream.hpp"
#include "query/typed_value.hpp"
#include "storage/v2/storage.hpp"
#include "utils/algorithm.hpp"
#include "utils/logging.hpp"
#include "utils/string.hpp"
namespace query {
@ -468,7 +467,7 @@ PullPlanDump::PullChunk PullPlanDump::CreateEdgePullChunk() {
vertex.OutEdges(storage::View::OLD));
}
auto &maybe_edges = *maybe_edge_iterable;
CHECK(maybe_edges.HasValue()) << "Invalid database state!";
MG_ASSERT(maybe_edges.HasValue(), "Invalid database state!");
auto current_edge_iter = maybe_current_edge_iter
? *maybe_current_edge_iter
: maybe_edges->begin();

View File

@ -468,11 +468,11 @@ cpp<#
Aggregation(Expression *expression1, Expression *expression2, Op op)
: BinaryOperator(expression1, expression2), op_(op) {
// COUNT without expression denotes COUNT(*) in cypher.
DCHECK(expression1 || op == Aggregation::Op::COUNT)
<< "All aggregations, except COUNT require expression";
DCHECK((expression2 == nullptr) ^ (op == Aggregation::Op::COLLECT_MAP))
<< "The second expression is obligatory in COLLECT_MAP and "
"invalid otherwise";
DMG_ASSERT(expression1 || op == Aggregation::Op::COUNT,
"All aggregations, except COUNT require expression");
DMG_ASSERT((expression2 == nullptr) ^ (op == Aggregation::Op::COLLECT_MAP),
"The second expression is obligatory in COLLECT_MAP and "
"invalid otherwise");
}
cpp<#)
(:private
@ -815,7 +815,7 @@ cpp<#
: arguments_(arguments),
function_name_(function_name),
function_(NameToFunction(function_name_)) {
DCHECK(function_) << "Unexpected missing function: " << function_name_;
DMG_ASSERT(function_, "Unexpected missing function: {}", function_name_);
}
cpp<#)
(:private
@ -2163,7 +2163,7 @@ cpp<#
#>cpp
explicit Unwind(NamedExpression *named_expression)
: named_expression_(named_expression) {
DCHECK(named_expression) << "Unwind cannot take nullptr for named_expression";
DMG_ASSERT(named_expression, "Unwind cannot take nullptr for named_expression");
}
cpp<#)
(:private

View File

@ -22,12 +22,11 @@
#include <utility>
#include <vector>
#include <glog/logging.h>
#include "query/exceptions.hpp"
#include "query/frontend/parsing.hpp"
#include "query/interpret/awesome_memgraph_functions.hpp"
#include "utils/exceptions.hpp"
#include "utils/logging.hpp"
#include "utils/string.hpp"
namespace query::frontend {
@ -36,8 +35,8 @@ const std::string CypherMainVisitor::kAnonPrefix = "anon";
antlrcpp::Any CypherMainVisitor::visitExplainQuery(
MemgraphCypher::ExplainQueryContext *ctx) {
CHECK(ctx->children.size() == 2)
<< "ExplainQuery should have exactly two children!";
MG_ASSERT(ctx->children.size() == 2,
"ExplainQuery should have exactly two children!");
auto *cypher_query = ctx->children[1]->accept(this).as<CypherQuery *>();
auto *explain_query = storage_->Create<ExplainQuery>();
explain_query->cypher_query_ = cypher_query;
@ -47,8 +46,8 @@ antlrcpp::Any CypherMainVisitor::visitExplainQuery(
antlrcpp::Any CypherMainVisitor::visitProfileQuery(
MemgraphCypher::ProfileQueryContext *ctx) {
CHECK(ctx->children.size() == 2)
<< "ProfileQuery should have exactly two children!";
MG_ASSERT(ctx->children.size() == 2,
"ProfileQuery should have exactly two children!");
auto *cypher_query = ctx->children[1]->accept(this).as<CypherQuery *>();
auto *profile_query = storage_->Create<ProfileQuery>();
profile_query->cypher_query_ = cypher_query;
@ -58,8 +57,8 @@ antlrcpp::Any CypherMainVisitor::visitProfileQuery(
antlrcpp::Any CypherMainVisitor::visitInfoQuery(
MemgraphCypher::InfoQueryContext *ctx) {
CHECK(ctx->children.size() == 2)
<< "InfoQuery should have exactly two children!";
MG_ASSERT(ctx->children.size() == 2,
"InfoQuery should have exactly two children!");
auto *info_query = storage_->Create<InfoQuery>();
query_ = info_query;
if (ctx->storageInfo()) {
@ -79,7 +78,7 @@ antlrcpp::Any CypherMainVisitor::visitInfoQuery(
antlrcpp::Any CypherMainVisitor::visitConstraintQuery(
MemgraphCypher::ConstraintQueryContext *ctx) {
auto *constraint_query = storage_->Create<ConstraintQuery>();
CHECK(ctx->CREATE() || ctx->DROP());
MG_ASSERT(ctx->CREATE() || ctx->DROP());
if (ctx->CREATE()) {
constraint_query->action_type_ = ConstraintQuery::ActionType::CREATE;
} else if (ctx->DROP()) {
@ -94,7 +93,7 @@ antlrcpp::Any CypherMainVisitor::visitConstraintQuery(
antlrcpp::Any CypherMainVisitor::visitConstraint(
MemgraphCypher::ConstraintContext *ctx) {
Constraint constraint;
CHECK(ctx->EXISTS() || ctx->UNIQUE() || (ctx->NODE() && ctx->KEY()));
MG_ASSERT(ctx->EXISTS() || ctx->UNIQUE() || (ctx->NODE() && ctx->KEY()));
if (ctx->EXISTS()) {
constraint.type = Constraint::Type::EXISTS;
} else if (ctx->UNIQUE()) {
@ -123,7 +122,7 @@ antlrcpp::Any CypherMainVisitor::visitConstraint(
antlrcpp::Any CypherMainVisitor::visitCypherQuery(
MemgraphCypher::CypherQueryContext *ctx) {
auto *cypher_query = storage_->Create<CypherQuery>();
CHECK(ctx->singleQuery()) << "Expected single query.";
MG_ASSERT(ctx->singleQuery(), "Expected single query.");
cypher_query->single_query_ =
ctx->singleQuery()->accept(this).as<SingleQuery *>();
@ -149,8 +148,8 @@ antlrcpp::Any CypherMainVisitor::visitCypherQuery(
antlrcpp::Any CypherMainVisitor::visitIndexQuery(
MemgraphCypher::IndexQueryContext *ctx) {
CHECK(ctx->children.size() == 1)
<< "IndexQuery should have exactly one child!";
MG_ASSERT(ctx->children.size() == 1,
"IndexQuery should have exactly one child!");
auto *index_query = ctx->children[0]->accept(this).as<IndexQuery *>();
query_ = index_query;
return index_query;
@ -182,8 +181,8 @@ antlrcpp::Any CypherMainVisitor::visitDropIndex(
antlrcpp::Any CypherMainVisitor::visitAuthQuery(
MemgraphCypher::AuthQueryContext *ctx) {
CHECK(ctx->children.size() == 1)
<< "AuthQuery should have exactly one child!";
MG_ASSERT(ctx->children.size() == 1,
"AuthQuery should have exactly one child!");
auto *auth_query = ctx->children[0]->accept(this).as<AuthQuery *>();
query_ = auth_query;
return auth_query;
@ -198,8 +197,8 @@ antlrcpp::Any CypherMainVisitor::visitDumpQuery(
antlrcpp::Any CypherMainVisitor::visitReplicationQuery(
MemgraphCypher::ReplicationQueryContext *ctx) {
CHECK(ctx->children.size() == 1)
<< "ReplicationQuery should have exactly one child!";
MG_ASSERT(ctx->children.size() == 1,
"ReplicationQuery should have exactly one child!");
auto *replication_query =
ctx->children[0]->accept(this).as<ReplicationQuery *>();
query_ = replication_query;
@ -304,7 +303,7 @@ antlrcpp::Any CypherMainVisitor::visitCypherUnion(
MemgraphCypher::CypherUnionContext *ctx) {
bool distinct = !ctx->ALL();
auto *cypher_union = storage_->Create<CypherUnion>(distinct);
DCHECK(ctx->singleQuery()) << "Expected single query.";
DMG_ASSERT(ctx->singleQuery(), "Expected single query.");
cypher_union->single_query_ =
ctx->singleQuery()->accept(this).as<SingleQuery *>();
return cypher_union;
@ -379,7 +378,7 @@ antlrcpp::Any CypherMainVisitor::visitSingleQuery(
}
has_update = has_return = has_optional_match = false;
} else {
DLOG(FATAL) << "Can't happen";
DLOG_FATAL("Can't happen");
}
}
bool is_standalone_call_procedure =
@ -467,7 +466,7 @@ antlrcpp::Any CypherMainVisitor::visitCreate(
antlrcpp::Any CypherMainVisitor::visitCallProcedure(
MemgraphCypher::CallProcedureContext *ctx) {
auto *call_proc = storage_->Create<CallProcedure>();
CHECK(!ctx->procedureName()->symbolicName().empty());
MG_ASSERT(!ctx->procedureName()->symbolicName().empty());
std::vector<std::string> procedure_subnames;
procedure_subnames.reserve(ctx->procedureName()->symbolicName().size());
for (auto *subname : ctx->procedureName()->symbolicName()) {
@ -484,7 +483,7 @@ antlrcpp::Any CypherMainVisitor::visitCallProcedure(
if (memory_limit_ctx->MB()) {
call_proc->memory_scale_ = 1024U * 1024U;
} else {
CHECK(memory_limit_ctx->KB());
MG_ASSERT(memory_limit_ctx->KB());
call_proc->memory_scale_ = 1024U;
}
}
@ -518,7 +517,8 @@ antlrcpp::Any CypherMainVisitor::visitCallProcedure(
call_proc->result_fields_.reserve(yield_ctx->procedureResult().size());
call_proc->result_identifiers_.reserve(yield_ctx->procedureResult().size());
for (auto *result : yield_ctx->procedureResult()) {
CHECK(result->variable().size() == 1 || result->variable().size() == 2);
MG_ASSERT(result->variable().size() == 1 ||
result->variable().size() == 2);
call_proc->result_fields_.push_back(
result->variable()[0]->accept(this).as<std::string>());
std::string result_alias;
@ -749,7 +749,7 @@ antlrcpp::Any CypherMainVisitor::visitPrivilege(
if (ctx->AUTH()) return AuthQuery::Privilege::AUTH;
if (ctx->CONSTRAINT()) return AuthQuery::Privilege::CONSTRAINT;
if (ctx->DUMP()) return AuthQuery::Privilege::DUMP;
LOG(FATAL) << "Should not get here - unknown privilege!";
LOG_FATAL("Should not get here - unknown privilege!");
}
/**
@ -828,7 +828,7 @@ antlrcpp::Any CypherMainVisitor::visitReturnItem(
MemgraphCypher::ReturnItemContext *ctx) {
auto *named_expr = storage_->Create<NamedExpression>();
named_expr->expression_ = ctx->expression()->accept(this);
CHECK(named_expr->expression_);
MG_ASSERT(named_expr->expression_);
if (ctx->variable()) {
named_expr->name_ =
std::string(ctx->variable()->accept(this).as<std::string>());
@ -933,9 +933,9 @@ antlrcpp::Any CypherMainVisitor::visitSymbolicName(
MemgraphCypher::SymbolicNameContext *ctx) {
if (ctx->EscapedSymbolicName()) {
auto quoted_name = ctx->getText();
DCHECK(quoted_name.size() >= 2U && quoted_name[0] == '`' &&
quoted_name.back() == '`')
<< "Can't happen. Grammar ensures this";
DMG_ASSERT(quoted_name.size() >= 2U && quoted_name[0] == '`' &&
quoted_name.back() == '`',
"Can't happen. Grammar ensures this");
// Remove enclosing backticks.
std::string escaped_name =
quoted_name.substr(1, static_cast<int>(quoted_name.size()) - 2);
@ -948,7 +948,7 @@ antlrcpp::Any CypherMainVisitor::visitSymbolicName(
name.push_back('`');
escaped = false;
} else {
DLOG(FATAL) << "Can't happen. Grammar ensures that.";
DLOG_FATAL("Can't happen. Grammar ensures that.");
}
} else if (c == '`') {
escaped = true;
@ -1141,13 +1141,13 @@ antlrcpp::Any CypherMainVisitor::visitRelationshipPattern(
antlrcpp::Any CypherMainVisitor::visitRelationshipDetail(
MemgraphCypher::RelationshipDetailContext *) {
DLOG(FATAL) << "Should never be called. See documentation in hpp.";
DLOG_FATAL("Should never be called. See documentation in hpp.");
return 0;
}
antlrcpp::Any CypherMainVisitor::visitRelationshipLambda(
MemgraphCypher::RelationshipLambdaContext *) {
DLOG(FATAL) << "Should never be called. See documentation in hpp.";
DLOG_FATAL("Should never be called. See documentation in hpp.");
return 0;
}
@ -1162,8 +1162,8 @@ antlrcpp::Any CypherMainVisitor::visitRelationshipTypes(
antlrcpp::Any CypherMainVisitor::visitVariableExpansion(
MemgraphCypher::VariableExpansionContext *ctx) {
DCHECK(ctx->expression().size() <= 2U)
<< "Expected 0, 1 or 2 bounds in range literal.";
DMG_ASSERT(ctx->expression().size() <= 2U,
"Expected 0, 1 or 2 bounds in range literal.");
EdgeAtom::Type edge_type = EdgeAtom::Type::DEPTH_FIRST;
if (!ctx->getTokens(MemgraphCypher::BFS).empty())
@ -1297,7 +1297,7 @@ antlrcpp::Any CypherMainVisitor::visitExpression8(
antlrcpp::Any CypherMainVisitor::visitPartialComparisonExpression(
MemgraphCypher::PartialComparisonExpressionContext *) {
DLOG(FATAL) << "Should never be called. See documentation in hpp.";
DLOG_FATAL("Should never be called. See documentation in hpp.");
return 0;
}
@ -1377,7 +1377,7 @@ antlrcpp::Any CypherMainVisitor::visitExpression3a(
}
antlrcpp::Any CypherMainVisitor::visitStringAndNullOperators(
MemgraphCypher::StringAndNullOperatorsContext *) {
DLOG(FATAL) << "Should never be called. See documentation in hpp.";
DLOG_FATAL("Should never be called. See documentation in hpp.");
return 0;
}
@ -1410,7 +1410,7 @@ antlrcpp::Any CypherMainVisitor::visitExpression3b(
antlrcpp::Any CypherMainVisitor::visitListIndexingOrSlicing(
MemgraphCypher::ListIndexingOrSlicingContext *) {
DLOG(FATAL) << "Should never be called. See documentation in hpp.";
DLOG_FATAL("Should never be called. See documentation in hpp.");
return 0;
}
@ -1585,7 +1585,7 @@ antlrcpp::Any CypherMainVisitor::visitLiteral(
return static_cast<Expression *>(storage_->Create<PrimitiveLiteral>(
ctx->numberLiteral()->accept(this).as<TypedValue>(), token_position));
}
LOG(FATAL) << "Expected to handle all cases above";
LOG_FATAL("Expected to handle all cases above");
} else if (ctx->listLiteral()) {
return static_cast<Expression *>(storage_->Create<ListLiteral>(
ctx->listLiteral()->accept(this).as<std::vector<Expression *>>()));
@ -1612,7 +1612,7 @@ antlrcpp::Any CypherMainVisitor::visitNumberLiteral(
} else {
// This should never happen, except if the grammar changes and we don't
// notice the change in this production.
DLOG(FATAL) << "can't happen";
DLOG_FATAL("can't happen");
throw std::exception();
}
}
@ -1694,7 +1694,7 @@ antlrcpp::Any CypherMainVisitor::visitBooleanLiteral(
if (ctx->getTokens(MemgraphCypher::FALSE).size()) {
return false;
}
DLOG(FATAL) << "Shouldn't happend";
DLOG_FATAL("Shouldn't happend");
throw std::exception();
}
@ -1821,7 +1821,7 @@ antlrcpp::Any CypherMainVisitor::visitCaseExpression(
antlrcpp::Any CypherMainVisitor::visitCaseAlternatives(
MemgraphCypher::CaseAlternativesContext *) {
DLOG(FATAL) << "Should never be called. See documentation in hpp.";
DLOG_FATAL("Should never be called. See documentation in hpp.");
return 0;
}
@ -1847,7 +1847,7 @@ antlrcpp::Any CypherMainVisitor::visitMerge(MemgraphCypher::MergeContext *ctx) {
if (merge_action->MATCH()) {
merge->on_match_.insert(merge->on_match_.end(), set.begin(), set.end());
} else {
DCHECK(merge_action->CREATE()) << "Expected ON MATCH or ON CREATE";
DMG_ASSERT(merge_action->CREATE(), "Expected ON MATCH or ON CREATE");
merge->on_create_.insert(merge->on_create_.end(), set.begin(), set.end());
}
}
@ -1865,7 +1865,7 @@ antlrcpp::Any CypherMainVisitor::visitUnwind(
antlrcpp::Any CypherMainVisitor::visitFilterExpression(
MemgraphCypher::FilterExpressionContext *) {
LOG(FATAL) << "Should never be called. See documentation in hpp.";
LOG_FATAL("Should never be called. See documentation in hpp.");
return 0;
}

View File

@ -5,11 +5,11 @@
#include <utility>
#include <antlr4-runtime.h>
#include <glog/logging.h>
#include "query/frontend/ast/ast.hpp"
#include "query/frontend/opencypher/generated/MemgraphCypherBaseVisitor.h"
#include "utils/exceptions.hpp"
#include "utils/logging.hpp"
namespace query {
namespace frontend {
@ -104,7 +104,7 @@ class CypherMainVisitor : public antlropencypher::MemgraphCypherBaseVisitor {
std::vector<TExpression *> _expressions,
std::vector<antlr4::tree::ParseTree *> all_children,
const std::vector<size_t> &allowed_operators) {
DCHECK(_expressions.size()) << "can't happen";
DMG_ASSERT(_expressions.size(), "can't happen");
std::vector<Expression *> expressions;
auto operators = ExtractOperators(all_children, allowed_operators);
@ -125,7 +125,7 @@ class CypherMainVisitor : public antlropencypher::MemgraphCypherBaseVisitor {
TExpression *_expression,
std::vector<antlr4::tree::ParseTree *> all_children,
const std::vector<size_t> &allowed_operators) {
DCHECK(_expression) << "can't happen";
DMG_ASSERT(_expression, "can't happen");
auto operators = ExtractOperators(all_children, allowed_operators);
Expression *expression = _expression->accept(this);

View File

@ -5,9 +5,8 @@
#include <locale>
#include <stdexcept>
#include <glog/logging.h>
#include "query/exceptions.hpp"
#include "utils/logging.hpp"
#include "utils/string.hpp"
namespace query::frontend {
@ -143,7 +142,7 @@ std::string ParseStringLiteral(const std::string &s) {
default:
// This should never happen, except if the grammar changes and we don't
// notice the change in this production.
DLOG(FATAL) << "can't happen";
DLOG_FATAL("can't happen");
throw std::exception();
}
escape = false;
@ -165,12 +164,12 @@ double ParseDoubleLiteral(const std::string &s) {
}
std::string ParseParameter(const std::string &s) {
DCHECK(s[0] == '$') << "Invalid string passed as parameter name";
DMG_ASSERT(s[0] == '$', "Invalid string passed as parameter name");
if (s[1] != '`') return s.substr(1);
// If the parameter name is an escaped symbolic name, it should be
// unescaped and the leading and trailing backquotes removed.
DCHECK(s.size() > 3U && s.back() == '`')
<< "Invalid string passed as parameter name";
DMG_ASSERT(s.size() > 3U && s.back() == '`',
"Invalid string passed as parameter name");
std::string out;
for (int i = 2; i < static_cast<int>(s.size()) - 1; ++i) {
if (s[i] == '`') {

View File

@ -7,9 +7,8 @@
#include <optional>
#include <unordered_set>
#include "glog/logging.h"
#include "utils/algorithm.hpp"
#include "utils/logging.hpp"
namespace query {
@ -385,8 +384,8 @@ bool SymbolGenerator::PreVisit(Extract &extract) {
bool SymbolGenerator::PreVisit(Pattern &pattern) {
scope_.in_pattern = true;
if ((scope_.in_create || scope_.in_merge) && pattern.atoms_.size() == 1U) {
CHECK(utils::IsSubtype(*pattern.atoms_[0], NodeAtom::kType))
<< "Expected a single NodeAtom in Pattern";
MG_ASSERT(utils::IsSubtype(*pattern.atoms_[0], NodeAtom::kType),
"Expected a single NodeAtom in Pattern");
scope_.in_create_node = true;
}
return true;

View File

@ -5,6 +5,7 @@
#include "query/frontend/ast/ast.hpp"
#include "query/frontend/semantic/symbol.hpp"
#include "utils/logging.hpp"
namespace query {
@ -14,11 +15,11 @@ class SymbolTable final {
const Symbol &CreateSymbol(const std::string &name, bool user_declared,
Symbol::Type type = Symbol::Type::ANY,
int32_t token_position = -1) {
CHECK(table_.size() <= std::numeric_limits<int32_t>::max())
<< "SymbolTable size doesn't fit into 32-bit integer!";
MG_ASSERT(table_.size() <= std::numeric_limits<int32_t>::max(),
"SymbolTable size doesn't fit into 32-bit integer!");
auto got = table_.emplace(position_, Symbol(name, position_, user_declared,
type, token_position));
CHECK(got.second) << "Duplicate symbol ID!";
MG_ASSERT(got.second, "Duplicate symbol ID!");
position_++;
return got.first->second;
}

View File

@ -6,8 +6,6 @@
#include <string>
#include <vector>
#include "glog/logging.h"
#include "query/exceptions.hpp"
#include "query/frontend/opencypher/generated/MemgraphCypher.h"
#include "query/frontend/opencypher/generated/MemgraphCypherBaseVisitor.h"
@ -15,6 +13,7 @@
#include "query/frontend/parsing.hpp"
#include "query/frontend/stripped_lexer_constants.hpp"
#include "utils/fnv.hpp"
#include "utils/logging.hpp"
#include "utils/string.hpp"
namespace query::frontend {
@ -87,7 +86,7 @@ StrippedQuery::StrippedQuery(const std::string &query) : original_(query) {
int token_index = token_strings.size() + parameters_.size();
switch (token.first) {
case Token::UNMATCHED:
LOG(FATAL) << "Shouldn't happen";
LOG_FATAL("Shouldn't happen");
case Token::KEYWORD: {
// We don't strip NULL, since it can appear in special expressions
// like IS NULL and IS NOT NULL, but we strip true and false keywords.

View File

@ -404,7 +404,7 @@ class ExpressionEvaluator : public ExpressionVisitor<TypedValue> {
}
auto res = function.function_(arguments, function.arguments_.size(),
function_ctx);
CHECK(res.GetMemoryResource() == ctx_->memory);
MG_ASSERT(res.GetMemoryResource() == ctx_->memory);
return res;
} else {
TypedValue::TVector arguments(ctx_->memory);
@ -414,7 +414,7 @@ class ExpressionEvaluator : public ExpressionVisitor<TypedValue> {
}
auto res =
function.function_(arguments.data(), arguments.size(), function_ctx);
CHECK(res.GetMemoryResource() == ctx_->memory);
MG_ASSERT(res.GetMemoryResource() == ctx_->memory);
return res;
}
}

View File

@ -2,10 +2,9 @@
#include <vector>
#include <glog/logging.h>
#include "query/frontend/semantic/symbol_table.hpp"
#include "query/typed_value.hpp"
#include "utils/logging.hpp"
#include "utils/memory.hpp"
#include "utils/pmr/vector.hpp"
@ -15,11 +14,11 @@ class Frame {
public:
/// Create a Frame of given size backed by a utils::NewDeleteResource()
explicit Frame(int64_t size) : elems_(size, utils::NewDeleteResource()) {
CHECK(size >= 0);
MG_ASSERT(size >= 0);
}
Frame(int64_t size, utils::MemoryResource *memory) : elems_(size, memory) {
CHECK(size >= 0);
MG_ASSERT(size >= 0);
}
TypedValue &operator[](const Symbol &symbol) {

View File

@ -2,8 +2,6 @@
#include <limits>
#include <glog/logging.h>
#include "glue/communication.hpp"
#include "query/constants.hpp"
#include "query/context.hpp"
@ -22,6 +20,7 @@
#include "utils/algorithm.hpp"
#include "utils/exceptions.hpp"
#include "utils/flag_validation.hpp"
#include "utils/logging.hpp"
#include "utils/memory.hpp"
#include "utils/string.hpp"
#include "utils/tsc.hpp"
@ -92,8 +91,7 @@ ParsedQuery ParseQuery(
// If an exception was not thrown here, the stripper messed something
// up.
LOG(FATAL)
<< "The stripped query can't be parsed, but the original can.";
LOG_FATAL("The stripped query can't be parsed, but the original can.");
}
}
@ -363,7 +361,7 @@ Callback HandleAuthQuery(AuthQuery *auth_query, AuthQueryHandler *auth,
switch (auth_query->action_) {
case AuthQuery::Action::CREATE_USER:
callback.fn = [auth, username, password] {
CHECK(password.IsString() || password.IsNull());
MG_ASSERT(password.IsString() || password.IsNull());
if (!auth->CreateUser(username, password.IsString()
? std::make_optional(std::string(
password.ValueString()))
@ -383,7 +381,7 @@ Callback HandleAuthQuery(AuthQuery *auth_query, AuthQueryHandler *auth,
return callback;
case AuthQuery::Action::SET_PASSWORD:
callback.fn = [auth, username, password] {
CHECK(password.IsString() || password.IsNull());
MG_ASSERT(password.IsString() || password.IsNull());
auth->SetPassword(
username,
password.IsString()
@ -607,7 +605,7 @@ Callback HandleReplicationQuery(ReplicationQuery *repl_query,
Interpreter::Interpreter(InterpreterContext *interpreter_context)
: interpreter_context_(interpreter_context) {
CHECK(interpreter_context_) << "Interpreter context must not be NULL";
MG_ASSERT(interpreter_context_, "Interpreter context must not be NULL");
}
namespace {
@ -857,7 +855,7 @@ PreparedQuery Interpreter::PrepareTransactionQuery(
in_explicit_transaction_ = false;
};
} else {
LOG(FATAL) << "Should not get here -- unknown transaction query!";
LOG_FATAL("Should not get here -- unknown transaction query!");
}
return {{},
@ -920,10 +918,10 @@ PreparedQuery PrepareExplainQuery(
InterpreterContext *interpreter_context, DbAccessor *dba,
utils::MonotonicBufferResource *execution_memory) {
const std::string kExplainQueryStart = "explain ";
CHECK(
MG_ASSERT(
utils::StartsWith(utils::ToLowerCase(parsed_query.stripped_query.query()),
kExplainQueryStart))
<< "Expected stripped query to start with '" << kExplainQueryStart << "'";
kExplainQueryStart),
"Expected stripped query to start with '{}'", kExplainQueryStart);
// Parse and cache the inner query separately (as if it was a standalone
// query), producing a fresh AST. Note that currently we cannot just reuse
@ -937,8 +935,8 @@ PreparedQuery PrepareExplainQuery(
&interpreter_context->antlr_lock);
auto *cypher_query = utils::Downcast<CypherQuery>(parsed_inner_query.query);
CHECK(cypher_query)
<< "Cypher grammar should not allow other queries in EXPLAIN";
MG_ASSERT(cypher_query,
"Cypher grammar should not allow other queries in EXPLAIN");
auto cypher_query_plan = CypherQueryToPlan(
parsed_inner_query.stripped_query.hash(),
@ -978,10 +976,10 @@ PreparedQuery PrepareProfileQuery(
utils::MonotonicBufferResource *execution_memory) {
const std::string kProfileQueryStart = "profile ";
CHECK(
MG_ASSERT(
utils::StartsWith(utils::ToLowerCase(parsed_query.stripped_query.query()),
kProfileQueryStart))
<< "Expected stripped query to start with '" << kProfileQueryStart << "'";
kProfileQueryStart),
"Expected stripped query to start with '{}'", kProfileQueryStart);
// PROFILE isn't allowed inside multi-command (explicit) transactions. This is
// because PROFILE executes each PROFILE'd query and collects additional
@ -1015,8 +1013,8 @@ PreparedQuery PrepareProfileQuery(
&interpreter_context->antlr_lock);
auto *cypher_query = utils::Downcast<CypherQuery>(parsed_inner_query.query);
CHECK(cypher_query)
<< "Cypher grammar should not allow other queries in PROFILE";
MG_ASSERT(cypher_query,
"Cypher grammar should not allow other queries in PROFILE");
auto cypher_query_plan = CypherQueryToPlan(
parsed_inner_query.stripped_query.hash(),
@ -1048,7 +1046,7 @@ PreparedQuery PrepareProfileQuery(
ProfilingStatsToTable(ctx->stats, ctx->profile_execution_time));
}
CHECK(ctx) << "Failed to execute the query!";
MG_ASSERT(ctx, "Failed to execute the query!");
if (pull_plan->Pull(stream, n)) {
summary->insert_or_assign(
@ -1118,7 +1116,7 @@ PreparedQuery PrepareIndexQuery(
if (properties.empty()) {
interpreter_context->db->CreateIndex(label);
} else {
CHECK(properties.size() == 1U);
MG_ASSERT(properties.size() == 1U);
interpreter_context->db->CreateIndex(label, properties[0]);
}
invalidate_plan_cache();
@ -1131,7 +1129,7 @@ PreparedQuery PrepareIndexQuery(
if (properties.empty()) {
interpreter_context->db->DropIndex(label);
} else {
CHECK(properties.size() == 1U);
MG_ASSERT(properties.size() == 1U);
interpreter_context->db->DropIndex(label, properties[0]);
}
invalidate_plan_cache();
@ -1403,7 +1401,7 @@ PreparedQuery PrepareConstraintQuery(
auto violation = res.GetError();
auto label_name =
interpreter_context->db->LabelToName(violation.label);
CHECK(violation.properties.size() == 1U);
MG_ASSERT(violation.properties.size() == 1U);
auto property_name = interpreter_context->db->PropertyToName(
*violation.properties.begin());
throw QueryRuntimeException(
@ -1650,7 +1648,7 @@ Interpreter::PrepareResult Interpreter::Prepare(
std::move(parsed_query), in_explicit_transaction_,
interpreter_context_, &*execution_db_accessor_);
} else {
LOG(FATAL) << "Should not get here -- unknown query type!";
LOG_FATAL("Should not get here -- unknown query type!");
}
query_execution->summary["planning_time"] =
@ -1694,7 +1692,7 @@ void Interpreter::Commit() {
case storage::ConstraintViolation::Type::EXISTENCE: {
auto label_name =
execution_db_accessor_->LabelToName(constraint_violation.label);
CHECK(constraint_violation.properties.size() == 1U);
MG_ASSERT(constraint_violation.properties.size() == 1U);
auto property_name = execution_db_accessor_->PropertyToName(
*constraint_violation.properties.begin());
execution_db_accessor_ = std::nullopt;

View File

@ -13,6 +13,7 @@
#include "query/plan/read_write_type_checker.hpp"
#include "query/stream.hpp"
#include "query/typed_value.hpp"
#include "utils/logging.hpp"
#include "utils/memory.hpp"
#include "utils/skip_list.hpp"
#include "utils/spin_lock.hpp"
@ -228,7 +229,7 @@ struct PlanCacheEntry {
*/
struct InterpreterContext {
explicit InterpreterContext(storage::Storage *db) : db(db) {
CHECK(db) << "Storage must not be NULL";
MG_ASSERT(db, "Storage must not be NULL");
}
storage::Storage *db;
@ -401,8 +402,8 @@ template <typename TStream>
std::map<std::string, TypedValue> Interpreter::Pull(TStream *result_stream,
std::optional<int> n,
std::optional<int> qid) {
CHECK(in_explicit_transaction_ || !qid)
<< "qid can be only used in explicit transaction!";
MG_ASSERT(in_explicit_transaction_ || !qid,
"qid can be only used in explicit transaction!");
const int qid_value =
qid ? *qid : static_cast<int>(query_executions_.size() - 1);
@ -418,8 +419,8 @@ std::map<std::string, TypedValue> Interpreter::Pull(TStream *result_stream,
auto &query_execution = query_executions_[qid_value];
CHECK(query_execution && query_execution->prepared_query)
<< "Query already finished executing!";
MG_ASSERT(query_execution && query_execution->prepared_query,
"Query already finished executing!");
// Each prepared query has its own summary so we need to somehow preserve
// it after it finishes executing because it gets destroyed alongside
@ -453,7 +454,7 @@ std::map<std::string, TypedValue> Interpreter::Pull(TStream *result_stream,
// The only cases in which we have nothing to do are those where
// we're either in an explicit transaction or the query is such that
// a transaction wasn't started on a call to `Prepare()`.
CHECK(in_explicit_transaction_ || !db_accessor_);
MG_ASSERT(in_explicit_transaction_ || !db_accessor_);
break;
}
// As the transaction is done we can clear all the executions

View File

@ -1,12 +1,11 @@
#pragma once
#include "glog/logging.h"
#include <algorithm>
#include <utility>
#include <vector>
#include "storage/v2/property_value.hpp"
#include "utils/logging.hpp"
/**
* Encapsulates user provided parameters (and stripped literals)
@ -36,8 +35,8 @@ struct Parameters {
auto found =
std::find_if(storage_.begin(), storage_.end(),
[&](const auto &a) { return a.first == position; });
CHECK(found != storage_.end())
<< "Token position must be present in container";
MG_ASSERT(found != storage_.end(),
"Token position must be present in container");
return found->second;
}
@ -49,7 +48,7 @@ struct Parameters {
* @return Token position and value for sought param.
*/
const std::pair<int, storage::PropertyValue> &At(int position) const {
CHECK(position < static_cast<int>(storage_.size())) << "Invalid position";
MG_ASSERT(position < static_cast<int>(storage_.size()), "Invalid position");
return storage_[position];
}

View File

@ -3,9 +3,8 @@
#include <functional>
#include <utility>
#include "glog/logging.h"
#include "query/db_accessor.hpp"
#include "utils/logging.hpp"
#include "utils/memory.hpp"
#include "utils/pmr/vector.hpp"
@ -37,7 +36,7 @@ class Path {
* Allocations are done using the default utils::NewDeleteResource().
*/
template <typename... TOthers>
explicit Path(const VertexAccessor &vertex, const TOthers &... others)
explicit Path(const VertexAccessor &vertex, const TOthers &...others)
: vertices_(utils::NewDeleteResource()),
edges_(utils::NewDeleteResource()) {
Expand(vertex);
@ -51,7 +50,7 @@ class Path {
*/
template <typename... TOthers>
Path(std::allocator_arg_t, utils::MemoryResource *memory,
const VertexAccessor &vertex, const TOthers &... others)
const VertexAccessor &vertex, const TOthers &...others)
: vertices_(memory), edges_(memory) {
Expand(vertex);
Expand(others...);
@ -103,21 +102,21 @@ class Path {
/** Expands the path with the given vertex. */
void Expand(const VertexAccessor &vertex) {
DCHECK(vertices_.size() == edges_.size())
<< "Illegal path construction order";
DMG_ASSERT(vertices_.size() == edges_.size(),
"Illegal path construction order");
vertices_.emplace_back(vertex);
}
/** Expands the path with the given edge. */
void Expand(const EdgeAccessor &edge) {
DCHECK(vertices_.size() - 1 == edges_.size())
<< "Illegal path construction order";
DMG_ASSERT(vertices_.size() - 1 == edges_.size(),
"Illegal path construction order");
edges_.emplace_back(edge);
}
/** Expands the path with the given elements. */
template <typename TFirst, typename... TOthers>
void Expand(const TFirst &first, const TOthers &... others) {
void Expand(const TFirst &first, const TOthers &...others) {
Expand(first);
Expand(others...);
}

View File

@ -11,8 +11,6 @@
#include <unordered_set>
#include <utility>
#include "glog/logging.h"
#include <cppitertools/chain.hpp>
#include <cppitertools/imap.hpp>
@ -28,6 +26,7 @@
#include "utils/algorithm.hpp"
#include "utils/exceptions.hpp"
#include "utils/fnv.hpp"
#include "utils/logging.hpp"
#include "utils/pmr/unordered_map.hpp"
#include "utils/pmr/unordered_set.hpp"
#include "utils/pmr/vector.hpp"
@ -43,13 +42,13 @@
return visitor.PostVisit(*this); \
}
#define WITHOUT_SINGLE_INPUT(class_name) \
bool class_name::HasSingleInput() const { return false; } \
std::shared_ptr<LogicalOperator> class_name::input() const { \
LOG(FATAL) << "Operator " << #class_name << " has no single input!"; \
} \
void class_name::set_input(std::shared_ptr<LogicalOperator>) { \
LOG(FATAL) << "Operator " << #class_name << " has no single input!"; \
#define WITHOUT_SINGLE_INPUT(class_name) \
bool class_name::HasSingleInput() const { return false; } \
std::shared_ptr<LogicalOperator> class_name::input() const { \
LOG_FATAL("Operator " #class_name " has no single input!"); \
} \
void class_name::set_input(std::shared_ptr<LogicalOperator>) { \
LOG_FATAL("Operator " #class_name " has no single input!"); \
}
namespace query::plan {
@ -62,9 +61,9 @@ struct TypedValueVectorEqual {
template <class TAllocator>
bool operator()(const std::vector<TypedValue, TAllocator> &left,
const std::vector<TypedValue, TAllocator> &right) const {
CHECK(left.size() == right.size())
<< "TypedValueVector comparison should only be done over vectors "
"of the same size";
MG_ASSERT(left.size() == right.size(),
"TypedValueVector comparison should only be done over vectors "
"of the same size");
return std::equal(left.begin(), left.end(), right.begin(),
TypedValue::BoolEqual{});
}
@ -405,7 +404,7 @@ ScanAllByLabelPropertyRange::ScanAllByLabelPropertyRange(
property_name_(property_name),
lower_bound_(lower_bound),
upper_bound_(upper_bound) {
CHECK(lower_bound_ || upper_bound_) << "Only one bound can be left out";
MG_ASSERT(lower_bound_ || upper_bound_, "Only one bound can be left out");
}
ACCEPT_WITH_INPUT(ScanAllByLabelPropertyRange)
@ -472,7 +471,7 @@ ScanAllByLabelPropertyValue::ScanAllByLabelPropertyValue(
property_(property),
property_name_(property_name),
expression_(expression) {
DCHECK(expression) << "Expression is not optional.";
DMG_ASSERT(expression, "Expression is not optional.");
}
ACCEPT_WITH_INPUT(ScanAllByLabelPropertyValue)
@ -520,12 +519,11 @@ UniqueCursorPtr ScanAllByLabelProperty::MakeCursor(
mem, output_symbol_, input_->MakeCursor(mem), std::move(vertices));
}
ScanAllById::ScanAllById(const std::shared_ptr<LogicalOperator> &input,
Symbol output_symbol, Expression *expression,
storage::View view)
: ScanAll(input, output_symbol, view), expression_(expression) {
CHECK(expression);
MG_ASSERT(expression);
}
ACCEPT_WITH_INPUT(ScanAllById)
@ -622,7 +620,7 @@ bool Expand::ExpandCursor::Pull(Frame &frame, ExecutionContext &context) {
frame[self_.common_.node_symbol] = new_edge.To();
break;
case EdgeAtom::Direction::BOTH:
LOG(FATAL) << "Must indicate exact expansion direction here";
LOG_FATAL("Must indicate exact expansion direction here");
}
};
@ -746,13 +744,14 @@ ExpandVariable::ExpandVariable(
filter_lambda_(filter_lambda),
weight_lambda_(weight_lambda),
total_weight_(total_weight) {
DCHECK(type_ == EdgeAtom::Type::DEPTH_FIRST ||
type_ == EdgeAtom::Type::BREADTH_FIRST ||
type_ == EdgeAtom::Type::WEIGHTED_SHORTEST_PATH)
<< "ExpandVariable can only be used with breadth first, depth first or "
"weighted shortest path type";
DCHECK(!(type_ == EdgeAtom::Type::BREADTH_FIRST && is_reverse))
<< "Breadth first expansion can't be reversed";
DMG_ASSERT(
type_ == EdgeAtom::Type::DEPTH_FIRST ||
type_ == EdgeAtom::Type::BREADTH_FIRST ||
type_ == EdgeAtom::Type::WEIGHTED_SHORTEST_PATH,
"ExpandVariable can only be used with breadth first, depth first or "
"weighted shortest path type");
DMG_ASSERT(!(type_ == EdgeAtom::Type::BREADTH_FIRST && is_reverse),
"Breadth first expansion can't be reversed");
}
ACCEPT_WITH_INPUT(ExpandVariable)
@ -940,7 +939,7 @@ class ExpandVariableCursor : public Cursor {
utils::pmr::vector<TypedValue> *edges_on_frame) {
// We are placing an edge on the frame. It is possible that there already
// exists an edge on the frame for this level. If so first remove it.
DCHECK(edges_.size() > 0) << "Edges are empty";
DMG_ASSERT(edges_.size() > 0, "Edges are empty");
if (self_.is_reverse_) {
// TODO: This is inefficient, we should look into replacing
// vector with something else for TypedValue::List.
@ -1059,10 +1058,10 @@ class STShortestPathCursor : public query::plan::Cursor {
public:
STShortestPathCursor(const ExpandVariable &self, utils::MemoryResource *mem)
: self_(self), input_cursor_(self_.input()->MakeCursor(mem)) {
CHECK(self_.common_.existing_node)
<< "s-t shortest path algorithm should only "
"be used when `existing_node` flag is "
"set!";
MG_ASSERT(self_.common_.existing_node,
"s-t shortest path algorithm should only "
"be used when `existing_node` flag is "
"set!");
}
bool Pull(Frame &frame, ExecutionContext &context) override {
@ -1310,11 +1309,11 @@ class SingleSourceShortestPathCursor : public query::plan::Cursor {
processed_(mem),
to_visit_current_(mem),
to_visit_next_(mem) {
CHECK(!self_.common_.existing_node)
<< "Single source shortest path algorithm "
"should not be used when `existing_node` "
"flag is set, s-t shortest path algorithm "
"should be used instead!";
MG_ASSERT(!self_.common_.existing_node,
"Single source shortest path algorithm "
"should not be used when `existing_node` "
"flag is set, s-t shortest path algorithm "
"should be used instead!");
}
bool Pull(Frame &frame, ExecutionContext &context) override {
@ -1731,8 +1730,7 @@ UniqueCursorPtr ExpandVariable::MakeCursor(utils::MemoryResource *mem) const {
return MakeUniqueCursorPtr<ExpandWeightedShortestPathCursor>(mem, *this,
mem);
case EdgeAtom::Type::SINGLE:
LOG(FATAL)
<< "ExpandVariable should not be planned for a single expansion!";
LOG_FATAL("ExpandVariable should not be planned for a single expansion!");
}
}
@ -1748,8 +1746,8 @@ class ConstructNamedPathCursor : public Cursor {
if (!input_cursor_->Pull(frame, context)) return false;
auto symbol_it = self_.path_elements_.begin();
DCHECK(symbol_it != self_.path_elements_.end())
<< "Named path must contain at least one node";
DMG_ASSERT(symbol_it != self_.path_elements_.end(),
"Named path must contain at least one node");
const auto &start_vertex = frame[*symbol_it++];
auto *pull_memory = context.evaluation_context.memory;
@ -1759,8 +1757,8 @@ class ConstructNamedPathCursor : public Cursor {
return true;
}
DCHECK(start_vertex.IsVertex())
<< "First named path element must be a vertex";
DMG_ASSERT(start_vertex.IsVertex(),
"First named path element must be a vertex");
query::Path path(start_vertex.ValueVertex(), pull_memory);
// If the last path element symbol was for an edge list, then
@ -1800,7 +1798,7 @@ class ConstructNamedPathCursor : public Cursor {
break;
}
default:
LOG(FATAL) << "Unsupported type in named path construction";
LOG_FATAL("Unsupported type in named path construction");
break;
}
@ -2227,12 +2225,12 @@ bool SetProperties::SetPropertiesCursor::Pull(Frame &frame,
switch (lhs.type()) {
case TypedValue::Type::Vertex:
SetPropertiesOnRecord(context.db_accessor, &lhs.ValueVertex(),
rhs, self_.op_);
SetPropertiesOnRecord(context.db_accessor, &lhs.ValueVertex(), rhs,
self_.op_);
break;
case TypedValue::Type::Edge:
SetPropertiesOnRecord(context.db_accessor, &lhs.ValueEdge(),
rhs, self_.op_);
SetPropertiesOnRecord(context.db_accessor, &lhs.ValueEdge(), rhs,
self_.op_);
break;
case TypedValue::Type::Null:
// Skip setting properties on Null (can occur in optional match).
@ -2779,12 +2777,12 @@ class AggregateCursor : public Cursor {
* the AggregationValue has been initialized */
void Update(ExpressionEvaluator *evaluator,
AggregateCursor::AggregationValue *agg_value) {
DCHECK(self_.aggregations_.size() == agg_value->values_.size())
<< "Expected as much AggregationValue.values_ as there are "
"aggregations.";
DCHECK(self_.aggregations_.size() == agg_value->counts_.size())
<< "Expected as much AggregationValue.counts_ as there are "
"aggregations.";
DMG_ASSERT(self_.aggregations_.size() == agg_value->values_.size(),
"Expected as many AggregationValue.values_ as there are "
"aggregations.");
DMG_ASSERT(self_.aggregations_.size() == agg_value->counts_.size(),
"Expected as many AggregationValue.counts_ as there are "
"aggregations.");
// we iterate over counts, values and aggregation info at the same time
auto count_it = agg_value->counts_.begin();
@ -3113,9 +3111,9 @@ class OrderByCursor : public Cursor {
if (MustAbort(context)) throw HintedAbortError();
// place the output values on the frame
DCHECK(self_.output_symbols_.size() == cache_it_->remember.size())
<< "Number of values does not match the number of output symbols "
"in OrderBy";
DMG_ASSERT(self_.output_symbols_.size() == cache_it_->remember.size(),
"Number of values does not match the number of output symbols "
"in OrderBy");
auto output_sym_it = self_.output_symbols_.begin();
for (const TypedValue &output : cache_it_->remember)
frame[*output_sym_it++] = output;
@ -3213,7 +3211,7 @@ bool Merge::MergeCursor::Pull(Frame &frame, ExecutionContext &context) {
// and failed to pull from merge_match, we should create
__attribute__((unused)) bool merge_create_pull_result =
merge_create_cursor_->Pull(frame, context);
DCHECK(merge_create_pull_result) << "MergeCreate must never fail";
DMG_ASSERT(merge_create_pull_result, "MergeCreate must never fail");
return true;
}
// We have exhausted merge_match_cursor_ after 1 or more successful
@ -3555,10 +3553,10 @@ class CartesianCursor : public Cursor {
right_op_frame_(mem),
left_op_cursor_(self.left_op_->MakeCursor(mem)),
right_op_cursor_(self_.right_op_->MakeCursor(mem)) {
CHECK(left_op_cursor_ != nullptr)
<< "CartesianCursor: Missing left operator cursor.";
CHECK(right_op_cursor_ != nullptr)
<< "CartesianCursor: Missing right operator cursor.";
MG_ASSERT(left_op_cursor_ != nullptr,
"CartesianCursor: Missing left operator cursor.");
MG_ASSERT(right_op_cursor_ != nullptr,
"CartesianCursor: Missing right operator cursor.");
}
bool Pull(Frame &frame, ExecutionContext &context) override {
@ -3660,8 +3658,8 @@ class OutputTableCursor : public Cursor {
if (!pulled_) {
rows_ = self_.callback_(&frame, &context);
for (const auto &row : rows_) {
CHECK(row.size() == self_.output_symbols_.size())
<< "Wrong number of columns in row!";
MG_ASSERT(row.size() == self_.output_symbols_.size(),
"Wrong number of columns in row!");
}
pulled_ = true;
}
@ -3712,8 +3710,8 @@ class OutputTableStreamCursor : public Cursor {
bool Pull(Frame &frame, ExecutionContext &context) override {
const auto row = self_->callback_(&frame, &context);
if (row) {
CHECK(row->size() == self_->output_symbols_.size())
<< "Wrong number of columns in row!";
MG_ASSERT(row->size() == self_->output_symbols_.size(),
"Wrong number of columns in row!");
for (size_t i = 0; i < self_->output_symbols_.size(); ++i) {
frame[self_->output_symbols_[i]] = row->at(i);
}
@ -3820,7 +3818,7 @@ void CallCustomProcedure(const std::string_view &fully_qualified_procedure_name,
name = proc.args[i].first;
type = proc.args[i].second;
} else {
CHECK(proc.opt_args.size() > i - proc.args.size());
MG_ASSERT(proc.opt_args.size() > i - proc.args.size());
name = std::get<0>(proc.opt_args[i - proc.args.size()]);
type = std::get<1>(proc.opt_args[i - proc.args.size()]);
}
@ -3832,29 +3830,30 @@ void CallCustomProcedure(const std::string_view &fully_qualified_procedure_name,
proc_args.elems.emplace_back(std::move(arg), &graph);
}
// Fill missing optional arguments with their default values.
CHECK(args.size() >= proc.args.size());
MG_ASSERT(args.size() >= proc.args.size());
size_t passed_in_opt_args = args.size() - proc.args.size();
CHECK(passed_in_opt_args <= proc.opt_args.size());
MG_ASSERT(passed_in_opt_args <= proc.opt_args.size());
for (size_t i = passed_in_opt_args; i < proc.opt_args.size(); ++i) {
proc_args.elems.emplace_back(std::get<2>(proc.opt_args[i]), &graph);
}
if (memory_limit) {
DLOG(INFO) << "Running '" << fully_qualified_procedure_name
<< "' with memory limit of " << *memory_limit << " bytes";
SPDLOG_INFO("Running '{}' with memory limit of {} bytes",
fully_qualified_procedure_name, *memory_limit);
utils::LimitedMemoryResource limited_mem(memory, *memory_limit);
mgp_memory proc_memory{&limited_mem};
CHECK(result->signature == &proc.results);
MG_ASSERT(result->signature == &proc.results);
// TODO: What about cross library boundary exceptions? OMG C++?!
proc.cb(&proc_args, &graph, result, &proc_memory);
size_t leaked_bytes = limited_mem.GetAllocatedBytes();
LOG_IF(WARNING, leaked_bytes > 0U)
<< "Query procedure '" << fully_qualified_procedure_name << "' leaked "
<< leaked_bytes << " *tracked* bytes";
if (leaked_bytes > 0U) {
spdlog::warn("Query procedure '{}' leaked {} *tracked* bytes",
fully_qualified_procedure_name, leaked_bytes);
}
} else {
// TODO: Add a tracking MemoryResource without limits, so that we report
// memory leaks in procedure.
mgp_memory proc_memory{memory};
CHECK(result->signature == &proc.results);
MG_ASSERT(result->signature == &proc.results);
// TODO: What about cross library boundary exceptions? OMG C++?!
proc.cb(&proc_args, &graph, result, &proc_memory);
}
@ -3877,8 +3876,8 @@ class CallProcedureCursor : public Cursor {
// rows are produced. Therefore, we use the memory dedicated for the
// whole execution.
result_(nullptr, mem) {
CHECK(self_->result_fields_.size() == self_->result_symbols_.size())
<< "Incorrectly constructed CallProcedure";
MG_ASSERT(self_->result_fields_.size() == self_->result_symbols_.size(),
"Incorrectly constructed CallProcedure");
}
bool Pull(Frame &frame, ExecutionContext &context) override {
@ -3948,7 +3947,8 @@ class CallProcedureCursor : public Cursor {
if (values.size() != result_signature_size_) {
throw QueryRuntimeException(
"Procedure '{}' did not yield all fields as required by its "
"signature.", self_->procedure_name_);
"signature.",
self_->procedure_name_);
}
for (size_t i = 0; i < self_->result_fields_.size(); ++i) {
std::string_view field_name(self_->result_fields_[i]);
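The procedure-call changes above also swap DLOG(INFO) for SPDLOG_INFO and replace LOG_IF(WARNING, ...) with an explicit if around spdlog::warn, so these messages now use fmt-style placeholders and pass through spdlog's level filtering. As a small standalone reference (not part of the diff), the spdlog calls used here behave roughly like this:

// Standalone spdlog usage sketch; values and names are made up for illustration.
#include <spdlog/spdlog.h>

int main() {
  // Runtime filtering: anything below the configured level is dropped.
  spdlog::set_level(spdlog::level::warn);

  // fmt-style placeholders replace glog's stream insertion.
  spdlog::warn("Query procedure '{}' leaked {} *tracked* bytes", "example.proc", 128);

  // SPDLOG_INFO is additionally gated at compile time by SPDLOG_ACTIVE_LEVEL;
  // here it compiles in but is filtered out at runtime by the warn level above.
  SPDLOG_INFO("Running '{}' with memory limit of {} bytes", "example.proc", 1024);
  return 0;
}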

View File

@ -19,6 +19,7 @@
#include "utils/fnv.hpp"
#include "utils/memory.hpp"
#include "utils/visitor.hpp"
#include "utils/logging.hpp"
cpp<#
(lcp:namespace query)
@ -2059,7 +2060,7 @@ of symbols used by each of the inputs.")
std::vector<std::vector<TypedValue>> rows);
bool Accept(HierarchicalLogicalOperatorVisitor &) override {
LOG(FATAL) << "OutputTable operator should not be visited!";
LOG_FATAL("OutputTable operator should not be visited!");
}
UniqueCursorPtr MakeCursor(utils::MemoryResource *) const override;
@ -2093,7 +2094,7 @@ at once. Instead, each call of the callback should return a single row of the ta
callback);
bool Accept(HierarchicalLogicalOperatorVisitor &) override {
LOG(FATAL) << "OutputTableStream operator should not be visited!";
LOG_FATAL("OutputTableStream operator should not be visited!");
}
UniqueCursorPtr MakeCursor(utils::MemoryResource *) const override;

View File

@ -11,20 +11,20 @@ namespace {
void ForEachPattern(
Pattern &pattern, std::function<void(NodeAtom *)> base,
std::function<void(NodeAtom *, EdgeAtom *, NodeAtom *)> collect) {
DCHECK(!pattern.atoms_.empty()) << "Missing atoms in pattern";
DMG_ASSERT(!pattern.atoms_.empty(), "Missing atoms in pattern");
auto atoms_it = pattern.atoms_.begin();
auto current_node = utils::Downcast<NodeAtom>(*atoms_it++);
DCHECK(current_node) << "First pattern atom is not a node";
DMG_ASSERT(current_node, "First pattern atom is not a node");
base(current_node);
// Remaining atoms need to follow sequentially as (EdgeAtom, NodeAtom)*
while (atoms_it != pattern.atoms_.end()) {
auto edge = utils::Downcast<EdgeAtom>(*atoms_it++);
DCHECK(edge) << "Expected an edge atom in pattern.";
DCHECK(atoms_it != pattern.atoms_.end())
<< "Edge atom should not end the pattern.";
DMG_ASSERT(edge, "Expected an edge atom in pattern.");
DMG_ASSERT(atoms_it != pattern.atoms_.end(),
"Edge atom should not end the pattern.");
auto prev_node = current_node;
current_node = utils::Downcast<NodeAtom>(*atoms_it++);
DCHECK(current_node) << "Expected a node atom in pattern.";
DMG_ASSERT(current_node, "Expected a node atom in pattern.");
collect(prev_node, edge, current_node);
}
}
@ -67,7 +67,7 @@ std::vector<Expansion> NormalizePatterns(
for (const auto &pattern : patterns) {
if (pattern->atoms_.size() == 1U) {
auto *node = utils::Downcast<NodeAtom>(pattern->atoms_[0]);
DCHECK(node) << "First pattern atom is not a node";
DMG_ASSERT(node, "First pattern atom is not a node");
expansions.emplace_back(Expansion{node});
} else {
ForEachPattern(*pattern, ignore_node, collect_expansion);
@ -155,7 +155,7 @@ PropertyFilter::PropertyFilter(const SymbolTable &symbol_table,
const Symbol &symbol, PropertyIx property,
Expression *value, Type type)
: symbol_(symbol), property_(property), type_(type), value_(value) {
CHECK(type != Type::RANGE);
MG_ASSERT(type != Type::RANGE);
UsedSymbolsCollector collector(symbol_table);
value->Accept(collector);
is_symbol_in_value_ = utils::Contains(collector.symbols_, symbol);
@ -190,11 +190,10 @@ PropertyFilter::PropertyFilter(const Symbol &symbol, PropertyIx property,
// we may be looking up.
}
IdFilter::IdFilter(const SymbolTable &symbol_table, const Symbol &symbol,
Expression *value)
: symbol_(symbol), value_(value) {
CHECK(value);
MG_ASSERT(value);
UsedSymbolsCollector collector(symbol_table);
value->Accept(collector);
is_symbol_in_value_ = utils::Contains(collector.symbols_, symbol);
@ -229,8 +228,8 @@ void Filters::EraseLabelFilter(const Symbol &symbol, LabelIx label,
continue;
}
filter_it->labels.erase(label_it);
DCHECK(!utils::Contains(filter_it->labels, label))
<< "Didn't expect duplicated labels";
DMG_ASSERT(!utils::Contains(filter_it->labels, label),
"Didn't expect duplicated labels");
if (filter_it->labels.empty()) {
// If there are no labels to filter, then erase the whole FilterInfo.
if (removed_filters) {
@ -489,8 +488,8 @@ void Filters::AnalyzeAndStoreFilter(Expression *expr,
};
// We are only interested to see the insides of And, because Or prevents
// indexing since any labels and properties found there may be optional.
DCHECK(!utils::IsSubtype(*expr, AndOperator::kType))
<< "Expected AndOperators have been split.";
DMG_ASSERT(!utils::IsSubtype(*expr, AndOperator::kType),
"Expected AndOperators have been split.");
if (auto *labels_test = utils::Downcast<LabelsTest>(expr)) {
// Since LabelsTest may contain any expression, we can only use the
// simplest test on an identifier.
@ -583,8 +582,8 @@ std::vector<SingleQueryPart> CollectSingleQueryParts(
AddMatching(*match, symbol_table, storage,
query_part->optional_matching.back());
} else {
DCHECK(query_part->optional_matching.empty())
<< "Match clause cannot follow optional match.";
DMG_ASSERT(query_part->optional_matching.empty(),
"Match clause cannot follow optional match.");
AddMatching(*match, symbol_table, storage, query_part->matching);
}
} else {
@ -612,7 +611,7 @@ QueryParts CollectQueryParts(SymbolTable &symbol_table, AstStorage &storage,
std::vector<QueryPart> query_parts;
auto *single_query = query->single_query_;
CHECK(single_query) << "Expected at least a single query";
MG_ASSERT(single_query, "Expected at least a single query");
query_parts.push_back(
QueryPart{CollectSingleQueryParts(symbol_table, storage, single_query)});
@ -623,7 +622,7 @@ QueryParts CollectQueryParts(SymbolTable &symbol_table, AstStorage &storage,
}
auto *single_query = cypher_union->single_query_;
CHECK(single_query) << "Expected UNION to have a query";
MG_ASSERT(single_query, "Expected UNION to have a query");
query_parts.push_back(
QueryPart{CollectSingleQueryParts(symbol_table, storage, single_query),
cypher_union});

View File

@ -189,8 +189,8 @@ class Filters final {
for (const auto &filter : all_filters_) {
if (filter.type == FilterInfo::Type::Label &&
utils::Contains(filter.used_symbols, symbol)) {
CHECK(filter.used_symbols.size() == 1U)
<< "Expected a single used symbol for label filter";
MG_ASSERT(filter.used_symbols.size() == 1U,
"Expected a single used symbol for label filter");
labels.insert(filter.labels.begin(), filter.labels.end());
}
}

View File

@ -121,7 +121,7 @@ bool PlanPrinter::PreVisit(query::plan::ExpandVariable &op) {
*out_ << "WeightedShortestPath";
break;
case Type::SINGLE:
LOG(FATAL) << "Unexpected ExpandVariable::type_";
LOG_FATAL("Unexpected ExpandVariable::type_");
}
*out_ << " (" << op.input_symbol_.name() << ")"
<< (op.common_.direction == query::EdgeAtom::Direction::IN ? "<-"

View File

@ -454,9 +454,9 @@ class IndexLookupRewriter final : public HierarchicalLogicalOperatorVisitor {
}
void SetOnParent(const std::shared_ptr<LogicalOperator> &input) {
CHECK(input);
MG_ASSERT(input);
if (prev_ops_.empty()) {
CHECK(!new_root_);
MG_ASSERT(!new_root_);
new_root_ = input;
return;
}
@ -481,8 +481,8 @@ class IndexLookupRewriter final : public HierarchicalLogicalOperatorVisitor {
std::optional<LabelIx> FindBestLabelIndex(
const std::unordered_set<LabelIx> &labels) {
CHECK(!labels.empty())
<< "Trying to find the best label without any labels.";
MG_ASSERT(!labels.empty(),
"Trying to find the best label without any labels.");
std::optional<LabelIx> best_label;
for (const auto &label : labels) {
if (!db_->LabelIndexExists(GetLabel(label))) continue;
@ -641,8 +641,9 @@ class IndexLookupRewriter final : public HierarchicalLogicalOperatorVisitor {
GetProperty(prop_filter.property_), prop_filter.property_.name,
view);
} else {
CHECK(prop_filter.value_) << "Property filter should either have "
"bounds or a value expression.";
MG_ASSERT(
prop_filter.value_,
"Property filter should either have bounds or a value expression.");
return std::make_unique<ScanAllByLabelPropertyValue>(
input, node_symbol, GetLabel(found_index->label),
GetProperty(prop_filter.property_), prop_filter.property_.name,
@ -679,8 +680,8 @@ std::unique_ptr<LogicalOperator> RewriteWithIndexLookup(
if (rewriter.new_root_) {
// This shouldn't happen in real use case, because IndexLookupRewriter
// removes Filter operations and they cannot be the root op. In case we
// somehow missed this, raise NotYetImplemented instead of CHECK crashing
// the application.
// somehow missed this, raise NotYetImplemented instead of MG_ASSERT
// crashing the application.
throw utils::NotYetImplemented("optimizing index lookup");
}
return root_op;

View File

@ -8,6 +8,7 @@
#include "utils/algorithm.hpp"
#include "utils/exceptions.hpp"
#include "utils/logging.hpp"
namespace query::plan {
@ -77,8 +78,8 @@ class ReturnBodyContext : public HierarchicalTreeVisitor {
if (where) {
where->Accept(*this);
}
CHECK(aggregations_.empty())
<< "Unexpected aggregations in ORDER BY or WHERE";
MG_ASSERT(aggregations_.empty(),
"Unexpected aggregations in ORDER BY or WHERE");
}
}
@ -121,16 +122,17 @@ class ReturnBodyContext : public HierarchicalTreeVisitor {
public:
bool PostVisit(ListLiteral &list_literal) override {
CHECK(list_literal.elements_.size() <= has_aggregation_.size())
<< "Expected as many has_aggregation_ flags as there are list"
"elements.";
MG_ASSERT(list_literal.elements_.size() <= has_aggregation_.size(),
"Expected as many has_aggregation_ flags as there are list "
"elements.");
PostVisitCollectionLiteral(list_literal, [](auto it) { return *it; });
return true;
}
bool PostVisit(MapLiteral &map_literal) override {
CHECK(map_literal.elements_.size() <= has_aggregation_.size())
<< "Expected has_aggregation_ flags as much as there are map elements.";
MG_ASSERT(
map_literal.elements_.size() <= has_aggregation_.size(),
"Expected as many has_aggregation_ flags as there are map elements.");
PostVisitCollectionLiteral(map_literal, [](auto it) { return it->second; });
return true;
}
@ -139,8 +141,8 @@ class ReturnBodyContext : public HierarchicalTreeVisitor {
// Remove the symbol which is bound by all, because we are only interested
// in free (unbound) symbols.
used_symbols_.erase(symbol_table_.at(*all.identifier_));
CHECK(has_aggregation_.size() >= 3U)
<< "Expected 3 has_aggregation_ flags for ALL arguments";
MG_ASSERT(has_aggregation_.size() >= 3U,
"Expected 3 has_aggregation_ flags for ALL arguments");
bool has_aggr = false;
for (int i = 0; i < 3; ++i) {
has_aggr = has_aggr || has_aggregation_.back();
@ -154,8 +156,8 @@ class ReturnBodyContext : public HierarchicalTreeVisitor {
// Remove the symbol which is bound by single, because we are only
// interested in free (unbound) symbols.
used_symbols_.erase(symbol_table_.at(*single.identifier_));
CHECK(has_aggregation_.size() >= 3U)
<< "Expected 3 has_aggregation_ flags for SINGLE arguments";
MG_ASSERT(has_aggregation_.size() >= 3U,
"Expected 3 has_aggregation_ flags for SINGLE arguments");
bool has_aggr = false;
for (int i = 0; i < 3; ++i) {
has_aggr = has_aggr || has_aggregation_.back();
@ -169,8 +171,8 @@ class ReturnBodyContext : public HierarchicalTreeVisitor {
// Remove the symbol which is bound by any, because we are only interested
// in free (unbound) symbols.
used_symbols_.erase(symbol_table_.at(*any.identifier_));
CHECK(has_aggregation_.size() >= 3U)
<< "Expected 3 has_aggregation_ flags for ANY arguments";
MG_ASSERT(has_aggregation_.size() >= 3U,
"Expected 3 has_aggregation_ flags for ANY arguments");
bool has_aggr = false;
for (int i = 0; i < 3; ++i) {
has_aggr = has_aggr || has_aggregation_.back();
@ -184,8 +186,8 @@ class ReturnBodyContext : public HierarchicalTreeVisitor {
// Remove the symbol which is bound by none, because we are only interested
// in free (unbound) symbols.
used_symbols_.erase(symbol_table_.at(*none.identifier_));
CHECK(has_aggregation_.size() >= 3U)
<< "Expected 3 has_aggregation_ flags for NONE arguments";
MG_ASSERT(has_aggregation_.size() >= 3U,
"Expected 3 has_aggregation_ flags for NONE arguments");
bool has_aggr = false;
for (int i = 0; i < 3; ++i) {
has_aggr = has_aggr || has_aggregation_.back();
@ -200,8 +202,8 @@ class ReturnBodyContext : public HierarchicalTreeVisitor {
// in free (unbound) symbols.
used_symbols_.erase(symbol_table_.at(*reduce.accumulator_));
used_symbols_.erase(symbol_table_.at(*reduce.identifier_));
CHECK(has_aggregation_.size() >= 5U)
<< "Expected 5 has_aggregation_ flags for REDUCE arguments";
MG_ASSERT(has_aggregation_.size() >= 5U,
"Expected 5 has_aggregation_ flags for REDUCE arguments");
bool has_aggr = false;
for (int i = 0; i < 5; ++i) {
has_aggr = has_aggr || has_aggregation_.back();
@ -212,9 +214,9 @@ class ReturnBodyContext : public HierarchicalTreeVisitor {
}
bool PostVisit(Coalesce &coalesce) override {
CHECK(has_aggregation_.size() >= coalesce.expressions_.size())
<< "Expected >= " << has_aggregation_.size()
<< "has_aggregation_ flags for COALESCE arguments";
MG_ASSERT(has_aggregation_.size() >= coalesce.expressions_.size(),
"Expected >= {} has_aggregation_ flags for COALESCE arguments",
has_aggregation_.size());
bool has_aggr = false;
for (size_t i = 0; i < coalesce.expressions_.size(); ++i) {
has_aggr = has_aggr || has_aggregation_.back();
@ -228,8 +230,8 @@ class ReturnBodyContext : public HierarchicalTreeVisitor {
// Remove the symbol bound by extract, because we are only interested
// in free (unbound) symbols.
used_symbols_.erase(symbol_table_.at(*extract.identifier_));
CHECK(has_aggregation_.size() >= 3U)
<< "Expected 3 has_aggregation_ flags for EXTRACT arguments";
MG_ASSERT(has_aggregation_.size() >= 3U,
"Expected 3 has_aggregation_ flags for EXTRACT arguments");
bool has_aggr = false;
for (int i = 0; i < 3; ++i) {
has_aggr = has_aggr || has_aggregation_.back();
@ -287,14 +289,14 @@ class ReturnBodyContext : public HierarchicalTreeVisitor {
has_aggregation_.emplace_back(has_aggr);
// TODO: Once we allow aggregations here, insert appropriate stuff in
// group_by.
CHECK(!has_aggr) << "Currently aggregations in CASE are not allowed";
MG_ASSERT(!has_aggr, "Currently aggregations in CASE are not allowed");
return false;
}
bool PostVisit(Function &function) override {
CHECK(function.arguments_.size() <= has_aggregation_.size())
<< "Expected as many has_aggregation_ flags as there are"
"function arguments.";
MG_ASSERT(function.arguments_.size() <= has_aggregation_.size(),
"Expected as many has_aggregation_ flags as there are "
"function arguments.");
bool has_aggr = false;
auto it = has_aggregation_.end();
std::advance(it, -function.arguments_.size());
@ -308,8 +310,8 @@ class ReturnBodyContext : public HierarchicalTreeVisitor {
#define VISIT_BINARY_OPERATOR(BinaryOperator) \
bool PostVisit(BinaryOperator &op) override { \
CHECK(has_aggregation_.size() >= 2U) \
<< "Expected at least 2 has_aggregation_ flags."; \
MG_ASSERT(has_aggregation_.size() >= 2U, \
"Expected at least 2 has_aggregation_ flags."); \
/* has_aggregation_ stack is reversed, last result is from the 2nd */ \
/* expression. */ \
bool aggr2 = has_aggregation_.back(); \
@ -366,8 +368,8 @@ class ReturnBodyContext : public HierarchicalTreeVisitor {
}
bool PostVisit(NamedExpression &named_expr) override {
CHECK(has_aggregation_.size() == 1U)
<< "Expected to reduce has_aggregation_ to single boolean.";
MG_ASSERT(has_aggregation_.size() == 1U,
"Expected to reduce has_aggregation_ to single boolean.");
if (!has_aggregation_.back()) {
group_by_.emplace_back(named_expr.expression_);
}
@ -381,8 +383,8 @@ class ReturnBodyContext : public HierarchicalTreeVisitor {
}
bool PostVisit(RegexMatch &regex_match) override {
CHECK(has_aggregation_.size() >= 2U)
<< "Expected 2 has_aggregation_ flags for RegexMatch arguments";
MG_ASSERT(has_aggregation_.size() >= 2U,
"Expected 2 has_aggregation_ flags for RegexMatch arguments");
bool has_aggr = has_aggregation_.back();
has_aggregation_.pop_back();
has_aggregation_.back() |= has_aggr;
@ -393,10 +395,10 @@ class ReturnBodyContext : public HierarchicalTreeVisitor {
// This should be used when body.all_identifiers is true, to generate
// expressions for Produce operator.
void ExpandUserSymbols() {
CHECK(named_expressions_.empty())
<< "ExpandUserSymbols should be first to fill named_expressions_";
CHECK(output_symbols_.empty())
<< "ExpandUserSymbols should be first to fill output_symbols_";
MG_ASSERT(named_expressions_.empty(),
"ExpandUserSymbols should be first to fill named_expressions_");
MG_ASSERT(output_symbols_.empty(),
"ExpandUserSymbols should be first to fill output_symbols_");
for (const auto &symbol : bound_symbols_) {
if (!symbol.user_declared()) {
continue;

View File

@ -8,6 +8,7 @@
#include "query/frontend/ast/ast.hpp"
#include "query/plan/operator.hpp"
#include "query/plan/preprocess.hpp"
#include "utils/logging.hpp"
namespace query::plan {
@ -94,20 +95,20 @@ template <typename T>
auto ReducePattern(
Pattern &pattern, std::function<T(NodeAtom *)> base,
std::function<T(T, NodeAtom *, EdgeAtom *, NodeAtom *)> collect) {
CHECK(!pattern.atoms_.empty()) << "Missing atoms in pattern";
MG_ASSERT(!pattern.atoms_.empty(), "Missing atoms in pattern");
auto atoms_it = pattern.atoms_.begin();
auto current_node = utils::Downcast<NodeAtom>(*atoms_it++);
CHECK(current_node) << "First pattern atom is not a node";
MG_ASSERT(current_node, "First pattern atom is not a node");
auto last_res = base(current_node);
// Remaining atoms need to follow sequentially as (EdgeAtom, NodeAtom)*
while (atoms_it != pattern.atoms_.end()) {
auto edge = utils::Downcast<EdgeAtom>(*atoms_it++);
CHECK(edge) << "Expected an edge atom in pattern.";
CHECK(atoms_it != pattern.atoms_.end())
<< "Edge atom should not end the pattern.";
MG_ASSERT(edge, "Expected an edge atom in pattern.");
MG_ASSERT(atoms_it != pattern.atoms_.end(),
"Edge atom should not end the pattern.");
auto prev_node = current_node;
current_node = utils::Downcast<NodeAtom>(*atoms_it++);
CHECK(current_node) << "Expected a node atom in pattern.";
MG_ASSERT(current_node, "Expected a node atom in pattern.");
last_res = collect(std::move(last_res), prev_node, edge, current_node);
}
return last_res;
@ -179,8 +180,8 @@ class RuleBasedPlanner {
}
uint64_t merge_id = 0;
for (auto *clause : query_part.remaining_clauses) {
CHECK(!utils::IsSubtype(*clause, Match::kType))
<< "Unexpected Match in remaining clauses";
MG_ASSERT(!utils::IsSubtype(*clause, Match::kType),
"Unexpected Match in remaining clauses");
if (auto *ret = utils::Downcast<Return>(clause)) {
input_op = impl::GenReturn(
*ret, std::move(input_op), *context.symbol_table, is_write,
@ -303,7 +304,7 @@ class RuleBasedPlanner {
}
const auto &edge_symbol = symbol_table.at(*edge->identifier_);
if (!bound_symbols.insert(edge_symbol).second) {
LOG(FATAL) << "Symbols used for created edges cannot be redeclared.";
LOG_FATAL("Symbols used for created edges cannot be redeclared.");
}
auto node_info = node_to_creation_info(*node);
std::vector<std::pair<storage::PropertyId, Expression *>> properties;
@ -311,8 +312,9 @@ class RuleBasedPlanner {
for (const auto &kv : edge->properties_) {
properties.push_back({GetProperty(kv.first), kv.second});
}
CHECK(edge->edge_types_.size() == 1)
<< "Creating an edge with a single type should be required by syntax";
MG_ASSERT(
edge->edge_types_.size() == 1,
"Creating an edge with a single type should be required by syntax");
EdgeCreationInfo edge_info{edge_symbol, properties,
GetEdgeType(edge->edge_types_[0]),
edge->direction_};
@ -425,8 +427,8 @@ class RuleBasedPlanner {
symbol_table.at(*expansion.node2->identifier_);
auto existing_node = utils::Contains(bound_symbols, node_symbol);
const auto &edge_symbol = symbol_table.at(*edge->identifier_);
CHECK(!utils::Contains(bound_symbols, edge_symbol))
<< "Existing edges are not supported";
MG_ASSERT(!utils::Contains(bound_symbols, edge_symbol),
"Existing edges are not supported");
std::vector<storage::EdgeTypeId> edge_types;
edge_types.reserve(edge->edge_types_.size());
for (const auto &type : edge->edge_types_) {
@ -457,8 +459,8 @@ class RuleBasedPlanner {
bound_symbols.insert(filter_lambda.inner_edge_symbol).second;
bool inner_node_bound =
bound_symbols.insert(filter_lambda.inner_node_symbol).second;
CHECK(inner_edge_bound && inner_node_bound)
<< "An inner edge and node can't be bound from before";
MG_ASSERT(inner_edge_bound && inner_node_bound,
"An inner edge and node can't be bound from before");
}
// Join regular filters with lambda filter expression, so that they
// are done inline together. Semantic analysis should guarantee that
@ -488,19 +490,19 @@ class RuleBasedPlanner {
}
// TODO: Pass weight lambda.
CHECK(match_context.view == storage::View::OLD)
<< "ExpandVariable should only be planned with "
"storage::View::OLD";
MG_ASSERT(
match_context.view == storage::View::OLD,
"ExpandVariable should only be planned with storage::View::OLD");
last_op = std::make_unique<ExpandVariable>(
std::move(last_op), node1_symbol, node_symbol, edge_symbol,
edge->type_, expansion.direction, edge_types,
expansion.is_flipped, edge->lower_bound_, edge->upper_bound_,
existing_node, filter_lambda, weight_lambda, total_weight);
} else {
last_op = std::make_unique<Expand>(
std::move(last_op), node1_symbol, node_symbol, edge_symbol,
expansion.direction, edge_types, existing_node,
match_context.view);
last_op = std::make_unique<Expand>(std::move(last_op), node1_symbol,
node_symbol, edge_symbol,
expansion.direction, edge_types,
existing_node, match_context.view);
}
// Bind the expanded edge and node.
@ -537,14 +539,14 @@ class RuleBasedPlanner {
storage);
}
}
CHECK(named_paths.empty()) << "Expected to generate all named paths";
MG_ASSERT(named_paths.empty(), "Expected to generate all named paths");
// We bound all named path symbols, so just add them to new_symbols.
for (const auto &named_path : matching.named_paths) {
CHECK(utils::Contains(bound_symbols, named_path.first))
<< "Expected generated named path to have bound symbol";
MG_ASSERT(utils::Contains(bound_symbols, named_path.first),
"Expected generated named path to have bound symbol");
match_context.new_symbols.emplace_back(named_path.first);
}
CHECK(filters.empty()) << "Expected to generate all filters";
MG_ASSERT(filters.empty(), "Expected to generate all filters");
return last_op;
}
@ -563,12 +565,12 @@ class RuleBasedPlanner {
for (auto &set : merge.on_create_) {
on_create = HandleWriteClause(set, on_create, *context_->symbol_table,
context_->bound_symbols);
CHECK(on_create) << "Expected SET in MERGE ... ON CREATE";
MG_ASSERT(on_create, "Expected SET in MERGE ... ON CREATE");
}
for (auto &set : merge.on_match_) {
on_match = HandleWriteClause(set, on_match, *context_->symbol_table,
context_->bound_symbols);
CHECK(on_match) << "Expected SET in MERGE ... ON MATCH";
MG_ASSERT(on_match, "Expected SET in MERGE ... ON MATCH");
}
return std::make_unique<plan::Merge>(
std::move(input_op), std::move(on_match), std::move(on_create));

View File

@ -3,9 +3,8 @@
#include <limits>
#include <queue>
#include "glog/logging.h"
#include "utils/flag_validation.hpp"
#include "utils/logging.hpp"
DEFINE_VALIDATED_HIDDEN_uint64(
query_max_plans, 1000U, "Maximum number of generated plans for a query.",
@ -63,9 +62,10 @@ void AddNextExpansions(
}
if (symbol_table.at(*expansion.node1->identifier_) != node_symbol) {
// We are not expanding from node1, so flip the expansion.
DCHECK(expansion.node2 &&
symbol_table.at(*expansion.node2->identifier_) == node_symbol)
<< "Expected node_symbol to be bound in node2";
DMG_ASSERT(
expansion.node2 &&
symbol_table.at(*expansion.node2->identifier_) == node_symbol,
"Expected node_symbol to be bound in node2");
if (expansion.edge->type_ != EdgeAtom::Type::BREADTH_FIRST) {
// BFS must *not* be flipped. Doing that changes the BFS results.
std::swap(expansion.node1, expansion.node2);
@ -178,8 +178,9 @@ VaryMatchingStart::iterator::iterator(VaryMatchingStart *self, bool is_done)
current_matching_.expansions = ExpansionsFrom(
**start_nodes_it_, self_->matching_, self_->symbol_table_);
}
DCHECK(start_nodes_it_ || self_->nodes_.empty())
<< "start_nodes_it_ should only be nullopt when self_->nodes_ is empty";
DMG_ASSERT(
start_nodes_it_ || self_->nodes_.empty(),
"start_nodes_it_ should only be nullopt when self_->nodes_ is empty");
if (is_done) {
start_nodes_it_ = self_->nodes_.end();
}
@ -187,8 +188,9 @@ VaryMatchingStart::iterator::iterator(VaryMatchingStart *self, bool is_done)
VaryMatchingStart::iterator &VaryMatchingStart::iterator::operator++() {
if (!start_nodes_it_) {
DCHECK(self_->nodes_.empty())
<< "start_nodes_it_ should only be nullopt when self_->nodes_ is empty";
DMG_ASSERT(
self_->nodes_.empty(),
"start_nodes_it_ should only be nullopt when self_->nodes_ is empty");
start_nodes_it_ = self_->nodes_.end();
}
if (*start_nodes_it_ == self_->nodes_.end()) {
@ -282,15 +284,15 @@ VaryQueryPartMatching::iterator &VaryQueryPartMatching::iterator::operator++() {
void VaryQueryPartMatching::iterator::SetCurrentQueryPart() {
current_query_part_.matching = *matchings_it_;
DCHECK(optional_it_ != optional_end_ || optional_begin_ == optional_end_)
<< "Either there are no optional matchings or we can always "
"generate a variation";
DMG_ASSERT(optional_it_ != optional_end_ || optional_begin_ == optional_end_,
"Either there are no optional matchings or we can always "
"generate a variation");
if (optional_it_ != optional_end_) {
current_query_part_.optional_matching = *optional_it_;
}
DCHECK(merge_it_ != merge_end_ || merge_begin_ == merge_end_)
<< "Either there are no merge matchings or we can always generate "
"a variation";
DMG_ASSERT(merge_it_ != merge_end_ || merge_begin_ == merge_end_,
"Either there are no merge matchings or we can always generate "
"a variation");
if (merge_it_ != merge_end_) {
current_query_part_.merge_matching = *merge_it_;
}

View File

@ -92,8 +92,9 @@ class CartesianProduct {
++sets_it->second;
}
// We can now collect another product from the modified set iterators.
DCHECK(current_product_.size() == sets_.size())
<< "Expected size of current_product_ to match the size of sets_";
DMG_ASSERT(
current_product_.size() == sets_.size(),
"Expected size of current_product_ to match the size of sets_");
size_t i = 0;
// Change only the prefix of the product, remaining elements (after
// sets_it) should be the same.

View File

@ -6,9 +6,8 @@
#include <regex>
#include <type_traits>
#include <glog/logging.h>
#include "utils/algorithm.hpp"
#include "utils/logging.hpp"
#include "utils/math.hpp"
#include "utils/string.hpp"
@ -79,7 +78,7 @@ namespace {
// May throw whatever the constructor of U throws. `std::bad_alloc` is handled
// by returning nullptr.
template <class U, class... TArgs>
U *new_mgp_object(utils::MemoryResource *memory, TArgs &&... args) {
U *new_mgp_object(utils::MemoryResource *memory, TArgs &&...args) {
utils::Allocator<U> allocator(memory);
try {
return allocator.template new_object<U>(std::forward<TArgs>(args)...);
@ -89,7 +88,7 @@ U *new_mgp_object(utils::MemoryResource *memory, TArgs &&... args) {
}
template <class U, class... TArgs>
U *new_mgp_object(mgp_memory *memory, TArgs &&... args) {
U *new_mgp_object(mgp_memory *memory, TArgs &&...args) {
return new_mgp_object<U, TArgs...>(memory->impl,
std::forward<TArgs>(args)...);
}
@ -165,8 +164,8 @@ query::TypedValue ToTypedValue(const mgp_value &val,
return query::TypedValue(mgp_value_get_edge(&val)->impl, memory);
case MGP_VALUE_TYPE_PATH: {
const auto *path = mgp_value_get_path(&val);
CHECK(!path->vertices.empty());
CHECK(path->vertices.size() == path->edges.size() + 1);
MG_ASSERT(!path->vertices.empty());
MG_ASSERT(path->vertices.size() == path->edges.size() + 1);
query::Path tv_path(path->vertices[0].impl, memory);
for (size_t i = 0; i < path->edges.size(); ++i) {
tv_path.Expand(path->edges[i].impl);
@ -196,32 +195,32 @@ mgp_value::mgp_value(const char *val, utils::MemoryResource *m)
mgp_value::mgp_value(mgp_list *val, utils::MemoryResource *m) noexcept
: type(MGP_VALUE_TYPE_LIST), memory(m), list_v(val) {
CHECK(val->GetMemoryResource() == m)
<< "Unable to take ownership of a pointer with different allocator.";
MG_ASSERT(val->GetMemoryResource() == m,
"Unable to take ownership of a pointer with different allocator.");
}
mgp_value::mgp_value(mgp_map *val, utils::MemoryResource *m) noexcept
: type(MGP_VALUE_TYPE_MAP), memory(m), map_v(val) {
CHECK(val->GetMemoryResource() == m)
<< "Unable to take ownership of a pointer with different allocator.";
MG_ASSERT(val->GetMemoryResource() == m,
"Unable to take ownership of a pointer with different allocator.");
}
mgp_value::mgp_value(mgp_vertex *val, utils::MemoryResource *m) noexcept
: type(MGP_VALUE_TYPE_VERTEX), memory(m), vertex_v(val) {
CHECK(val->GetMemoryResource() == m)
<< "Unable to take ownership of a pointer with different allocator.";
MG_ASSERT(val->GetMemoryResource() == m,
"Unable to take ownership of a pointer with different allocator.");
}
mgp_value::mgp_value(mgp_edge *val, utils::MemoryResource *m) noexcept
: type(MGP_VALUE_TYPE_EDGE), memory(m), edge_v(val) {
CHECK(val->GetMemoryResource() == m)
<< "Unable to take ownership of a pointer with different allocator.";
MG_ASSERT(val->GetMemoryResource() == m,
"Unable to take ownership of a pointer with different allocator.");
}
mgp_value::mgp_value(mgp_path *val, utils::MemoryResource *m) noexcept
: type(MGP_VALUE_TYPE_PATH), memory(m), path_v(val) {
CHECK(val->GetMemoryResource() == m)
<< "Unable to take ownership of a pointer with different allocator.";
MG_ASSERT(val->GetMemoryResource() == m,
"Unable to take ownership of a pointer with different allocator.");
}
mgp_value::mgp_value(const query::TypedValue &tv, const mgp_graph *graph,
@ -402,7 +401,7 @@ mgp_value::mgp_value(const mgp_value &other, utils::MemoryResource *m)
namespace {
void DeleteValueMember(mgp_value *value) noexcept {
CHECK(value);
MG_ASSERT(value);
utils::Allocator<mgp_value> allocator(value->GetMemoryResource());
switch (mgp_value_get_type(value)) {
case MGP_VALUE_TYPE_NULL:
@ -735,14 +734,16 @@ mgp_path *mgp_path_make_with_start(const mgp_vertex *vertex,
}
mgp_path *mgp_path_copy(const mgp_path *path, mgp_memory *memory) {
CHECK(mgp_path_size(path) == path->vertices.size() - 1) << "Invalid mgp_path";
MG_ASSERT(mgp_path_size(path) == path->vertices.size() - 1,
"Invalid mgp_path");
return new_mgp_object<mgp_path>(memory, *path);
}
void mgp_path_destroy(mgp_path *path) { delete_mgp_object(path); }
int mgp_path_expand(mgp_path *path, const mgp_edge *edge) {
CHECK(mgp_path_size(path) == path->vertices.size() - 1) << "Invalid mgp_path";
MG_ASSERT(mgp_path_size(path) == path->vertices.size() - 1,
"Invalid mgp_path");
// Check that both the last vertex on path and dst_vertex are endpoints of
// the given edge.
const auto *src_vertex = &path->vertices.back();
@ -760,37 +761,37 @@ int mgp_path_expand(mgp_path *path, const mgp_edge *edge) {
try {
path->edges.push_back(*edge);
} catch (...) {
CHECK(mgp_path_size(path) == path->vertices.size() - 1);
MG_ASSERT(mgp_path_size(path) == path->vertices.size() - 1);
return 0;
}
try {
path->vertices.push_back(*dst_vertex);
} catch (...) {
path->edges.pop_back();
CHECK(mgp_path_size(path) == path->vertices.size() - 1);
MG_ASSERT(mgp_path_size(path) == path->vertices.size() - 1);
return 0;
}
CHECK(mgp_path_size(path) == path->vertices.size() - 1);
MG_ASSERT(mgp_path_size(path) == path->vertices.size() - 1);
return 1;
}
size_t mgp_path_size(const mgp_path *path) { return path->edges.size(); }
const mgp_vertex *mgp_path_vertex_at(const mgp_path *path, size_t i) {
CHECK(mgp_path_size(path) == path->vertices.size() - 1);
MG_ASSERT(mgp_path_size(path) == path->vertices.size() - 1);
if (i > mgp_path_size(path)) return nullptr;
return &path->vertices[i];
}
const mgp_edge *mgp_path_edge_at(const mgp_path *path, size_t i) {
CHECK(mgp_path_size(path) == path->vertices.size() - 1);
MG_ASSERT(mgp_path_size(path) == path->vertices.size() - 1);
if (i >= mgp_path_size(path)) return nullptr;
return &path->edges[i];
}
int mgp_path_equal(const struct mgp_path *p1, const struct mgp_path *p2) {
CHECK(mgp_path_size(p1) == p1->vertices.size() - 1);
CHECK(mgp_path_size(p2) == p2->vertices.size() - 1);
MG_ASSERT(mgp_path_size(p1) == p1->vertices.size() - 1);
MG_ASSERT(mgp_path_size(p2) == p2->vertices.size() - 1);
if (mgp_path_size(p1) != mgp_path_size(p2)) return 0;
const auto *start1 = mgp_path_vertex_at(p1, 0);
const auto *start2 = mgp_path_vertex_at(p2, 0);
@ -817,7 +818,7 @@ int mgp_result_set_error_msg(mgp_result *res, const char *msg) {
mgp_result_record *mgp_result_new_record(mgp_result *res) {
auto *memory = res->rows.get_allocator().GetMemoryResource();
CHECK(res->signature) << "Expected to have a valid signature";
MG_ASSERT(res->signature, "Expected to have a valid signature");
try {
res->rows.push_back(mgp_result_record{
res->signature,
@ -832,7 +833,7 @@ int mgp_result_record_insert(mgp_result_record *record, const char *field_name,
const mgp_value *val) {
auto *memory = record->values.get_allocator().GetMemoryResource();
// Validate field_name & val satisfy the procedure's result signature.
CHECK(record->signature) << "Expected to have a valid signature";
MG_ASSERT(record->signature, "Expected to have a valid signature");
auto find_it = record->signature->find(field_name);
if (find_it == record->signature->end()) return 0;
const auto *type = find_it->second.first;
@ -867,8 +868,9 @@ const mgp_property *mgp_properties_iterator_next(mgp_properties_iterator *it) {
// try ... catch just to be sure.
try {
if (it->current_it == it->pvs.end()) {
CHECK(!it->current) << "Iteration is already done, so it->current should "
"have been set to std::nullopt";
MG_ASSERT(!it->current,
"Iteration is already done, so it->current should "
"have been set to std::nullopt");
return nullptr;
}
if (++it->current_it == it->pvs.end()) {
@ -914,7 +916,7 @@ size_t mgp_vertex_labels_count(const mgp_vertex *v) {
case storage::Error::PROPERTIES_DISABLED:
case storage::Error::VERTEX_HAS_EDGES:
case storage::Error::SERIALIZATION_ERROR:
LOG(ERROR) << "Unexpected error when getting vertex labels.";
spdlog::error("Unexpected error when getting vertex labels.");
return 0;
}
}
@ -932,7 +934,7 @@ mgp_label mgp_vertex_label_at(const mgp_vertex *v, size_t i) {
case storage::Error::PROPERTIES_DISABLED:
case storage::Error::VERTEX_HAS_EDGES:
case storage::Error::SERIALIZATION_ERROR:
LOG(ERROR) << "Unexpected error when getting vertex labels.";
spdlog::error("Unexpected error when getting vertex labels.");
return mgp_label{nullptr};
}
}
@ -956,7 +958,7 @@ int mgp_vertex_has_label_named(const mgp_vertex *v, const char *name) {
// creating a new LabelId mapping and we need to handle that.
label = v->graph->impl->NameToLabel(name);
} catch (...) {
LOG(ERROR) << "Unable to allocate a LabelId mapping";
spdlog::error("Unable to allocate a LabelId mapping");
// If we need to allocate a new mapping, then the vertex does not have such
// a label, so return 0.
return 0;
@ -970,7 +972,7 @@ int mgp_vertex_has_label_named(const mgp_vertex *v, const char *name) {
case storage::Error::PROPERTIES_DISABLED:
case storage::Error::VERTEX_HAS_EDGES:
case storage::Error::SERIALIZATION_ERROR:
LOG(ERROR) << "Unexpected error when checking vertex has label.";
spdlog::error("Unexpected error when checking vertex has label.");
return 0;
}
}
@ -995,7 +997,7 @@ mgp_value *mgp_vertex_get_property(const mgp_vertex *v, const char *name,
case storage::Error::PROPERTIES_DISABLED:
case storage::Error::VERTEX_HAS_EDGES:
case storage::Error::SERIALIZATION_ERROR:
LOG(ERROR) << "Unexpected error when getting vertex property";
spdlog::error("Unexpected error when getting vertex property");
return nullptr;
}
}
@ -1024,7 +1026,7 @@ mgp_properties_iterator *mgp_vertex_iter_properties(const mgp_vertex *v,
case storage::Error::PROPERTIES_DISABLED:
case storage::Error::VERTEX_HAS_EDGES:
case storage::Error::SERIALIZATION_ERROR:
LOG(ERROR) << "Unexpected error when getting vertex properties";
spdlog::error("Unexpected error when getting vertex properties");
return nullptr;
}
}
@ -1056,7 +1058,7 @@ mgp_edges_iterator *mgp_vertex_iter_in_edges(const mgp_vertex *v,
case storage::Error::PROPERTIES_DISABLED:
case storage::Error::VERTEX_HAS_EDGES:
case storage::Error::SERIALIZATION_ERROR:
LOG(ERROR) << "Unexpected error when getting in edges";
spdlog::error("Unexpected error when getting in edges");
mgp_edges_iterator_destroy(it);
return nullptr;
}
@ -1089,7 +1091,7 @@ mgp_edges_iterator *mgp_vertex_iter_out_edges(const mgp_vertex *v,
case storage::Error::PROPERTIES_DISABLED:
case storage::Error::VERTEX_HAS_EDGES:
case storage::Error::SERIALIZATION_ERROR:
LOG(ERROR) << "Unexpected error when getting out edges";
spdlog::error("Unexpected error when getting out edges");
mgp_edges_iterator_destroy(it);
return nullptr;
}
@ -1116,8 +1118,9 @@ const mgp_edge *mgp_edges_iterator_next(mgp_edges_iterator *it) {
if (!it->in && !it->out) return nullptr;
auto next = [&](auto *impl_it, const auto &end) -> const mgp_edge * {
if (*impl_it == end) {
CHECK(!it->current_e) << "Iteration is already done, so it->current_e "
"should have been set to std::nullopt";
MG_ASSERT(!it->current_e,
"Iteration is already done, so it->current_e "
"should have been set to std::nullopt");
return nullptr;
}
if (++(*impl_it) == end) {
@ -1184,7 +1187,7 @@ mgp_value *mgp_edge_get_property(const mgp_edge *e, const char *name,
case storage::Error::PROPERTIES_DISABLED:
case storage::Error::VERTEX_HAS_EDGES:
case storage::Error::SERIALIZATION_ERROR:
LOG(ERROR) << "Unexpected error when getting edge property";
spdlog::error("Unexpected error when getting edge property");
return nullptr;
}
}
@ -1214,7 +1217,7 @@ mgp_properties_iterator *mgp_edge_iter_properties(const mgp_edge *e,
case storage::Error::PROPERTIES_DISABLED:
case storage::Error::VERTEX_HAS_EDGES:
case storage::Error::SERIALIZATION_ERROR:
LOG(ERROR) << "Unexpected error when getting edge properties";
spdlog::error("Unexpected error when getting edge properties");
return nullptr;
}
}
@ -1257,8 +1260,9 @@ const mgp_vertex *mgp_vertices_iterator_get(const mgp_vertices_iterator *it) {
const mgp_vertex *mgp_vertices_iterator_next(mgp_vertices_iterator *it) {
try {
if (it->current_it == it->vertices.end()) {
CHECK(!it->current_v) << "Iteration is already done, so it->current_v "
"should have been set to std::nullopt";
MG_ASSERT(!it->current_v,
"Iteration is already done, so it->current_v "
"should have been set to std::nullopt");
return nullptr;
}
if (++it->current_it == it->vertices.end()) {
@ -1479,7 +1483,7 @@ int mgp_proc_add_deprecated_result(mgp_proc *proc, const char *name,
}
int mgp_must_abort(const mgp_graph *graph) {
CHECK(graph->ctx);
MG_ASSERT(graph->ctx);
return query::MustAbort(*graph->ctx);
}
@ -1521,7 +1525,7 @@ std::ostream &PrintValue(const TypedValue &value, std::ostream *stream) {
case TypedValue::Type::Vertex:
case TypedValue::Type::Edge:
case TypedValue::Type::Path:
LOG(FATAL) << "value must not be a graph element";
LOG_FATAL("value must not be a graph element");
}
}
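
Throughout this diff, glog's stream-style CHECK(cond) << msg and LOG(FATAL) << msg call sites become MG_ASSERT(cond, fmt, ...) and LOG_FATAL(fmt, ...) from the new utils/logging.hpp, which take fmt-style format strings just like the spdlog calls. The actual macro definitions are not part of this excerpt; the following is only a minimal sketch of how such wrappers could be layered on top of spdlog (the names MG_ASSERT and LOG_FATAL come from the diff, everything else is assumed):

#include <cstdlib>
#include <spdlog/spdlog.h>

// Hypothetical sketch only -- the real utils/logging.hpp is not shown here.
// LOG_FATAL logs at the critical level and terminates the process.
#define LOG_FATAL(...)             \
  do {                             \
    spdlog::critical(__VA_ARGS__); \
    std::abort();                  \
  } while (false)

// MG_ASSERT checks a condition and, on failure, logs the stringized condition
// plus an optional fmt-style message before aborting. C++20 __VA_OPT__ covers
// the bare MG_ASSERT(cond) form that also appears in this diff.
#define MG_ASSERT(expr, ...)                            \
  do {                                                  \
    if (!(expr)) {                                      \
      spdlog::critical("Assertion `{}` failed", #expr); \
      __VA_OPT__(spdlog::critical(__VA_ARGS__);)        \
      std::abort();                                     \
    }                                                   \
  } while (false)

With wrappers along these lines, a call such as MG_ASSERT(res->signature, "Expected to have a valid signature") keeps the old fatal-on-failure semantics while routing the message through whatever spdlog sinks are configured at startup.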


@ -9,6 +9,7 @@ extern "C" {
#include "py/py.hpp"
#include "query/procedure/py_module.hpp"
#include "utils/file.hpp"
#include "utils/logging.hpp"
#include "utils/pmr/vector.hpp"
#include "utils/string.hpp"
@ -93,9 +94,11 @@ void RegisterMgLoad(ModuleRegistry *module_registry, utils::RWLock *lock,
auto load_cb = [module_registry, with_unlock_shared](
const mgp_list *args, const mgp_graph *, mgp_result *res,
mgp_memory *) {
CHECK(mgp_list_size(args) == 1U) << "Should have been type checked already";
MG_ASSERT(mgp_list_size(args) == 1U,
"Should have been type checked already");
const mgp_value *arg = mgp_list_at(args, 0);
CHECK(mgp_value_is_string(arg)) << "Should have been type checked already";
MG_ASSERT(mgp_value_is_string(arg),
"Should have been type checked already");
bool succ = false;
with_unlock_shared([&]() {
succ = module_registry->LoadOrReloadModuleFromName(
@ -226,13 +229,13 @@ SharedLibraryModule::~SharedLibraryModule() {
}
bool SharedLibraryModule::Load(const std::filesystem::path &file_path) {
CHECK(!handle_) << "Attempting to load an already loaded module...";
LOG(INFO) << "Loading module " << file_path << " ...";
MG_ASSERT(!handle_, "Attempting to load an already loaded module...");
spdlog::info("Loading module {}...", file_path);
file_path_ = file_path;
dlerror(); // Clear any existing error.
handle_ = dlopen(file_path.c_str(), RTLD_NOW | RTLD_LOCAL);
if (!handle_) {
LOG(ERROR) << "Unable to load module " << file_path << "; " << dlerror();
spdlog::error("Unable to load module {}; {}", file_path, dlerror());
return false;
}
// Get required mgp_init_module
@ -240,52 +243,52 @@ bool SharedLibraryModule::Load(const std::filesystem::path &file_path) {
dlsym(handle_, "mgp_init_module"));
const char *error = dlerror();
if (!init_fn_ || error) {
LOG(ERROR) << "Unable to load module " << file_path << "; " << error;
spdlog::error("Unable to load module {}; {}", file_path, error);
dlclose(handle_);
handle_ = nullptr;
return false;
}
if (!WithModuleRegistration(
&procedures_, [&](auto *module_def, auto *memory) {
// Run mgp_init_module which must succeed.
int init_res = init_fn_(module_def, memory);
if (init_res != 0) {
LOG(ERROR) << "Unable to load module " << file_path
<< "; mgp_init_module returned " << init_res;
dlclose(handle_);
handle_ = nullptr;
return false;
}
return true;
})) {
if (!WithModuleRegistration(&procedures_, [&](auto *module_def,
auto *memory) {
// Run mgp_init_module which must succeed.
int init_res = init_fn_(module_def, memory);
if (init_res != 0) {
spdlog::error("Unable to load module {}; mgp_init_module_returned {}",
file_path, init_res);
dlclose(handle_);
handle_ = nullptr;
return false;
}
return true;
})) {
return false;
}
// Get optional mgp_shutdown_module
shutdown_fn_ =
reinterpret_cast<int (*)()>(dlsym(handle_, "mgp_shutdown_module"));
error = dlerror();
if (error)
LOG(WARNING) << "When loading module " << file_path << "; " << error;
LOG(INFO) << "Loaded module " << file_path;
if (error) spdlog::warn("When loading module {}; {}", file_path, error);
spdlog::info("Loaded module {}", file_path);
return true;
}
bool SharedLibraryModule::Close() {
CHECK(handle_) << "Attempting to close a module that has not been loaded...";
LOG(INFO) << "Closing module " << file_path_ << " ...";
MG_ASSERT(handle_,
"Attempting to close a module that has not been loaded...");
spdlog::info("Closing module {}...", file_path_);
// non-existent shutdown function is semantically the same as a shutdown
// function that does nothing.
int shutdown_res = 0;
if (shutdown_fn_) shutdown_res = shutdown_fn_();
if (shutdown_res != 0) {
LOG(WARNING) << "When closing module " << file_path_
<< "; mgp_shutdown_module returned " << shutdown_res;
spdlog::warn("When closing module {}; mgp_shutdown_module returned {}",
file_path_, shutdown_res);
}
if (dlclose(handle_) != 0) {
LOG(ERROR) << "Failed to close module " << file_path_ << "; " << dlerror();
spdlog::error("Failed to close module {}; {}", file_path_, dlerror());
return false;
}
LOG(INFO) << "Closed module " << file_path_;
spdlog::info("Closed module {}", file_path_);
handle_ = nullptr;
procedures_.clear();
return true;
@ -293,8 +296,9 @@ bool SharedLibraryModule::Close() {
const std::map<std::string, mgp_proc, std::less<>>
*SharedLibraryModule::Procedures() const {
CHECK(handle_) << "Attempting to access procedures of a module that has not "
"been loaded...";
MG_ASSERT(handle_,
"Attempting to access procedures of a module that has not "
"been loaded...");
return &procedures_;
}
@ -327,13 +331,13 @@ PythonModule::~PythonModule() {
}
bool PythonModule::Load(const std::filesystem::path &file_path) {
CHECK(!py_module_) << "Attempting to load an already loaded module...";
LOG(INFO) << "Loading module " << file_path << " ...";
MG_ASSERT(!py_module_, "Attempting to load an already loaded module...");
spdlog::info("Loading module {}...", file_path);
file_path_ = file_path;
auto gil = py::EnsureGIL();
auto maybe_exc = py::AppendToSysPath(file_path.parent_path().c_str());
if (maybe_exc) {
LOG(ERROR) << "Unable to load module " << file_path << "; " << *maybe_exc;
spdlog::error("Unable to load module {}; {}", file_path, *maybe_exc);
return false;
}
py_module_ =
@ -341,18 +345,18 @@ bool PythonModule::Load(const std::filesystem::path &file_path) {
return ImportPyModule(file_path.stem().c_str(), module_def);
});
if (py_module_) {
LOG(INFO) << "Loaded module " << file_path;
spdlog::info("Loaded module {}", file_path);
return true;
}
auto exc_info = py::FetchError().value();
LOG(ERROR) << "Unable to load module " << file_path << "; " << exc_info;
spdlog::error("Unable to load module {}; {}", file_path, exc_info);
return false;
}
bool PythonModule::Close() {
CHECK(py_module_)
<< "Attempting to close a module that has not been loaded...";
LOG(INFO) << "Closing module " << file_path_ << " ...";
MG_ASSERT(py_module_,
"Attempting to close a module that has not been loaded...");
spdlog::info("Closing module {}...", file_path_);
// The procedures are closures which hold references to the Python callbacks.
// Releasing these references might result in deallocations so we need to take
// the GIL.
@ -363,19 +367,20 @@ bool PythonModule::Close() {
py::Object sys(PyImport_ImportModule("sys"));
if (PyDict_DelItemString(sys.GetAttr("modules").Ptr(),
file_path_.stem().c_str()) != 0) {
LOG(WARNING) << "Failed to remove the module from sys.modules";
spdlog::warn("Failed to remove the module from sys.modules");
py_module_ = py::Object(nullptr);
return false;
}
py_module_ = py::Object(nullptr);
LOG(INFO) << "Closed module " << file_path_;
spdlog::info("Closed module {}", file_path_);
return true;
}
const std::map<std::string, mgp_proc, std::less<>> *PythonModule::Procedures()
const {
CHECK(py_module_) << "Attempting to access procedures of a module that has "
"not been loaded...";
MG_ASSERT(py_module_,
"Attempting to access procedures of a module that has "
"not been loaded...");
return &procedures_;
}
@ -384,7 +389,7 @@ namespace {
std::unique_ptr<Module> LoadModuleFromFile(const std::filesystem::path &path) {
const auto &ext = path.extension();
if (ext != ".so" && ext != ".py") {
LOG(WARNING) << "Unknown query module file " << path;
spdlog::warn("Unknown query module file {}", path);
return nullptr;
}
std::unique_ptr<Module> module;
@ -404,10 +409,10 @@ std::unique_ptr<Module> LoadModuleFromFile(const std::filesystem::path &path) {
bool ModuleRegistry::RegisterModule(const std::string_view &name,
std::unique_ptr<Module> module) {
CHECK(!name.empty()) << "Module name cannot be empty";
CHECK(module) << "Tried to register an invalid module";
MG_ASSERT(!name.empty(), "Module name cannot be empty");
MG_ASSERT(module, "Tried to register an invalid module");
if (modules_.find(name) != modules_.end()) {
LOG(ERROR) << "Unable to overwrite an already loaded module " << name;
spdlog::error("Unable to overwrite an already loaded module {}", name);
return false;
}
modules_.emplace(name, std::move(module));
@ -415,8 +420,8 @@ bool ModuleRegistry::RegisterModule(const std::string_view &name,
}
void ModuleRegistry::DoUnloadAllModules() {
CHECK(modules_.find("mg") != modules_.end())
<< "Expected the builtin \"mg\" module to be present.";
MG_ASSERT(modules_.find("mg") != modules_.end(),
"Expected the builtin \"mg\" module to be present.");
// This is correct because the destructor will close each module. However,
// we don't want to unload the builtin "mg" module.
auto module = std::move(modules_["mg"]);
@ -443,12 +448,12 @@ bool ModuleRegistry::LoadOrReloadModuleFromName(const std::string_view &name) {
auto found_it = modules_.find(name);
if (found_it != modules_.end()) {
if (!found_it->second->Close()) {
LOG(WARNING) << "Failed to close module " << found_it->first;
spdlog::warn("Failed to close module {}", found_it->first);
}
modules_.erase(found_it);
}
if (!utils::DirExists(modules_dir_)) {
LOG(ERROR) << "Module directory " << modules_dir_ << " doesn't exist";
spdlog::error("Module directory {} doesn't exist", modules_dir_);
return false;
}
for (const auto &entry : std::filesystem::directory_iterator(modules_dir_)) {
@ -465,7 +470,7 @@ bool ModuleRegistry::LoadOrReloadModuleFromName(const std::string_view &name) {
void ModuleRegistry::UnloadAndLoadModulesFromDirectory() {
if (modules_dir_.empty()) return;
if (!utils::DirExists(modules_dir_)) {
LOG(ERROR) << "Module directory " << modules_dir_ << " doesn't exist";
spdlog::error("Module directory {} doesn't exist", modules_dir_);
return;
}
std::unique_lock<utils::RWLock> guard(lock_);
@ -502,7 +507,7 @@ std::optional<std::pair<procedure::ModulePtr, const mgp_proc *>> FindProcedure(
utils::Split(&name_parts, fully_qualified_procedure_name, ".");
if (name_parts.size() == 1U) return std::nullopt;
auto last_dot_pos = fully_qualified_procedure_name.find_last_of('.');
CHECK(last_dot_pos != std::string_view::npos);
MG_ASSERT(last_dot_pos != std::string_view::npos);
const auto &module_name =
fully_qualified_procedure_name.substr(0, last_dot_pos);
const auto &proc_name = name_parts.back();


@ -55,8 +55,8 @@ struct PyVerticesIterator {
PyObject *MakePyVertex(const mgp_vertex &vertex, PyGraph *py_graph);
void PyVerticesIteratorDealloc(PyVerticesIterator *self) {
CHECK(self->it);
CHECK(self->py_graph);
MG_ASSERT(self->it);
MG_ASSERT(self->py_graph);
// Avoid invoking `mgp_vertices_iterator_destroy` if we are not in valid
// execution context. The query execution should free all memory used during
// execution, so we may cause a double free issue.
@ -67,9 +67,9 @@ void PyVerticesIteratorDealloc(PyVerticesIterator *self) {
PyObject *PyVerticesIteratorGet(PyVerticesIterator *self,
PyObject *Py_UNUSED(ignored)) {
CHECK(self->it);
CHECK(self->py_graph);
CHECK(self->py_graph->graph);
MG_ASSERT(self->it);
MG_ASSERT(self->py_graph);
MG_ASSERT(self->py_graph->graph);
const auto *vertex = mgp_vertices_iterator_get(self->it);
if (!vertex) Py_RETURN_NONE;
return MakePyVertex(*vertex, self->py_graph);
@ -77,9 +77,9 @@ PyObject *PyVerticesIteratorGet(PyVerticesIterator *self,
PyObject *PyVerticesIteratorNext(PyVerticesIterator *self,
PyObject *Py_UNUSED(ignored)) {
CHECK(self->it);
CHECK(self->py_graph);
CHECK(self->py_graph->graph);
MG_ASSERT(self->it);
MG_ASSERT(self->py_graph);
MG_ASSERT(self->py_graph->graph);
const auto *vertex = mgp_vertices_iterator_next(self->it);
if (!vertex) Py_RETURN_NONE;
return MakePyVertex(*vertex, self->py_graph);
@ -118,8 +118,8 @@ struct PyEdgesIterator {
PyObject *MakePyEdge(const mgp_edge &edge, PyGraph *py_graph);
void PyEdgesIteratorDealloc(PyEdgesIterator *self) {
CHECK(self->it);
CHECK(self->py_graph);
MG_ASSERT(self->it);
MG_ASSERT(self->py_graph);
// Avoid invoking `mgp_edges_iterator_destroy` if we are not in valid
// execution context. The query execution should free all memory used during
// execution, so we may cause a double free issue.
@ -130,9 +130,9 @@ void PyEdgesIteratorDealloc(PyEdgesIterator *self) {
PyObject *PyEdgesIteratorGet(PyEdgesIterator *self,
PyObject *Py_UNUSED(ignored)) {
CHECK(self->it);
CHECK(self->py_graph);
CHECK(self->py_graph->graph);
MG_ASSERT(self->it);
MG_ASSERT(self->py_graph);
MG_ASSERT(self->py_graph->graph);
const auto *edge = mgp_edges_iterator_get(self->it);
if (!edge) Py_RETURN_NONE;
return MakePyEdge(*edge, self->py_graph);
@ -140,9 +140,9 @@ PyObject *PyEdgesIteratorGet(PyEdgesIterator *self,
PyObject *PyEdgesIteratorNext(PyEdgesIterator *self,
PyObject *Py_UNUSED(ignored)) {
CHECK(self->it);
CHECK(self->py_graph);
CHECK(self->py_graph->graph);
MG_ASSERT(self->it);
MG_ASSERT(self->py_graph);
MG_ASSERT(self->py_graph->graph);
const auto *edge = mgp_edges_iterator_next(self->it);
if (!edge) Py_RETURN_NONE;
return MakePyEdge(*edge, self->py_graph);
@ -183,8 +183,8 @@ PyObject *PyGraphIsValid(PyGraph *self, PyObject *Py_UNUSED(ignored)) {
PyObject *MakePyVertex(mgp_vertex *vertex, PyGraph *py_graph);
PyObject *PyGraphGetVertexById(PyGraph *self, PyObject *args) {
CHECK(self->graph);
CHECK(self->memory);
MG_ASSERT(self->graph);
MG_ASSERT(self->memory);
static_assert(std::is_same_v<int64_t, long>);
int64_t id;
if (!PyArg_ParseTuple(args, "l", &id)) return nullptr;
@ -201,8 +201,8 @@ PyObject *PyGraphGetVertexById(PyGraph *self, PyObject *args) {
}
PyObject *PyGraphIterVertices(PyGraph *self, PyObject *Py_UNUSED(ignored)) {
CHECK(self->graph);
CHECK(self->memory);
MG_ASSERT(self->graph);
MG_ASSERT(self->memory);
auto *vertices_it = mgp_graph_iter_vertices(self->graph, self->memory);
if (!vertices_it) {
PyErr_SetString(PyExc_MemoryError,
@ -222,7 +222,7 @@ PyObject *PyGraphIterVertices(PyGraph *self, PyObject *Py_UNUSED(ignored)) {
}
PyObject *PyGraphMustAbort(PyGraph *self, PyObject *Py_UNUSED(ignored)) {
CHECK(self->graph);
MG_ASSERT(self->graph);
return PyBool_FromLong(mgp_must_abort(self->graph));
}
@ -255,7 +255,7 @@ static PyTypeObject PyGraphType = {
// clang-format on
PyObject *MakePyGraph(const mgp_graph *graph, mgp_memory *memory) {
CHECK(!graph || (graph && memory));
MG_ASSERT(!graph || (graph && memory));
auto *py_graph = PyObject_New(PyGraph, &PyGraphType);
if (!py_graph) return nullptr;
py_graph->graph = graph;
@ -281,7 +281,7 @@ static PyTypeObject PyCypherTypeType = {
// clang-format on
PyObject *MakePyCypherType(const mgp_type *type) {
CHECK(type);
MG_ASSERT(type);
auto *py_type = PyObject_New(PyCypherType, &PyCypherTypeType);
if (!py_type) return nullptr;
py_type->type = type;
@ -296,7 +296,7 @@ struct PyQueryProc {
// clang-format on
PyObject *PyQueryProcAddArg(PyQueryProc *self, PyObject *args) {
CHECK(self->proc);
MG_ASSERT(self->proc);
const char *name = nullptr;
PyObject *py_type = nullptr;
if (!PyArg_ParseTuple(args, "sO", &name, &py_type)) return nullptr;
@ -313,7 +313,7 @@ PyObject *PyQueryProcAddArg(PyQueryProc *self, PyObject *args) {
}
PyObject *PyQueryProcAddOptArg(PyQueryProc *self, PyObject *args) {
CHECK(self->proc);
MG_ASSERT(self->proc);
const char *name = nullptr;
PyObject *py_type = nullptr;
PyObject *py_value = nullptr;
@ -341,7 +341,7 @@ PyObject *PyQueryProcAddOptArg(PyQueryProc *self, PyObject *args) {
PyErr_SetString(PyExc_RuntimeError, e.what());
return nullptr;
}
CHECK(value);
MG_ASSERT(value);
if (!mgp_proc_add_opt_arg(self->proc, name, type, value)) {
mgp_value_destroy(value);
PyErr_SetString(PyExc_ValueError, "Invalid call to mgp_proc_add_opt_arg.");
@ -352,7 +352,7 @@ PyObject *PyQueryProcAddOptArg(PyQueryProc *self, PyObject *args) {
}
PyObject *PyQueryProcAddResult(PyQueryProc *self, PyObject *args) {
CHECK(self->proc);
MG_ASSERT(self->proc);
const char *name = nullptr;
PyObject *py_type = nullptr;
if (!PyArg_ParseTuple(args, "sO", &name, &py_type)) return nullptr;
@ -369,7 +369,7 @@ PyObject *PyQueryProcAddResult(PyQueryProc *self, PyObject *args) {
}
PyObject *PyQueryProcAddDeprecatedResult(PyQueryProc *self, PyObject *args) {
CHECK(self->proc);
MG_ASSERT(self->proc);
const char *name = nullptr;
PyObject *py_type = nullptr;
if (!PyArg_ParseTuple(args, "sO", &name, &py_type)) return nullptr;
@ -422,8 +422,8 @@ struct PyQueryModule {
// clang-format on
py::Object MgpListToPyTuple(const mgp_list *list, PyGraph *py_graph) {
CHECK(list);
CHECK(py_graph);
MG_ASSERT(list);
MG_ASSERT(py_graph);
const size_t len = mgp_list_size(list);
py::Object py_tuple(PyTuple_New(len));
if (!py_tuple) return nullptr;
@ -478,7 +478,7 @@ std::optional<py::ExceptionInfo> AddRecordFromPython(mgp_result *result,
for (Py_ssize_t i = 0; i < len; ++i) {
auto *item = PyList_GET_ITEM(items.Ptr(), i);
if (!item) return py::FetchError();
CHECK(PyTuple_Check(item));
MG_ASSERT(PyTuple_Check(item));
auto *key = PyTuple_GetItem(item, 0);
if (!key) return py::FetchError();
if (!PyUnicode_Check(key)) {
@ -502,7 +502,7 @@ std::optional<py::ExceptionInfo> AddRecordFromPython(mgp_result *result,
PyErr_SetString(PyExc_ValueError, e.what());
return py::FetchError();
}
CHECK(field_val);
MG_ASSERT(field_val);
if (!mgp_result_record_insert(record, field_name, field_val)) {
std::stringstream ss;
ss << "Unable to insert field '" << py::Object::FromBorrow(key)
@ -565,16 +565,22 @@ void CallPythonProcedure(py::Object py_cb, const mgp_list *args,
// internally used `mgp_*` structs will stay unfreed and a memory leak
// will be reported at the end of the query execution.
py::Object gc(PyImport_ImportModule("gc"));
LOG_IF(FATAL, !gc) << py::FetchError().value();
LOG_IF(FATAL, !gc.CallMethod("collect")) << py::FetchError().value();
if (!gc) {
LOG_FATAL(py::FetchError().value());
}
if (!gc.CallMethod("collect")) {
LOG_FATAL(py::FetchError().value());
}
// After making sure all references from our side have been cleared,
// invalidate the `_mgp.Graph` object. If the user kept a reference to one
// of our `_mgp` instances then this will prevent them from using those
// objects (whose internal `mgp_*` pointers are now invalid and would cause
// a crash).
LOG_IF(FATAL, !py_graph.CallMethod("invalidate"))
<< py::FetchError().value();
if (!py_graph.CallMethod("invalidate")) {
LOG_FATAL(py::FetchError().value());
}
};
// It is *VERY IMPORTANT* to note that this code takes great care not to keep
@ -610,7 +616,7 @@ void CallPythonProcedure(py::Object py_cb, const mgp_list *args,
} // namespace
PyObject *PyQueryModuleAddReadProcedure(PyQueryModule *self, PyObject *cb) {
CHECK(self->module);
MG_ASSERT(self->module);
if (!PyCallable_Check(cb)) {
PyErr_SetString(PyExc_TypeError, "Expected a callable object.");
return nullptr;
@ -666,7 +672,7 @@ static PyTypeObject PyQueryModuleType = {
// clang-format on
PyObject *MakePyQueryModule(mgp_module *module) {
CHECK(module);
MG_ASSERT(module);
auto *py_query_module = PyObject_New(PyQueryModule, &PyQueryModuleType);
if (!py_query_module) return nullptr;
py_query_module->module = module;
@ -780,8 +786,8 @@ struct PyPropertiesIterator {
// clang-format on
void PyPropertiesIteratorDealloc(PyPropertiesIterator *self) {
CHECK(self->it);
CHECK(self->py_graph);
MG_ASSERT(self->it);
MG_ASSERT(self->py_graph);
// Avoid invoking `mgp_properties_iterator_destroy` if we are not in valid
// execution context. The query execution should free all memory used during
// execution, so we may cause a double free issue.
@ -792,9 +798,9 @@ void PyPropertiesIteratorDealloc(PyPropertiesIterator *self) {
PyObject *PyPropertiesIteratorGet(PyPropertiesIterator *self,
PyObject *Py_UNUSED(ignored)) {
CHECK(self->it);
CHECK(self->py_graph);
CHECK(self->py_graph->graph);
MG_ASSERT(self->it);
MG_ASSERT(self->py_graph);
MG_ASSERT(self->py_graph->graph);
const auto *property = mgp_properties_iterator_get(self->it);
if (!property) Py_RETURN_NONE;
py::Object py_name(PyUnicode_FromString(property->name));
@ -806,9 +812,9 @@ PyObject *PyPropertiesIteratorGet(PyPropertiesIterator *self,
PyObject *PyPropertiesIteratorNext(PyPropertiesIterator *self,
PyObject *Py_UNUSED(ignored)) {
CHECK(self->it);
CHECK(self->py_graph);
CHECK(self->py_graph->graph);
MG_ASSERT(self->it);
MG_ASSERT(self->py_graph);
MG_ASSERT(self->py_graph->graph);
const auto *property = mgp_properties_iterator_next(self->it);
if (!property) Py_RETURN_NONE;
py::Object py_name(PyUnicode_FromString(property->name));
@ -847,36 +853,36 @@ struct PyEdge {
// clang-format on
PyObject *PyEdgeGetTypeName(PyEdge *self, PyObject *Py_UNUSED(ignored)) {
CHECK(self);
CHECK(self->edge);
CHECK(self->py_graph);
CHECK(self->py_graph->graph);
MG_ASSERT(self);
MG_ASSERT(self->edge);
MG_ASSERT(self->py_graph);
MG_ASSERT(self->py_graph->graph);
return PyUnicode_FromString(mgp_edge_get_type(self->edge).name);
}
PyObject *PyEdgeFromVertex(PyEdge *self, PyObject *Py_UNUSED(ignored)) {
CHECK(self);
CHECK(self->edge);
CHECK(self->py_graph);
CHECK(self->py_graph->graph);
MG_ASSERT(self);
MG_ASSERT(self->edge);
MG_ASSERT(self->py_graph);
MG_ASSERT(self->py_graph->graph);
const auto *vertex = mgp_edge_get_from(self->edge);
CHECK(vertex);
MG_ASSERT(vertex);
return MakePyVertex(*vertex, self->py_graph);
}
PyObject *PyEdgeToVertex(PyEdge *self, PyObject *Py_UNUSED(ignored)) {
CHECK(self);
CHECK(self->edge);
CHECK(self->py_graph);
CHECK(self->py_graph->graph);
MG_ASSERT(self);
MG_ASSERT(self->edge);
MG_ASSERT(self->py_graph);
MG_ASSERT(self->py_graph->graph);
const auto *vertex = mgp_edge_get_to(self->edge);
CHECK(vertex);
MG_ASSERT(vertex);
return MakePyVertex(*vertex, self->py_graph);
}
void PyEdgeDealloc(PyEdge *self) {
CHECK(self->edge);
CHECK(self->py_graph);
MG_ASSERT(self->edge);
MG_ASSERT(self->py_graph);
// Avoid invoking `mgp_edge_destroy` if we are not in valid execution context.
// The query execution should free all memory used during execution, so we may
// cause a double free issue.
@ -890,18 +896,18 @@ PyObject *PyEdgeIsValid(PyEdge *self, PyObject *Py_UNUSED(ignored)) {
}
PyObject *PyEdgeGetId(PyEdge *self, PyObject *Py_UNUSED(ignored)) {
CHECK(self);
CHECK(self->edge);
CHECK(self->py_graph);
CHECK(self->py_graph->graph);
MG_ASSERT(self);
MG_ASSERT(self->edge);
MG_ASSERT(self->py_graph);
MG_ASSERT(self->py_graph->graph);
return PyLong_FromLongLong(mgp_edge_get_id(self->edge).as_int);
}
PyObject *PyEdgeIterProperties(PyEdge *self, PyObject *Py_UNUSED(ignored)) {
CHECK(self);
CHECK(self->edge);
CHECK(self->py_graph);
CHECK(self->py_graph->graph);
MG_ASSERT(self);
MG_ASSERT(self->edge);
MG_ASSERT(self->py_graph);
MG_ASSERT(self->py_graph->graph);
auto *properties_it =
mgp_edge_iter_properties(self->edge, self->py_graph->memory);
if (!properties_it) {
@ -922,10 +928,10 @@ PyObject *PyEdgeIterProperties(PyEdge *self, PyObject *Py_UNUSED(ignored)) {
}
PyObject *PyEdgeGetProperty(PyEdge *self, PyObject *args) {
CHECK(self);
CHECK(self->edge);
CHECK(self->py_graph);
CHECK(self->py_graph->graph);
MG_ASSERT(self);
MG_ASSERT(self->edge);
MG_ASSERT(self->py_graph);
MG_ASSERT(self->py_graph->graph);
const char *prop_name = nullptr;
if (!PyArg_ParseTuple(args, "s", &prop_name)) return nullptr;
auto *prop_value =
@ -980,8 +986,8 @@ static PyTypeObject PyEdgeType = {
/// The created instance references an existing `_mgp.Graph` instance, which
/// marks the execution context.
PyObject *MakePyEdge(const mgp_edge &edge, PyGraph *py_graph) {
CHECK(py_graph);
CHECK(py_graph->graph && py_graph->memory);
MG_ASSERT(py_graph);
MG_ASSERT(py_graph->graph && py_graph->memory);
auto *edge_copy = mgp_edge_copy(&edge, py_graph->memory);
if (!edge_copy) {
PyErr_SetString(PyExc_MemoryError, "Unable to allocate mgp_edge.");
@ -999,8 +1005,8 @@ PyObject *MakePyEdge(const mgp_edge &edge, PyGraph *py_graph) {
}
PyObject *PyEdgeRichCompare(PyObject *self, PyObject *other, int op) {
CHECK(self);
CHECK(other);
MG_ASSERT(self);
MG_ASSERT(other);
if (Py_TYPE(self) != &PyEdgeType || Py_TYPE(other) != &PyEdgeType ||
op != Py_EQ) {
@ -1009,8 +1015,8 @@ PyObject *PyEdgeRichCompare(PyObject *self, PyObject *other, int op) {
auto *e1 = reinterpret_cast<PyEdge *>(self);
auto *e2 = reinterpret_cast<PyEdge *>(other);
CHECK(e1->edge);
CHECK(e2->edge);
MG_ASSERT(e1->edge);
MG_ASSERT(e2->edge);
return PyBool_FromLong(mgp_edge_equal(e1->edge, e2->edge));
}
@ -1024,8 +1030,8 @@ struct PyVertex {
// clang-format on
void PyVertexDealloc(PyVertex *self) {
CHECK(self->vertex);
CHECK(self->py_graph);
MG_ASSERT(self->vertex);
MG_ASSERT(self->py_graph);
// Avoid invoking `mgp_vertex_destroy` if we are not in valid execution
// context. The query execution should free all memory used during
// execution, so we may cause a double free issue.
@ -1039,26 +1045,26 @@ PyObject *PyVertexIsValid(PyVertex *self, PyObject *Py_UNUSED(ignored)) {
}
PyObject *PyVertexGetId(PyVertex *self, PyObject *Py_UNUSED(ignored)) {
CHECK(self);
CHECK(self->vertex);
CHECK(self->py_graph);
CHECK(self->py_graph->graph);
MG_ASSERT(self);
MG_ASSERT(self->vertex);
MG_ASSERT(self->py_graph);
MG_ASSERT(self->py_graph->graph);
return PyLong_FromLongLong(mgp_vertex_get_id(self->vertex).as_int);
}
PyObject *PyVertexLabelsCount(PyVertex *self, PyObject *Py_UNUSED(ignored)) {
CHECK(self);
CHECK(self->vertex);
CHECK(self->py_graph);
CHECK(self->py_graph->graph);
MG_ASSERT(self);
MG_ASSERT(self->vertex);
MG_ASSERT(self->py_graph);
MG_ASSERT(self->py_graph->graph);
return PyLong_FromSize_t(mgp_vertex_labels_count(self->vertex));
}
PyObject *PyVertexLabelAt(PyVertex *self, PyObject *args) {
CHECK(self);
CHECK(self->vertex);
CHECK(self->py_graph);
CHECK(self->py_graph->graph);
MG_ASSERT(self);
MG_ASSERT(self->vertex);
MG_ASSERT(self->py_graph);
MG_ASSERT(self->py_graph->graph);
static_assert(std::numeric_limits<Py_ssize_t>::max() <=
std::numeric_limits<size_t>::max());
Py_ssize_t id;
@ -1073,10 +1079,10 @@ PyObject *PyVertexLabelAt(PyVertex *self, PyObject *args) {
}
PyObject *PyVertexIterInEdges(PyVertex *self, PyObject *Py_UNUSED(ignored)) {
CHECK(self);
CHECK(self->vertex);
CHECK(self->py_graph);
CHECK(self->py_graph->graph);
MG_ASSERT(self);
MG_ASSERT(self->vertex);
MG_ASSERT(self->py_graph);
MG_ASSERT(self->py_graph->graph);
auto *edges_it =
mgp_vertex_iter_in_edges(self->vertex, self->py_graph->memory);
if (!edges_it) {
@ -1096,10 +1102,10 @@ PyObject *PyVertexIterInEdges(PyVertex *self, PyObject *Py_UNUSED(ignored)) {
}
PyObject *PyVertexIterOutEdges(PyVertex *self, PyObject *Py_UNUSED(ignored)) {
CHECK(self);
CHECK(self->vertex);
CHECK(self->py_graph);
CHECK(self->py_graph->graph);
MG_ASSERT(self);
MG_ASSERT(self->vertex);
MG_ASSERT(self->py_graph);
MG_ASSERT(self->py_graph->graph);
auto *edges_it =
mgp_vertex_iter_out_edges(self->vertex, self->py_graph->memory);
if (!edges_it) {
@ -1119,10 +1125,10 @@ PyObject *PyVertexIterOutEdges(PyVertex *self, PyObject *Py_UNUSED(ignored)) {
}
PyObject *PyVertexIterProperties(PyVertex *self, PyObject *Py_UNUSED(ignored)) {
CHECK(self);
CHECK(self->vertex);
CHECK(self->py_graph);
CHECK(self->py_graph->graph);
MG_ASSERT(self);
MG_ASSERT(self->vertex);
MG_ASSERT(self->py_graph);
MG_ASSERT(self->py_graph->graph);
auto *properties_it =
mgp_vertex_iter_properties(self->vertex, self->py_graph->memory);
if (!properties_it) {
@ -1143,10 +1149,10 @@ PyObject *PyVertexIterProperties(PyVertex *self, PyObject *Py_UNUSED(ignored)) {
}
PyObject *PyVertexGetProperty(PyVertex *self, PyObject *args) {
CHECK(self);
CHECK(self->vertex);
CHECK(self->py_graph);
CHECK(self->py_graph->graph);
MG_ASSERT(self);
MG_ASSERT(self->vertex);
MG_ASSERT(self->py_graph);
MG_ASSERT(self->py_graph->graph);
const char *prop_name = nullptr;
if (!PyArg_ParseTuple(args, "s", &prop_name)) return nullptr;
auto *prop_value =
@ -1199,10 +1205,10 @@ static PyTypeObject PyVertexType = {
// clang-format on
PyObject *MakePyVertex(mgp_vertex *vertex, PyGraph *py_graph) {
CHECK(vertex);
CHECK(py_graph);
CHECK(py_graph->graph && py_graph->memory);
CHECK(vertex->GetMemoryResource() == py_graph->memory->impl);
MG_ASSERT(vertex);
MG_ASSERT(py_graph);
MG_ASSERT(py_graph->graph && py_graph->memory);
MG_ASSERT(vertex->GetMemoryResource() == py_graph->memory->impl);
auto *py_vertex = PyObject_New(PyVertex, &PyVertexType);
if (!py_vertex) return nullptr;
py_vertex->vertex = vertex;
@ -1212,8 +1218,8 @@ PyObject *MakePyVertex(mgp_vertex *vertex, PyGraph *py_graph) {
}
PyObject *MakePyVertex(const mgp_vertex &vertex, PyGraph *py_graph) {
CHECK(py_graph);
CHECK(py_graph->graph && py_graph->memory);
MG_ASSERT(py_graph);
MG_ASSERT(py_graph->graph && py_graph->memory);
auto *vertex_copy = mgp_vertex_copy(&vertex, py_graph->memory);
if (!vertex_copy) {
PyErr_SetString(PyExc_MemoryError, "Unable to allocate mgp_vertex.");
@ -1225,8 +1231,8 @@ PyObject *MakePyVertex(const mgp_vertex &vertex, PyGraph *py_graph) {
}
PyObject *PyVertexRichCompare(PyObject *self, PyObject *other, int op) {
CHECK(self);
CHECK(other);
MG_ASSERT(self);
MG_ASSERT(other);
if (Py_TYPE(self) != &PyVertexType || Py_TYPE(other) != &PyVertexType ||
op != Py_EQ) {
@ -1235,8 +1241,8 @@ PyObject *PyVertexRichCompare(PyObject *self, PyObject *other, int op) {
auto *v1 = reinterpret_cast<PyVertex *>(self);
auto *v2 = reinterpret_cast<PyVertex *>(other);
CHECK(v1->vertex);
CHECK(v2->vertex);
MG_ASSERT(v1->vertex);
MG_ASSERT(v2->vertex);
return PyBool_FromLong(mgp_vertex_equal(v1->vertex, v2->vertex));
}
@ -1250,8 +1256,8 @@ struct PyPath {
// clang-format on
void PyPathDealloc(PyPath *self) {
CHECK(self->path);
CHECK(self->py_graph);
MG_ASSERT(self->path);
MG_ASSERT(self->py_graph);
// Avoid invoking `mgp_path_destroy` if we are not in valid execution
// context. The query execution should free all memory used during
// execution, so we may cause a double free issue.
@ -1267,9 +1273,9 @@ PyObject *PyPathIsValid(PyPath *self, PyObject *Py_UNUSED(ignored)) {
PyObject *PyPathMakeWithStart(PyTypeObject *type, PyObject *vertex);
PyObject *PyPathExpand(PyPath *self, PyObject *edge) {
CHECK(self->path);
CHECK(self->py_graph);
CHECK(self->py_graph->graph);
MG_ASSERT(self->path);
MG_ASSERT(self->py_graph);
MG_ASSERT(self->py_graph->graph);
if (Py_TYPE(edge) != &PyEdgeType) {
PyErr_SetString(PyExc_TypeError, "Expected a _mgp.Edge.");
return nullptr;
@ -1293,16 +1299,16 @@ PyObject *PyPathExpand(PyPath *self, PyObject *edge) {
}
PyObject *PyPathSize(PyPath *self, PyObject *Py_UNUSED(ignored)) {
CHECK(self->path);
CHECK(self->py_graph);
CHECK(self->py_graph->graph);
MG_ASSERT(self->path);
MG_ASSERT(self->py_graph);
MG_ASSERT(self->py_graph->graph);
return PyLong_FromSize_t(mgp_path_size(self->path));
}
PyObject *PyPathVertexAt(PyPath *self, PyObject *args) {
CHECK(self->path);
CHECK(self->py_graph);
CHECK(self->py_graph->graph);
MG_ASSERT(self->path);
MG_ASSERT(self->py_graph);
MG_ASSERT(self->py_graph->graph);
static_assert(std::numeric_limits<Py_ssize_t>::max() <=
std::numeric_limits<size_t>::max());
Py_ssize_t i;
@ -1316,9 +1322,9 @@ PyObject *PyPathVertexAt(PyPath *self, PyObject *args) {
}
PyObject *PyPathEdgeAt(PyPath *self, PyObject *args) {
CHECK(self->path);
CHECK(self->py_graph);
CHECK(self->py_graph->graph);
MG_ASSERT(self->path);
MG_ASSERT(self->py_graph);
MG_ASSERT(self->py_graph->graph);
static_assert(std::numeric_limits<Py_ssize_t>::max() <=
std::numeric_limits<size_t>::max());
Py_ssize_t i;
@ -1362,9 +1368,9 @@ static PyTypeObject PyPathType = {
// clang-format on
PyObject *MakePyPath(mgp_path *path, PyGraph *py_graph) {
CHECK(path);
CHECK(py_graph->graph && py_graph->memory);
CHECK(path->GetMemoryResource() == py_graph->memory->impl);
MG_ASSERT(path);
MG_ASSERT(py_graph->graph && py_graph->memory);
MG_ASSERT(path->GetMemoryResource() == py_graph->memory->impl);
auto *py_path = PyObject_New(PyPath, &PyPathType);
if (!py_path) return nullptr;
py_path->path = path;
@ -1374,8 +1380,8 @@ PyObject *MakePyPath(mgp_path *path, PyGraph *py_graph) {
}
PyObject *MakePyPath(const mgp_path &path, PyGraph *py_graph) {
CHECK(py_graph);
CHECK(py_graph->graph && py_graph->memory);
MG_ASSERT(py_graph);
MG_ASSERT(py_graph->graph && py_graph->memory);
auto *path_copy = mgp_path_copy(&path, py_graph->memory);
if (!path_copy) {
PyErr_SetString(PyExc_MemoryError, "Unable to allocate mgp_path.");
@ -1451,21 +1457,21 @@ namespace {
template <class TFun>
auto WithMgpModule(mgp_module *module_def, const TFun &fun) {
py::Object py_mgp(PyImport_ImportModule("_mgp"));
CHECK(py_mgp) << "Expected builtin '_mgp' to be available for import";
MG_ASSERT(py_mgp, "Expected builtin '_mgp' to be available for import");
py::Object py_mgp_module(py_mgp.GetAttr("_MODULE"));
CHECK(py_mgp_module) << "Expected '_mgp' to have attribute '_MODULE'";
MG_ASSERT(py_mgp_module, "Expected '_mgp' to have attribute '_MODULE'");
// NOTE: This check is not thread safe, but this should only go through
// ModuleRegistry::LoadModuleLibrary which ought to serialize loading.
CHECK(py_mgp_module.Ptr() == Py_None)
<< "Expected '_mgp._MODULE' to be None as we are just starting to "
"import a new module. Is some other thread also importing Python "
"modules?";
MG_ASSERT(py_mgp_module.Ptr() == Py_None,
"Expected '_mgp._MODULE' to be None as we are just starting to "
"import a new module. Is some other thread also importing Python "
"modules?");
auto *py_query_module = MakePyQueryModule(module_def);
CHECK(py_query_module);
CHECK(py_mgp.SetAttr("_MODULE", py_query_module));
MG_ASSERT(py_query_module);
MG_ASSERT(py_mgp.SetAttr("_MODULE", py_query_module));
auto ret = fun();
auto maybe_exc = py::FetchError();
CHECK(py_mgp.SetAttr("_MODULE", Py_None));
MG_ASSERT(py_mgp.SetAttr("_MODULE", Py_None));
if (maybe_exc) {
py::RestoreError(*maybe_exc);
}


@ -7,8 +7,6 @@
#include <string_view>
#include <utility>
#include "glog/logging.h"
#include "utils/exceptions.hpp"
#include "utils/fnv.hpp"
@ -57,7 +55,7 @@ TypedValue::TypedValue(const storage::PropertyValue &value,
return;
}
}
LOG(FATAL) << "Unsupported type";
LOG_FATAL("Unsupported type");
}
TypedValue::TypedValue(storage::PropertyValue &&other) /* noexcept */
@ -145,7 +143,7 @@ TypedValue::TypedValue(const TypedValue &other, utils::MemoryResource *memory)
new (&path_v) Path(other.path_v, memory_);
return;
}
LOG(FATAL) << "Unsupported TypedValue::Type";
LOG_FATAL("Unsupported TypedValue::Type");
}
TypedValue::TypedValue(TypedValue &&other) noexcept
@ -285,7 +283,7 @@ std::ostream &operator<<(std::ostream &os, const TypedValue::Type &type) {
case TypedValue::Type::Path:
return os << "path";
}
LOG(FATAL) << "Unsupported TypedValue::Type";
LOG_FATAL("Unsupported TypedValue::Type");
}
#define DEFINE_TYPED_VALUE_COPY_ASSIGNMENT(type_param, typed_value_type, \
@ -421,7 +419,7 @@ TypedValue &TypedValue::operator=(const TypedValue &other) {
new (&path_v) Path(other.path_v, memory_);
return *this;
}
LOG(FATAL) << "Unsupported TypedValue::Type";
LOG_FATAL("Unsupported TypedValue::Type");
}
return *this;
}
@ -508,9 +506,7 @@ void TypedValue::DestroyValue() {
type_ = TypedValue::Type::Null;
}
TypedValue::~TypedValue() {
DestroyValue();
}
TypedValue::~TypedValue() { DestroyValue(); }
/**
* Returns the double value of a value.
@ -633,7 +629,7 @@ TypedValue operator==(const TypedValue &a, const TypedValue &b) {
case TypedValue::Type::Path:
return TypedValue(a.ValuePath() == b.ValuePath(), a.GetMemoryResource());
default:
LOG(FATAL) << "Unhandled comparison for types";
LOG_FATAL("Unhandled comparison for types");
}
}
@ -837,9 +833,9 @@ bool TypedValue::BoolEqual::operator()(const TypedValue &lhs,
case TypedValue::Type::Null:
return false;
default:
LOG(FATAL)
<< "Equality between two TypedValues resulted in something other "
"than Null or bool";
LOG_FATAL(
"Equality between two TypedValues resulted in something other "
"than Null or bool");
}
}
@ -882,7 +878,7 @@ size_t TypedValue::Hash::operator()(const TypedValue &value) const {
utils::FnvCollection<decltype(edges), EdgeAccessor>{}(edges);
}
}
LOG(FATAL) << "Unhandled TypedValue.type() in hash function";
LOG_FATAL("Unhandled TypedValue.type() in hash function");
}
} // namespace query
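
The LOG_FATAL calls that close functions like operator<< above follow the usual pattern for exhaustive switches over an enum: every enumerator returns, and the unconditional fatal log after the switch both documents that new enumerators must be handled and keeps the compiler from warning that control can reach the end of a non-void function. A tiny illustration with made-up names (Color and ToString are not part of the codebase):

enum class Color { kRed, kGreen };

const char *ToString(Color color) {
  switch (color) {
    case Color::kRed:
      return "red";
    case Color::kGreen:
      return "green";
  }
  // Unreachable as long as the switch stays exhaustive; LOG_FATAL (see the
  // sketch earlier in this diff) aborts, so no value needs to be returned.
  LOG_FATAL("Unsupported Color value");
}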


@ -4,5 +4,5 @@ set(requests_src_files
find_package(CURL REQUIRED)
add_library(mg-requests STATIC ${requests_src_files})
target_link_libraries(mg-requests fmt glog gflags json ${CURL_LIBRARIES})
target_link_libraries(mg-requests spdlog fmt gflags json ${CURL_LIBRARIES})
target_include_directories(mg-requests PRIVATE ${CURL_INCLUDE_DIRS})


@ -5,7 +5,8 @@
#include <curl/curl.h>
#include <fmt/format.h>
#include <gflags/gflags.h>
#include <glog/logging.h>
#include "utils/logging.hpp"
namespace requests {
@ -52,13 +53,13 @@ bool RequestPostJson(const std::string &url, const nlohmann::json &data,
curl_easy_cleanup(curl);
if (res != CURLE_OK) {
DLOG(WARNING) << "Couldn't perform request: " << curl_easy_strerror(res);
SPDLOG_WARN("Couldn't perform request: {}", curl_easy_strerror(res));
return false;
}
if (response_code != 200) {
DLOG(WARNING) << "Request response code isn't 200 (received "
<< response_code << ")!";
SPDLOG_WARN("Request response code isn't 200 (received {})!",
response_code);
return false;
}
@ -94,13 +95,13 @@ bool CreateAndDownloadFile(const std::string &url, const std::string &path,
std::fclose(file);
if (res != CURLE_OK) {
DLOG(WARNING) << "Couldn't perform request: " << curl_easy_strerror(res);
SPDLOG_WARN("Couldn't perform request: {}", curl_easy_strerror(res));
return false;
}
if (response_code != 200) {
DLOG(WARNING) << "Request response code isn't 200 (received "
<< response_code << ")!";
SPDLOG_WARN("Request response code isn't 200 (received {})!",
response_code);
return false;
}


@ -4,5 +4,5 @@ set(rpc_src_files
server.cpp)
add_library(mg-rpc STATIC ${rpc_src_files})
target_link_libraries(mg-rpc Threads::Threads mg-communication mg-utils mg-io fmt glog gflags)
target_link_libraries(mg-rpc Threads::Threads mg-communication mg-utils mg-io fmt gflags)
target_link_libraries(mg-rpc mg-slk)


@ -4,14 +4,13 @@
#include <mutex>
#include <optional>
#include <glog/logging.h>
#include "communication/client.hpp"
#include "io/network/endpoint.hpp"
#include "rpc/exceptions.hpp"
#include "rpc/messages.hpp"
#include "slk/serialization.hpp"
#include "slk/streams.hpp"
#include "utils/logging.hpp"
#include "utils/on_scope_exit.hpp"
namespace rpc {
@ -88,12 +87,12 @@ class Client {
// Check the response ID.
if (res_id != res_type.id) {
LOG(ERROR) << "Message response was of unexpected type";
spdlog::error("Message response was of unexpected type");
self_->client_ = std::nullopt;
throw RpcFailedException(self_->endpoint_);
}
VLOG(12) << "[RpcClient] received " << res_type.name;
SPDLOG_TRACE("[RpcClient] received {}", res_type.name);
return res_load_(&res_reader);
}
@ -136,7 +135,7 @@ class Client {
Args &&...args) {
typename TRequestResponse::Request request(std::forward<Args>(args)...);
auto req_type = TRequestResponse::Request::kType;
VLOG(12) << "[RpcClient] sent " << req_type.name;
SPDLOG_TRACE("[RpcClient] sent {}", req_type.name);
std::unique_lock<std::mutex> guard(mutex_);
@ -150,7 +149,7 @@ class Client {
if (!client_) {
client_.emplace(context_);
if (!client_->Connect(endpoint_)) {
DLOG(ERROR) << "Couldn't connect to remote address " << endpoint_;
SPDLOG_ERROR("Couldn't connect to remote address {}", endpoint_);
client_ = std::nullopt;
throw RpcFailedException(endpoint_);
}


@ -57,11 +57,11 @@ void Session::Execute() {
throw SessionException(
"Session trying to execute an unregistered RPC call!");
}
VLOG(12) << "[RpcServer] received " << extended_it->second.req_type.name;
SPDLOG_TRACE("[RpcServer] received {}", extended_it->second.req_type.name);
slk::Save(extended_it->second.res_type.id, &res_builder);
extended_it->second.callback(endpoint_, &req_reader, &res_builder);
} else {
VLOG(12) << "[RpcServer] received " << it->second.req_type.name;
SPDLOG_TRACE("[RpcServer] received {}", it->second.req_type.name);
slk::Save(it->second.res_type.id, &res_builder);
it->second.callback(&req_reader, &res_builder);
}
@ -70,10 +70,10 @@ void Session::Execute() {
req_reader.Finalize();
res_builder.Finalize();
VLOG(12) << "[RpcServer] sent "
<< (it != server_->callbacks_.end()
? it->second.res_type.name
: extended_it->second.res_type.name);
SPDLOG_TRACE(
"[RpcServer] sent {}",
(it != server_->callbacks_.end() ? it->second.res_type.name
: extended_it->second.res_type.name));
}
} // namespace rpc
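
The DLOG(WARNING) and VLOG(12) call sites above are replaced with spdlog's SPDLOG_WARN / SPDLOG_ERROR / SPDLOG_TRACE macros rather than the plain spdlog::warn-style functions; in spdlog these macros are additionally filtered at compile time through SPDLOG_ACTIVE_LEVEL, which roughly matches the debug/verbose-only role the glog variants had. A small standalone sketch of what it takes to actually see such trace output (the message arguments here are placeholders, not real RPC type names):

// SPDLOG_ACTIVE_LEVEL must be visible before the spdlog headers are included,
// typically via a compile definition; it is defined inline here only to keep
// the example self-contained.
#define SPDLOG_ACTIVE_LEVEL SPDLOG_LEVEL_TRACE
#include <spdlog/spdlog.h>

int main() {
  // Compile-time filtering alone is not enough: the runtime level must also
  // be lowered, otherwise SPDLOG_TRACE calls compile in but stay silent.
  spdlog::set_level(spdlog::level::trace);
  SPDLOG_TRACE("[RpcClient] sent {}", "SomeRequest");
  SPDLOG_WARN("Couldn't perform request: {}", "timeout");
  return 0;
}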


@ -31,8 +31,8 @@ class Server {
template <class TRequestResponse>
void Register(std::function<void(slk::Reader *, slk::Builder *)> callback) {
std::lock_guard<std::mutex> guard(lock_);
CHECK(!server_.IsRunning())
<< "You can't register RPCs when the server is running!";
MG_ASSERT(!server_.IsRunning(),
"You can't register RPCs when the server is running!");
RpcCallback rpc;
rpc.req_type = TRequestResponse::Request::kType;
rpc.res_type = TRequestResponse::Response::kType;
@ -40,13 +40,13 @@ class Server {
if (extended_callbacks_.find(TRequestResponse::Request::kType.id) !=
extended_callbacks_.end()) {
LOG(FATAL) << "Callback for that message type already registered!";
LOG_FATAL("Callback for that message type already registered!");
}
auto got = callbacks_.insert({TRequestResponse::Request::kType.id, rpc});
CHECK(got.second) << "Callback for that message type already registered";
VLOG(12) << "[RpcServer] register " << rpc.req_type.name << " -> "
<< rpc.res_type.name;
MG_ASSERT(got.second, "Callback for that message type already registered");
SPDLOG_TRACE("[RpcServer] register {} -> {}", rpc.req_type.name,
rpc.res_type.name);
}
template <class TRequestResponse>
@ -54,8 +54,8 @@ class Server {
slk::Builder *)>
callback) {
std::lock_guard<std::mutex> guard(lock_);
CHECK(!server_.IsRunning())
<< "You can't register RPCs when the server is running!";
MG_ASSERT(!server_.IsRunning(),
"You can't register RPCs when the server is running!");
RpcExtendedCallback rpc;
rpc.req_type = TRequestResponse::Request::kType;
rpc.res_type = TRequestResponse::Response::kType;
@ -63,9 +63,9 @@ class Server {
auto got =
extended_callbacks_.insert({TRequestResponse::Request::kType.id, rpc});
CHECK(got.second) << "Callback for that message type already registered";
VLOG(12) << "[RpcServer] register " << rpc.req_type.name << " -> "
<< rpc.res_type.name;
MG_ASSERT(got.second, "Callback for that message type already registered");
SPDLOG_TRACE("[RpcServer] register {} -> {}", rpc.req_type.name,
rpc.res_type.name);
}
private:


@ -2,5 +2,5 @@ set(slk_src_files
streams.cpp)
add_library(mg-slk STATIC ${slk_src_files})
target_link_libraries(mg-slk glog gflags)
target_link_libraries(mg-slk gflags)
target_link_libraries(mg-slk mg-utils)


@ -2,7 +2,7 @@
#include <cstring>
#include <glog/logging.h>
#include "utils/logging.hpp"
namespace slk {
@ -32,7 +32,7 @@ void Builder::Finalize() { FlushSegment(true); }
void Builder::FlushSegment(bool final_segment) {
if (!final_segment && pos_ < kSegmentMaxDataSize) return;
CHECK(pos_ > 0) << "Trying to flush out a segment that has no data in it!";
MG_ASSERT(pos_ > 0, "Trying to flush out a segment that has no data in it!");
size_t total_size = sizeof(SegmentSize) + pos_;


@ -27,7 +27,7 @@ if(MG_ENTERPRISE)
endif()
add_library(mg-storage-v2 STATIC ${storage_v2_src_files})
target_link_libraries(mg-storage-v2 Threads::Threads mg-utils glog gflags)
target_link_libraries(mg-storage-v2 Threads::Threads mg-utils gflags)
if(MG_ENTERPRISE)
add_dependencies(mg-storage-v2 generate_lcp_storage)


@ -5,6 +5,7 @@
#include <map>
#include "storage/v2/mvcc.hpp"
#include "utils/logging.hpp"
namespace storage {
namespace {
@ -32,7 +33,7 @@ bool LastCommittedVersionHasLabelProperty(
const Vertex &vertex, LabelId label, const std::set<PropertyId> &properties,
const std::vector<PropertyValue> &value_array,
const Transaction &transaction, uint64_t commit_timestamp) {
CHECK(properties.size() == value_array.size()) << "Invalid database state!";
MG_ASSERT(properties.size() == value_array.size(), "Invalid database state!");
PropertyIdArray property_array(properties.size());
bool current_value_equal_to_value[kUniqueConstraintsMaxProperties];
@ -78,25 +79,25 @@ bool LastCommittedVersionHasLabelProperty(
break;
}
case Delta::Action::DELETE_OBJECT: {
CHECK(!deleted) << "Invalid database state!";
MG_ASSERT(!deleted, "Invalid database state!");
deleted = true;
break;
}
case Delta::Action::RECREATE_OBJECT: {
CHECK(deleted) << "Invalid database state!";
MG_ASSERT(deleted, "Invalid database state!");
deleted = false;
break;
}
case Delta::Action::ADD_LABEL: {
if (delta->label == label) {
CHECK(!has_label) << "Invalid database state!";
MG_ASSERT(!has_label, "Invalid database state!");
has_label = true;
break;
}
}
case Delta::Action::REMOVE_LABEL: {
if (delta->label == label) {
CHECK(has_label) << "Invalid database state!";
MG_ASSERT(has_label, "Invalid database state!");
has_label = false;
break;
}
@ -127,7 +128,7 @@ bool AnyVersionHasLabelProperty(const Vertex &vertex, LabelId label,
const std::set<PropertyId> &properties,
const std::vector<PropertyValue> &values,
uint64_t timestamp) {
CHECK(properties.size() == values.size()) << "Invalid database state!";
MG_ASSERT(properties.size() == values.size(), "Invalid database state!");
PropertyIdArray property_array(properties.size());
bool current_value_equal_to_value[kUniqueConstraintsMaxProperties];
@ -172,13 +173,13 @@ bool AnyVersionHasLabelProperty(const Vertex &vertex, LabelId label,
switch (delta->action) {
case Delta::Action::ADD_LABEL:
if (delta->label == label) {
CHECK(!has_label) << "Invalid database state!";
MG_ASSERT(!has_label, "Invalid database state!");
has_label = true;
}
break;
case Delta::Action::REMOVE_LABEL:
if (delta->label == label) {
CHECK(has_label) << "Invalid database state!";
MG_ASSERT(has_label, "Invalid database state!");
has_label = false;
}
break;
@ -191,12 +192,12 @@ bool AnyVersionHasLabelProperty(const Vertex &vertex, LabelId label,
break;
}
case Delta::Action::RECREATE_OBJECT: {
CHECK(deleted) << "Invalid database state!";
MG_ASSERT(deleted, "Invalid database state!");
deleted = false;
break;
}
case Delta::Action::DELETE_OBJECT: {
CHECK(!deleted) << "Invalid database state!";
MG_ASSERT(!deleted, "Invalid database state!");
deleted = true;
break;
}


@ -7,6 +7,7 @@
#include "storage/v2/id_types.hpp"
#include "storage/v2/transaction.hpp"
#include "storage/v2/vertex.hpp"
#include "utils/logging.hpp"
#include "utils/result.hpp"
#include "utils/skip_list.hpp"
@ -24,7 +25,7 @@ struct FixedCapacityArray {
T values[kUniqueConstraintsMaxProperties];
explicit FixedCapacityArray(size_t array_size) : size(array_size) {
CHECK(size <= kUniqueConstraintsMaxProperties) << "Invalid array size!";
MG_ASSERT(size <= kUniqueConstraintsMaxProperties, "Invalid array size!");
}
};


@ -2,11 +2,10 @@
#include <atomic>
#include <glog/logging.h>
#include "storage/v2/edge_ref.hpp"
#include "storage/v2/id_types.hpp"
#include "storage/v2/property_value.hpp"
#include "utils/logging.hpp"
namespace storage {
@ -64,25 +63,25 @@ class PreviousPtr {
} else if (type == kEdge) {
return Pointer{reinterpret_cast<Edge *>(value & ~kMask)};
} else {
LOG(FATAL) << "Invalid pointer type!";
LOG_FATAL("Invalid pointer type!");
}
}
void Set(Delta *delta) {
uintptr_t value = reinterpret_cast<uintptr_t>(delta);
CHECK((value & kMask) == 0) << "Invalid pointer!";
MG_ASSERT((value & kMask) == 0, "Invalid pointer!");
storage_.store(value | kDelta, std::memory_order_release);
}
void Set(Vertex *vertex) {
uintptr_t value = reinterpret_cast<uintptr_t>(vertex);
CHECK((value & kMask) == 0) << "Invalid pointer!";
MG_ASSERT((value & kMask) == 0, "Invalid pointer!");
storage_.store(value | kVertex, std::memory_order_release);
}
void Set(Edge *edge) {
uintptr_t value = reinterpret_cast<uintptr_t>(edge);
CHECK((value & kMask) == 0) << "Invalid pointer!";
MG_ASSERT((value & kMask) == 0, "Invalid pointer!");
storage_.store(value | kEdge, std::memory_order_release);
}
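
PreviousPtr above packs a type tag into the unused low bits of an aligned pointer, which is why every Set overload asserts that (value & kMask) == 0 before storing. A stripped-down illustration of the same tagging trick follows; the constants and the Delta struct are invented for the example, and the real kMask/kDelta values in the codebase may differ.

#include <cassert>
#include <cstdint>

constexpr uintptr_t kMask = 0b11;   // low two bits reserved for the type tag
constexpr uintptr_t kDelta = 0b01;  // example tag value

struct Delta {
  int dummy;
};

// Tag a pointer: this works because pointers to suitably aligned objects have
// their low bits guaranteed to be zero, so those bits are free for the tag.
uintptr_t TagDelta(Delta *delta) {
  auto value = reinterpret_cast<uintptr_t>(delta);
  assert((value & kMask) == 0 && "pointer is not sufficiently aligned");
  return value | kDelta;
}

// Recover the original pointer by masking the tag bits away again.
Delta *UntagDelta(uintptr_t value) {
  assert((value & kMask) == kDelta && "value does not hold a tagged Delta");
  return reinterpret_cast<Delta *>(value & ~kMask);
}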


@ -16,6 +16,7 @@
#include "storage/v2/durability/paths.hpp"
#include "storage/v2/durability/snapshot.hpp"
#include "storage/v2/durability/wal.hpp"
#include "utils/logging.hpp"
namespace storage::durability {
@ -31,9 +32,8 @@ void VerifyStorageDirectoryOwnerAndProcessUserOrDie(
// The directory doesn't currently exist.
return;
}
CHECK(ret == 0) << "Couldn't get stat for '" << storage_directory
<< "' because of: " << strerror(errno) << " (" << errno
<< ")";
MG_ASSERT(ret == 0, "Couldn't get stat for '{}' because of: {} ({})",
storage_directory, strerror(errno), errno);
auto directory_owner = statbuf.st_uid;
auto get_username = [](auto uid) {
@ -44,10 +44,10 @@ void VerifyStorageDirectoryOwnerAndProcessUserOrDie(
auto user_process = get_username(process_euid);
auto user_directory = get_username(directory_owner);
CHECK(process_euid == directory_owner)
<< "The process is running as user " << user_process
<< ", but the data directory is owned by user " << user_directory
<< ". Please start the process as user " << user_directory << "!";
MG_ASSERT(process_euid == directory_owner,
"The process is running as user {}, but the data directory is "
"owned by user {}. Please start the process as user {}!",
user_process, user_directory, user_directory);
}
std::vector<SnapshotDurabilityInfo> GetSnapshotFiles(
@ -69,8 +69,9 @@ std::vector<SnapshotDurabilityInfo> GetSnapshotFiles(
continue;
}
}
CHECK(!error_code) << "Couldn't recover data because an error occurred: "
<< error_code.message() << "!";
MG_ASSERT(!error_code,
"Couldn't recover data because an error occurred: {}!",
error_code.message());
}
return snapshot_files;
@ -94,12 +95,12 @@ std::optional<std::vector<WalDurabilityInfo>> GetWalFiles(
info.to_timestamp, std::move(info.uuid),
std::move(info.epoch_id), item.path());
} catch (const RecoveryFailure &e) {
DLOG(WARNING) << "Failed to read " << item.path();
spdlog::warn("Failed to read {}", item.path());
continue;
}
}
CHECK(!error_code) << "Couldn't recover data because an error occurred: "
<< error_code.message() << "!";
MG_ASSERT(!error_code, "Couldn't recover data because an error occurred: {}!",
error_code.message());
std::sort(wal_files.begin(), wal_files.end());
return std::move(wal_files);
@ -170,26 +171,28 @@ std::optional<RecoveryInfo> RecoverData(
for (auto it = snapshot_files.rbegin(); it != snapshot_files.rend(); ++it) {
const auto &[path, file_uuid, _] = *it;
if (file_uuid != *uuid) {
LOG(WARNING) << "The snapshot file " << path
<< " isn't related to the latest snapshot file!";
spdlog::warn(
"The snapshot file {} isn't related to the latest snapshot file!",
path);
continue;
}
LOG(INFO) << "Starting snapshot recovery from " << path;
spdlog::info("Starting snapshot recovery from {}", path);
try {
recovered_snapshot = LoadSnapshot(path, vertices, edges, epoch_history,
name_id_mapper, edge_count, items);
LOG(INFO) << "Snapshot recovery successful!";
spdlog::info("Snapshot recovery successful!");
break;
} catch (const RecoveryFailure &e) {
LOG(WARNING) << "Couldn't recover snapshot from " << path
<< " because of: " << e.what();
spdlog::warn("Couldn't recover snapshot from {} because of: {}", path,
e.what());
continue;
}
}
CHECK(recovered_snapshot)
<< "The database is configured to recover on startup, but couldn't "
"recover using any of the specified snapshots! Please inspect them "
"and restart the database.";
MG_ASSERT(
recovered_snapshot,
"The database is configured to recover on startup, but couldn't "
"recover using any of the specified snapshots! Please inspect them "
"and restart the database.");
recovery_info = recovered_snapshot->recovery_info;
indices_constraints = std::move(recovered_snapshot->indices_constraints);
snapshot_timestamp = recovered_snapshot->snapshot_info.start_timestamp;
@ -230,8 +233,9 @@ std::optional<RecoveryInfo> RecoverData(
continue;
}
}
CHECK(!error_code) << "Couldn't recover data because an error occurred: "
<< error_code.message() << "!";
MG_ASSERT(!error_code,
"Couldn't recover data because an error occurred: {}!",
error_code.message());
if (wal_files.empty()) return std::nullopt;
std::sort(wal_files.begin(), wal_files.end());
// UUID used for durability is the UUID of the last WAL file.
@ -253,9 +257,10 @@ std::optional<RecoveryInfo> RecoverData(
// a WAL file. The above `else` has an early exit in case there are no WAL
// files. Because we reached this point there must have been some WAL files
// and we must have some WAL files after this second WAL directory iteration.
CHECK(snapshot_timestamp || !wal_files.empty())
<< "The database didn't recover from a snapshot and didn't find any WAL "
"files that match the last WAL file!";
MG_ASSERT(
snapshot_timestamp || !wal_files.empty(),
"The database didn't recover from a snapshot and didn't find any WAL "
"files that match the last WAL file!");
if (!wal_files.empty()) {
{
@ -266,14 +271,16 @@ std::optional<RecoveryInfo> RecoverData(
// We didn't recover from a snapshot and we must have all WAL files
// starting from the first one (seq_num == 0) to be able to recover
// data from them.
LOG(FATAL) << "There are missing prefix WAL files and data can't be "
"recovered without them!";
LOG_FATAL(
"There are missing prefix WAL files and data can't be "
"recovered without them!");
} else if (first_wal.to_timestamp >= *snapshot_timestamp) {
// We recovered from a snapshot and we must have at least one WAL file
// whose all deltas were created before the snapshot in order to
// verify that nothing is missing from the beginning of the WAL chain.
LOG(FATAL) << "You must have at least one WAL file that contains "
"deltas that were created before the snapshot file!";
LOG_FATAL(
"You must have at least one WAL file that contains "
"deltas that were created before the snapshot file!");
}
}
}
@ -281,8 +288,8 @@ std::optional<RecoveryInfo> RecoverData(
auto last_loaded_timestamp = snapshot_timestamp;
for (auto &wal_file : wal_files) {
if (previous_seq_num && (wal_file.seq_num - *previous_seq_num) > 1) {
LOG(FATAL) << "You are missing a WAL file with the sequence number "
<< *previous_seq_num + 1 << "!";
LOG_FATAL("You are missing a WAL file with the sequence number {}!",
*previous_seq_num + 1);
}
previous_seq_num = wal_file.seq_num;
@ -310,8 +317,8 @@ std::optional<RecoveryInfo> RecoverData(
recovery_info.last_commit_timestamp = info.last_commit_timestamp;
} catch (const RecoveryFailure &e) {
LOG(FATAL) << "Couldn't recover WAL deltas from " << wal_file.path
<< " because of: " << e.what();
LOG_FATAL("Couldn't recover WAL deltas from {} because of: {}",
wal_file.path, e.what());
}
if (recovery_info.next_timestamp != 0) {

View File
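Throughout this change the removed glog stream-style calls map onto fmt-style spdlog calls: LOG(INFO)/LOG(WARNING)/LOG(ERROR) become spdlog::info/warn/error, DLOG(INFO) becomes spdlog::debug or spdlog::trace, and CHECK/LOG(FATAL) become the MG_ASSERT/LOG_FATAL macros from the new utils/logging.hpp, which is not shown in this diff. The sketch below only illustrates how such an assert macro could be layered on top of spdlog; it is not the actual Memgraph implementation.

// Minimal sketch, assuming only <spdlog/spdlog.h>; the real MG_ASSERT/LOG_FATAL
// in utils/logging.hpp may differ.
#include <cstdlib>
#include <spdlog/spdlog.h>

#define SKETCH_ASSERT(cond, ...)     \
  do {                               \
    if (!(cond)) {                   \
      spdlog::critical(__VA_ARGS__); \
      std::abort();                  \
    }                                \
  } while (false)

int main() {
  const char *path = "/var/lib/memgraph/snapshots/example";
  // glog:   LOG(INFO) << "Starting snapshot recovery from " << path;
  // spdlog: arguments are spliced into {} placeholders instead of being streamed.
  spdlog::info("Starting snapshot recovery from {}", path);
  // glog:   CHECK(condition) << "Couldn't recover data: " << message << "!";
  // sketch: condition first, then a format string plus its arguments.
  SKETCH_ASSERT(path != nullptr, "Couldn't recover data: {}!", "example message");
}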

@ -10,6 +10,7 @@
#include "storage/v2/mvcc.hpp"
#include "storage/v2/vertex_accessor.hpp"
#include "utils/file_locker.hpp"
#include "utils/logging.hpp"
namespace storage::durability {
@ -625,7 +626,7 @@ void CreateSnapshot(
// Create snapshot file.
auto path =
snapshot_directory / MakeSnapshotName(transaction->start_timestamp);
LOG(INFO) << "Starting snapshot creation to " << path;
spdlog::info("Starting snapshot creation to {}", path);
Encoder snapshot;
snapshot.Initialize(path, kSnapshotMagic, kVersion);
@ -710,7 +711,7 @@ void CreateSnapshot(
// Get edge data.
auto maybe_props = ea.Properties(View::OLD);
CHECK(maybe_props.HasValue()) << "Invalid database state!";
MG_ASSERT(maybe_props.HasValue(), "Invalid database state!");
// Store the edge.
{
@ -742,13 +743,13 @@ void CreateSnapshot(
// TODO (mferencevic): All of these functions could be written into a
// single function so that we traverse the undo deltas only once.
auto maybe_labels = va->Labels(View::OLD);
CHECK(maybe_labels.HasValue()) << "Invalid database state!";
MG_ASSERT(maybe_labels.HasValue(), "Invalid database state!");
auto maybe_props = va->Properties(View::OLD);
CHECK(maybe_props.HasValue()) << "Invalid database state!";
MG_ASSERT(maybe_props.HasValue(), "Invalid database state!");
auto maybe_in_edges = va->InEdges(View::OLD);
CHECK(maybe_in_edges.HasValue()) << "Invalid database state!";
MG_ASSERT(maybe_in_edges.HasValue(), "Invalid database state!");
auto maybe_out_edges = va->OutEdges(View::OLD);
CHECK(maybe_out_edges.HasValue()) << "Invalid database state!";
MG_ASSERT(maybe_out_edges.HasValue(), "Invalid database state!");
// Store the vertex.
{
@ -886,7 +887,7 @@ void CreateSnapshot(
// Finalize snapshot file.
snapshot.Finalize();
LOG(INFO) << "Snapshot creation successful!";
spdlog::info("Snapshot creation successful!");
// Ensure exactly `snapshot_retention_count` snapshots exist.
std::vector<std::pair<uint64_t, std::filesystem::path>> old_snapshot_files;
@ -901,15 +902,18 @@ void CreateSnapshot(
if (info.uuid != uuid) continue;
old_snapshot_files.emplace_back(info.start_timestamp, item.path());
} catch (const RecoveryFailure &e) {
LOG(WARNING) << "Found a corrupt snapshot file " << item.path()
<< " because of: " << e.what();
spdlog::warn("Found a corrupt snapshot file {} becuase of: {}",
item.path(), e.what());
continue;
}
}
LOG_IF(ERROR, error_code)
<< "Couldn't ensure that exactly " << snapshot_retention_count
<< " snapshots exist because an error occurred: "
<< error_code.message() << "!";
if (error_code) {
spdlog::error(
"Couldn't ensure that exactly {} snapshots exist because an error "
"occurred: {}",
snapshot_retention_count, error_code.message());
}
std::sort(old_snapshot_files.begin(), old_snapshot_files.end());
if (old_snapshot_files.size() > snapshot_retention_count - 1) {
auto num_to_erase =
@ -941,10 +945,13 @@ void CreateSnapshot(
continue;
}
}
LOG_IF(ERROR, error_code)
<< "Couldn't ensure that only the absolutely necessary WAL files exist "
"because an error occurred: "
<< error_code.message() << "!";
if (error_code) {
spdlog::error(
"Couldn't ensure that only the absolutely necessary WAL files exist "
"because an error occurred: {}",
error_code.message());
}
std::sort(wal_files.begin(), wal_files.end());
uint64_t snapshot_start_timestamp = transaction->start_timestamp;
if (!old_snapshot_files.empty()) {

View File
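One pattern in the snapshot code above has no direct spdlog counterpart: glog's conditional LOG_IF(ERROR, cond) becomes an explicit if around spdlog::error. A minimal sketch of that conversion, with names that mirror the variables above but a purely illustrative function:

#include <cstdint>
#include <system_error>
#include <spdlog/spdlog.h>

// Illustrative stand-in for the snapshot retention cleanup above.
void ReportRetentionError(const std::error_code &error_code,
                          uint64_t snapshot_retention_count) {
  // glog:   LOG_IF(ERROR, error_code) << "Couldn't ensure that exactly "
  //                                   << snapshot_retention_count << " snapshots exist ...";
  // spdlog has no conditional logging macro, so the condition becomes a plain if.
  if (error_code) {
    spdlog::error(
        "Couldn't ensure that exactly {} snapshots exist because an error occurred: {}",
        snapshot_retention_count, error_code.message());
  }
}

int main() {
  ReportRetentionError(std::make_error_code(std::errc::permission_denied), 3);
}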

@ -7,6 +7,7 @@
#include "storage/v2/edge.hpp"
#include "storage/v2/vertex.hpp"
#include "utils/file_locker.hpp"
#include "utils/logging.hpp"
namespace storage::durability {
@ -545,7 +546,7 @@ void EncodeDelta(BaseEncoder *encoder, NameIdMapper *name_id_mapper,
case Delta::Action::REMOVE_IN_EDGE:
// These actions are already encoded in the *_OUT_EDGE actions. This
// function should never be called for this type of delta.
LOG(FATAL) << "Invalid delta action!";
LOG_FATAL("Invalid delta action!");
}
}
@ -578,7 +579,7 @@ void EncodeDelta(BaseEncoder *encoder, NameIdMapper *name_id_mapper,
// these deltas don't contain any information about the from vertex, to
// vertex or edge type so they are useless. This function should never
// be called for this type of delta.
LOG(FATAL) << "Invalid delta action!";
LOG_FATAL("Invalid delta action!");
case Delta::Action::ADD_LABEL:
case Delta::Action::REMOVE_LABEL:
case Delta::Action::ADD_OUT_EDGE:
@ -586,7 +587,7 @@ void EncodeDelta(BaseEncoder *encoder, NameIdMapper *name_id_mapper,
case Delta::Action::ADD_IN_EDGE:
case Delta::Action::REMOVE_IN_EDGE:
// These deltas shouldn't appear for edges.
LOG(FATAL) << "Invalid database state!";
LOG_FATAL("Invalid database state!");
}
}
@ -605,7 +606,7 @@ void EncodeOperation(BaseEncoder *encoder, NameIdMapper *name_id_mapper,
switch (operation) {
case StorageGlobalOperation::LABEL_INDEX_CREATE:
case StorageGlobalOperation::LABEL_INDEX_DROP: {
CHECK(properties.empty()) << "Invalid function call!";
MG_ASSERT(properties.empty(), "Invalid function call!");
encoder->WriteMarker(OperationToMarker(operation));
encoder->WriteString(name_id_mapper->IdToName(label.AsUint()));
break;
@ -614,7 +615,7 @@ void EncodeOperation(BaseEncoder *encoder, NameIdMapper *name_id_mapper,
case StorageGlobalOperation::LABEL_PROPERTY_INDEX_DROP:
case StorageGlobalOperation::EXISTENCE_CONSTRAINT_CREATE:
case StorageGlobalOperation::EXISTENCE_CONSTRAINT_DROP: {
CHECK(properties.size() == 1) << "Invalid function call!";
MG_ASSERT(properties.size() == 1, "Invalid function call!");
encoder->WriteMarker(OperationToMarker(operation));
encoder->WriteString(name_id_mapper->IdToName(label.AsUint()));
encoder->WriteString(
@ -623,7 +624,7 @@ void EncodeOperation(BaseEncoder *encoder, NameIdMapper *name_id_mapper,
}
case StorageGlobalOperation::UNIQUE_CONSTRAINT_CREATE:
case StorageGlobalOperation::UNIQUE_CONSTRAINT_DROP: {
CHECK(!properties.empty()) << "Invalid function call!";
MG_ASSERT(!properties.empty(), "Invalid function call!");
encoder->WriteMarker(OperationToMarker(operation));
encoder->WriteString(name_id_mapper->IdToName(label.AsUint()));
encoder->WriteUint(properties.size());
@ -937,7 +938,7 @@ RecoveryInfo LoadWal(const std::filesystem::path &path,
}
}
LOG(INFO) << "Applied " << deltas_applied << " deltas from WAL " << path;
spdlog::info("Applied {} deltas from WAL", deltas_applied, path);
return ret;
}
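A note on the spdlog::info call just above: fmt-style format strings print only what has a matching {} placeholder, and extra arguments are ignored rather than rejected by default, so dropping a placeholder would silently lose a value instead of failing loudly. A small sketch of the difference:

#include <cstdint>
#include <spdlog/spdlog.h>

int main() {
  const char *path = "/var/lib/memgraph/wal/example_wal";
  uint64_t deltas_applied = 128;
  // Missing placeholder: the second argument is ignored and the path never appears.
  spdlog::info("Applied {} deltas from WAL", deltas_applied, path);
  // One placeholder per argument prints the full message.
  spdlog::info("Applied {} deltas from WAL {}", deltas_applied, path);
}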

View File

@ -2,11 +2,11 @@
#include <limits>
#include "utils/spin_lock.hpp"
#include "storage/v2/delta.hpp"
#include "storage/v2/id_types.hpp"
#include "storage/v2/property_store.hpp"
#include "utils/logging.hpp"
#include "utils/spin_lock.hpp"
namespace storage {
@ -14,8 +14,8 @@ struct Vertex;
struct Edge {
Edge(Gid gid, Delta *delta) : gid(gid), deleted(false), delta(delta) {
CHECK(delta == nullptr || delta->action == Delta::Action::DELETE_OBJECT)
<< "Edge must be created with an initial DELETE_OBJECT delta!";
MG_ASSERT(delta == nullptr || delta->action == Delta::Action::DELETE_OBJECT,
"Edge must be created with an initial DELETE_OBJECT delta!");
}
Gid gid;

View File

@ -1,6 +1,7 @@
#include "indices.hpp"
#include "storage/v2/mvcc.hpp"
#include "utils/logging.hpp"
namespace storage {
@ -49,23 +50,23 @@ bool AnyVersionHasLabel(const Vertex &vertex, LabelId label,
switch (delta.action) {
case Delta::Action::ADD_LABEL:
if (delta.label == label) {
CHECK(!has_label) << "Invalid database state!";
MG_ASSERT(!has_label, "Invalid database state!");
has_label = true;
}
break;
case Delta::Action::REMOVE_LABEL:
if (delta.label == label) {
CHECK(has_label) << "Invalid database state!";
MG_ASSERT(has_label, "Invalid database state!");
has_label = false;
}
break;
case Delta::Action::RECREATE_OBJECT: {
CHECK(deleted) << "Invalid database state!";
MG_ASSERT(deleted, "Invalid database state!");
deleted = false;
break;
}
case Delta::Action::DELETE_OBJECT: {
CHECK(!deleted) << "Invalid database state!";
MG_ASSERT(!deleted, "Invalid database state!");
deleted = true;
break;
}
@ -110,13 +111,13 @@ bool AnyVersionHasLabelProperty(const Vertex &vertex, LabelId label,
switch (delta.action) {
case Delta::Action::ADD_LABEL:
if (delta.label == label) {
CHECK(!has_label) << "Invalid database state!";
MG_ASSERT(!has_label, "Invalid database state!");
has_label = true;
}
break;
case Delta::Action::REMOVE_LABEL:
if (delta.label == label) {
CHECK(has_label) << "Invalid database state!";
MG_ASSERT(has_label, "Invalid database state!");
has_label = false;
}
break;
@ -126,12 +127,12 @@ bool AnyVersionHasLabelProperty(const Vertex &vertex, LabelId label,
}
break;
case Delta::Action::RECREATE_OBJECT: {
CHECK(deleted) << "Invalid database state!";
MG_ASSERT(deleted, "Invalid database state!");
deleted = false;
break;
}
case Delta::Action::DELETE_OBJECT: {
CHECK(!deleted) << "Invalid database state!";
MG_ASSERT(!deleted, "Invalid database state!");
deleted = true;
break;
}
@ -164,25 +165,25 @@ bool CurrentVersionHasLabel(const Vertex &vertex, LabelId label,
switch (delta.action) {
case Delta::Action::REMOVE_LABEL: {
if (delta.label == label) {
CHECK(has_label) << "Invalid database state!";
MG_ASSERT(has_label, "Invalid database state!");
has_label = false;
}
break;
}
case Delta::Action::ADD_LABEL: {
if (delta.label == label) {
CHECK(!has_label) << "Invalid database state!";
MG_ASSERT(!has_label, "Invalid database state!");
has_label = true;
}
break;
}
case Delta::Action::DELETE_OBJECT: {
CHECK(!deleted) << "Invalid database state!";
MG_ASSERT(!deleted, "Invalid database state!");
deleted = true;
break;
}
case Delta::Action::RECREATE_OBJECT: {
CHECK(deleted) << "Invalid database state!";
MG_ASSERT(deleted, "Invalid database state!");
deleted = false;
break;
}
@ -227,24 +228,24 @@ bool CurrentVersionHasLabelProperty(const Vertex &vertex, LabelId label,
break;
}
case Delta::Action::DELETE_OBJECT: {
CHECK(!deleted) << "Invalid database state!";
MG_ASSERT(!deleted, "Invalid database state!");
deleted = true;
break;
}
case Delta::Action::RECREATE_OBJECT: {
CHECK(deleted) << "Invalid database state!";
MG_ASSERT(deleted, "Invalid database state!");
deleted = false;
break;
}
case Delta::Action::ADD_LABEL:
if (delta.label == label) {
CHECK(!has_label) << "Invalid database state!";
MG_ASSERT(!has_label, "Invalid database state!");
has_label = true;
}
break;
case Delta::Action::REMOVE_LABEL:
if (delta.label == label) {
CHECK(has_label) << "Invalid database state!";
MG_ASSERT(has_label, "Invalid database state!");
has_label = false;
}
break;
@ -488,8 +489,8 @@ LabelPropertyIndex::Iterable::Iterator::Iterator(
AdvanceUntilValid();
}
LabelPropertyIndex::Iterable::Iterator &LabelPropertyIndex::Iterable::Iterator::
operator++() {
LabelPropertyIndex::Iterable::Iterator &
LabelPropertyIndex::Iterable::Iterator::operator++() {
++index_iterator_;
AdvanceUntilValid();
return *this;
@ -601,7 +602,7 @@ LabelPropertyIndex::Iterable::Iterable(
switch (lower_bound_->value().type()) {
case PropertyValue::Type::Null:
// This shouldn't happen because of the nullopt-ing above.
LOG(FATAL) << "Invalid database state!";
LOG_FATAL("Invalid database state!");
break;
case PropertyValue::Type::Bool:
upper_bound_ = utils::MakeBoundExclusive(kSmallestNumber);
@ -629,7 +630,7 @@ LabelPropertyIndex::Iterable::Iterable(
switch (upper_bound_->value().type()) {
case PropertyValue::Type::Null:
// This shouldn't happen because of the nullopt-ing above.
LOG(FATAL) << "Invalid database state!";
LOG_FATAL("Invalid database state!");
break;
case PropertyValue::Type::Bool:
lower_bound_ = utils::MakeBoundInclusive(kSmallestBool);
@ -697,9 +698,9 @@ uint64_t SkipListLayerForEstimation(uint64_t N) {
int64_t LabelPropertyIndex::ApproximateVertexCount(
LabelId label, PropertyId property, const PropertyValue &value) const {
auto it = index_.find({label, property});
CHECK(it != index_.end())
<< "Index for label " << label.AsUint() << " and property "
<< property.AsUint() << " doesn't exist";
MG_ASSERT(it != index_.end(),
"Index for label {} and property {} doesn't exist", label.AsUint(),
property.AsUint());
auto acc = it->second.access();
return acc.estimate_count(value, SkipListLayerForEstimation(acc.size()));
}
@ -709,9 +710,9 @@ int64_t LabelPropertyIndex::ApproximateVertexCount(
const std::optional<utils::Bound<PropertyValue>> &lower,
const std::optional<utils::Bound<PropertyValue>> &upper) const {
auto it = index_.find({label, property});
CHECK(it != index_.end())
<< "Index for label " << label.AsUint() << " and property "
<< property.AsUint() << " doesn't exist";
MG_ASSERT(it != index_.end(),
"Index for label {} and property {} doesn't exist", label.AsUint(),
property.AsUint());
auto acc = it->second.access();
return acc.estimate_range_count(lower, upper,
SkipListLayerForEstimation(acc.size()));

View File

@ -9,6 +9,7 @@
#include "storage/v2/transaction.hpp"
#include "storage/v2/vertex_accessor.hpp"
#include "utils/bound.hpp"
#include "utils/logging.hpp"
#include "utils/skip_list.hpp"
namespace storage {
@ -107,16 +108,16 @@ class LabelIndex {
/// Returns an self with vertices visible from the given transaction.
Iterable Vertices(LabelId label, View view, Transaction *transaction) {
auto it = index_.find(label);
CHECK(it != index_.end())
<< "Index for label " << label.AsUint() << " doesn't exist";
MG_ASSERT(it != index_.end(), "Index for label {} doesn't exist",
label.AsUint());
return Iterable(it->second.access(), label, view, transaction, indices_,
constraints_, config_);
}
int64_t ApproximateVertexCount(LabelId label) {
auto it = index_.find(label);
CHECK(it != index_.end())
<< "Index for label " << label.AsUint() << " doesn't exist";
MG_ASSERT(it != index_.end(), "Index for label {} doesn't exist",
label.AsUint());
return it->second.size();
}
@ -144,7 +145,8 @@ class LabelPropertyIndex {
};
public:
LabelPropertyIndex(Indices *indices, Constraints *constraints, Config::Items config)
LabelPropertyIndex(Indices *indices, Constraints *constraints,
Config::Items config)
: indices_(indices), constraints_(constraints), config_(config) {}
/// @throw std::bad_alloc
@ -226,9 +228,9 @@ class LabelPropertyIndex {
const std::optional<utils::Bound<PropertyValue>> &upper_bound, View view,
Transaction *transaction) {
auto it = index_.find({label, property});
CHECK(it != index_.end())
<< "Index for label " << label.AsUint() << " and property "
<< property.AsUint() << " doesn't exist";
MG_ASSERT(it != index_.end(),
"Index for label {} and property {} doesn't exist",
label.AsUint(), property.AsUint());
return Iterable(it->second.access(), label, property, lower_bound,
upper_bound, view, transaction, indices_, constraints_,
config_);
@ -236,9 +238,9 @@ class LabelPropertyIndex {
int64_t ApproximateVertexCount(LabelId label, PropertyId property) const {
auto it = index_.find({label, property});
CHECK(it != index_.end())
<< "Index for label " << label.AsUint() << " and property "
<< property.AsUint() << " doesn't exist";
MG_ASSERT(it != index_.end(),
"Index for label {} and property {} doesn't exist",
label.AsUint(), property.AsUint());
return it->second.size();
}

View File

@ -4,6 +4,7 @@
#include <string>
#include <string_view>
#include "utils/logging.hpp"
#include "utils/skip_list.hpp"
namespace storage {
@ -74,8 +75,8 @@ class NameIdMapper final {
const std::string &IdToName(uint64_t id) const {
auto id_to_name_acc = id_to_name_.access();
auto result = id_to_name_acc.find(id);
CHECK(result != id_to_name_acc.end())
<< "Trying to get a name for an invalid ID!";
MG_ASSERT(result != id_to_name_acc.end(),
"Trying to get a name for an invalid ID!");
return result->name;
}

View File

@ -7,9 +7,8 @@
#include <type_traits>
#include <utility>
#include <glog/logging.h>
#include "utils/cast.hpp"
#include "utils/logging.hpp"
namespace storage {
@ -1037,8 +1036,8 @@ bool PropertyStore::SetProperty(PropertyId property,
// Encode the property into the data buffer.
Writer writer(data, size);
CHECK(EncodeProperty(&writer, property, value))
<< "Invalid database state!";
MG_ASSERT(EncodeProperty(&writer, property, value),
"Invalid database state!");
auto metadata = writer.WriteMetadata();
if (metadata) {
// If there is any space left in the buffer we add a tombstone to
@ -1105,8 +1104,8 @@ bool PropertyStore::SetProperty(PropertyId property,
if (!value.IsNull()) {
// We need to encode the new value.
Writer writer(data + info.property_begin, property_size);
CHECK(EncodeProperty(&writer, property, value))
<< "Invalid database state!";
MG_ASSERT(EncodeProperty(&writer, property, value),
"Invalid database state!");
}
// We need to recreate the tombstone (if possible).

View File

@ -8,6 +8,7 @@
#include "storage/v2/replication/enums.hpp"
#include "storage/v2/transaction.hpp"
#include "utils/file_locker.hpp"
#include "utils/logging.hpp"
namespace storage {
@ -68,22 +69,23 @@ void Storage::ReplicationClient::InitializeClient() {
}
}
if (branching_point) {
LOG(ERROR) << "Replica " << name_ << " cannot be used with this instance."
<< " Please start a clean instance of Memgraph server"
<< " on the specified endpoint.";
spdlog::error(
"Replica {} cannot be used with this instance. Please start a clean "
"instance of Memgraph server on the specified endpoint.",
name_);
return;
}
current_commit_timestamp = response.current_commit_timestamp;
DLOG(INFO) << "Current timestamp on replica: " << current_commit_timestamp;
DLOG(INFO) << "Current MAIN timestamp: "
<< storage_->last_commit_timestamp_.load();
spdlog::trace("Current timestamp on replica: {}", current_commit_timestamp);
spdlog::trace("Current timestamp on main: {}",
storage_->last_commit_timestamp_.load());
if (current_commit_timestamp == storage_->last_commit_timestamp_.load()) {
DLOG(INFO) << "Replica up to date";
spdlog::debug("Replica '{}' up to date", name_);
std::unique_lock client_guard{client_lock_};
replica_state_.store(replication::ReplicaState::READY);
} else {
DLOG(INFO) << "Replica is behind";
spdlog::debug("Replica '{}' is behind", name_);
{
std::unique_lock client_guard{client_lock_};
replica_state_.store(replication::ReplicaState::RECOVERY);
@ -99,13 +101,13 @@ void Storage::ReplicationClient::TryInitializeClient() {
} catch (const rpc::RpcFailedException &) {
std::unique_lock client_guarde{client_lock_};
replica_state_.store(replication::ReplicaState::INVALID);
LOG(ERROR) << "Failed to connect to replica " << name_ << " at "
<< rpc_client_->Endpoint();
spdlog::error("Failed to connect to replica {} at {}", name_,
rpc_client_->Endpoint());
}
}
void Storage::ReplicationClient::HandleRpcFailure() {
LOG(ERROR) << "Couldn't replicate data to " << name_;
spdlog::error("Couldn't replicate data to {}", name_);
thread_pool_.AddTask([this] {
rpc_client_->Abort();
this->TryInitializeClient();
@ -122,11 +124,11 @@ SnapshotRes Storage::ReplicationClient::TransferSnapshot(
WalFilesRes Storage::ReplicationClient::TransferWalFiles(
const std::vector<std::filesystem::path> &wal_files) {
CHECK(!wal_files.empty()) << "Wal files list is empty!";
MG_ASSERT(!wal_files.empty(), "Wal files list is empty!");
auto stream{rpc_client_->Stream<WalFilesRpc>(wal_files.size())};
replication::Encoder encoder(stream.GetBuilder());
for (const auto &wal : wal_files) {
DLOG(INFO) << "Sending wal file: " << wal;
spdlog::debug("Sending wal file: {}", wal);
encoder.WriteFile(wal);
}
@ -139,10 +141,10 @@ void Storage::ReplicationClient::StartTransactionReplication(
const auto status = replica_state_.load();
switch (status) {
case replication::ReplicaState::RECOVERY:
DLOG(INFO) << "Replica " << name_ << " is behind MAIN instance";
spdlog::debug("Replica {} is behind MAIN instance", name_);
return;
case replication::ReplicaState::REPLICATING:
DLOG(INFO) << "Replica " << name_ << " missed a transaction";
spdlog::debug("Replica {} missed a transaction", name_);
// We missed a transaction because we're still replicating
// the previous transaction so we need to go to RECOVERY
// state to catch up with the missing transaction
@ -156,7 +158,7 @@ void Storage::ReplicationClient::StartTransactionReplication(
HandleRpcFailure();
return;
case replication::ReplicaState::READY:
CHECK(!replica_stream_);
MG_ASSERT(!replica_stream_);
try {
replica_stream_.emplace(
ReplicaStream{this, storage_->last_commit_timestamp_.load(),
@ -204,9 +206,9 @@ void Storage::ReplicationClient::FinalizeTransactionReplication() {
thread_pool_.AddTask(
[this] { this->FinalizeTransactionReplicationInternal(); });
} else if (timeout_) {
CHECK(mode_ == replication::ReplicationMode::SYNC)
<< "Only SYNC replica can have a timeout.";
CHECK(timeout_dispatcher_) << "Timeout thread is missing";
MG_ASSERT(mode_ == replication::ReplicationMode::SYNC,
"Only SYNC replica can have a timeout.");
MG_ASSERT(timeout_dispatcher_, "Timeout thread is missing");
timeout_dispatcher_->WaitForTaskToFinish();
timeout_dispatcher_->active = true;
@ -246,7 +248,7 @@ void Storage::ReplicationClient::FinalizeTransactionReplication() {
}
void Storage::ReplicationClient::FinalizeTransactionReplicationInternal() {
CHECK(replica_stream_) << "Missing stream for transaction deltas";
MG_ASSERT(replica_stream_, "Missing stream for transaction deltas");
try {
auto response = replica_stream_->Finalize();
replica_stream_.reset();
@ -281,17 +283,13 @@ void Storage::ReplicationClient::RecoverReplica(uint64_t replica_commit) {
[&, this]<typename T>(T &&arg) {
using StepType = std::remove_cvref_t<T>;
if constexpr (std::is_same_v<StepType, RecoverySnapshot>) {
DLOG(INFO) << "Sending the latest snapshot file: " << arg;
spdlog::debug("Sending the latest snapshot file: {}", arg);
auto response = TransferSnapshot(arg);
replica_commit = response.current_commit_timestamp;
DLOG(INFO) << "Current timestamp on replica: "
<< replica_commit;
} else if constexpr (std::is_same_v<StepType, RecoveryWals>) {
DLOG(INFO) << "Sending the latest wal files";
spdlog::debug("Sending the latest wal files");
auto response = TransferWalFiles(arg);
replica_commit = response.current_commit_timestamp;
DLOG(INFO) << "Current timestamp on replica: "
<< replica_commit;
} else if constexpr (std::is_same_v<StepType,
RecoveryCurrentWal>) {
std::unique_lock transaction_guard(storage_->engine_lock_);
@ -300,10 +298,8 @@ void Storage::ReplicationClient::RecoverReplica(uint64_t replica_commit) {
arg.current_wal_seq_num) {
storage_->wal_file_->DisableFlushing();
transaction_guard.unlock();
DLOG(INFO) << "Sending current wal file";
spdlog::debug("Sending current wal file");
replica_commit = ReplicateCurrentWal();
DLOG(INFO)
<< "Current timestamp on replica: " << replica_commit;
storage_->wal_file_->EnableFlushing();
}
} else {
@ -322,6 +318,7 @@ void Storage::ReplicationClient::RecoverReplica(uint64_t replica_commit) {
}
}
spdlog::trace("Current timestamp on replica: {}", replica_commit);
// To avoid the situation where we read a correct commit timestamp in
// one thread, and after that another thread commits a different
// transaction and THEN we set the state to READY in the first thread,
@ -332,8 +329,8 @@ void Storage::ReplicationClient::RecoverReplica(uint64_t replica_commit) {
// and we will go to recovery.
// By adding this lock, we can avoid that, and go to RECOVERY immediately.
std::unique_lock client_guard{client_lock_};
DLOG(INFO) << "Replica timestamp: " << replica_commit;
DLOG(INFO) << "Last commit: " << storage_->last_commit_timestamp_;
SPDLOG_INFO("Replica timestamp: {}", replica_commit);
SPDLOG_INFO("Last commit: {}", storage_->last_commit_timestamp_);
if (storage_->last_commit_timestamp_.load() == replica_commit) {
replica_state_.store(replication::ReplicaState::READY);
return;
@ -346,7 +343,8 @@ uint64_t Storage::ReplicationClient::ReplicateCurrentWal() {
auto stream = TransferCurrentWalFile();
stream.AppendFilename(wal_file->Path().filename());
utils::InputFile file;
CHECK(file.Open(wal_file->Path())) << "Failed to open current WAL file!";
MG_ASSERT(file.Open(storage_->wal_file_->Path()),
"Failed to open current WAL file!");
const auto [buffer, buffer_size] = wal_file->CurrentFileBuffer();
stream.AppendSize(file.GetSize() + buffer_size);
stream.AppendFileData(&file);
@ -393,7 +391,7 @@ Storage::ReplicationClient::GetRecoverySteps(
auto locker_acc = file_locker->Access();
auto wal_files = durability::GetWalFiles(
storage_->wal_directory_, storage_->uuid_, current_wal_seq_num);
CHECK(wal_files) << "Wal files could not be loaded";
MG_ASSERT(wal_files, "Wal files could not be loaded");
auto snapshot_files = durability::GetSnapshotFiles(
storage_->snapshot_directory_, storage_->uuid_);
@ -410,7 +408,7 @@ Storage::ReplicationClient::GetRecoverySteps(
if (wal_files->empty()) {
if (current_wal_from_timestamp &&
replica_commit >= *current_wal_from_timestamp) {
CHECK(current_wal_seq_num);
MG_ASSERT(current_wal_seq_num);
recovery_steps.emplace_back(RecoveryCurrentWal{*current_wal_seq_num});
return recovery_steps;
}
@ -426,7 +424,7 @@ Storage::ReplicationClient::GetRecoverySteps(
// if there are no finalized WAL files, snapshot left the current WAL
// as the WAL file containing a transaction before snapshot creation
// so we can be sure that the current WAL is present
CHECK(current_wal_seq_num);
MG_ASSERT(current_wal_seq_num);
recovery_steps.emplace_back(RecoveryCurrentWal{*current_wal_seq_num});
return recovery_steps;
}
@ -438,7 +436,7 @@ Storage::ReplicationClient::GetRecoverySteps(
// if the last finalized WAL is before the replica commit
// then we can recover only from the current WAL
if (rwal_it->to_timestamp <= replica_commit) {
CHECK(current_wal_seq_num);
MG_ASSERT(current_wal_seq_num);
recovery_steps.emplace_back(RecoveryCurrentWal{*current_wal_seq_num});
return recovery_steps;
}
@ -483,7 +481,7 @@ Storage::ReplicationClient::GetRecoverySteps(
previous_seq_num = rwal_it->seq_num;
}
CHECK(latest_snapshot) << "Invalid durability state, missing snapshot";
MG_ASSERT(latest_snapshot, "Invalid durability state, missing snapshot");
// We didn't manage to find a WAL chain, we need to send the latest snapshot
// with its WALs
locker_acc.AddPath(latest_snapshot->path);
@ -498,7 +496,8 @@ Storage::ReplicationClient::GetRecoverySteps(
// before its creation
if (latest_snapshot->start_timestamp < wal_it->to_timestamp) {
if (latest_snapshot->start_timestamp < wal_it->from_timestamp) {
CHECK(wal_it != wal_files->begin()) << "Invalid durability files state";
MG_ASSERT(wal_it != wal_files->begin(),
"Invalid durability files state");
--wal_it;
}
break;

View File
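The replication server below also shows how severities were remapped onto the levels the user-configurable log level now controls: per-RPC messages go to debug and per-delta messages to trace, so they only appear when the user lowers the level. The actual flag handling lives outside this diff; the helper below is only a hypothetical sketch of wiring a level name onto spdlog's global filter.

#include <string>
#include <spdlog/spdlog.h>

// Hypothetical helper; Memgraph's real flag parsing is not part of this diff.
void SetLogLevel(const std::string &level_name) {
  // from_str accepts spdlog's own level names ("trace", "debug", "info", "warn", ...).
  spdlog::set_level(spdlog::level::from_str(level_name));
}

int main() {
  SetLogLevel("warn");
  spdlog::debug("Received SnapshotRpc");                       // filtered out at warn
  spdlog::warn("Couldn't replicate data to {}", "replica_1");  // still printed
}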

@ -27,27 +27,27 @@ Storage::ReplicationServer::ReplicationServer(
rpc_server_->Register<HeartbeatRpc>(
[this](auto *req_reader, auto *res_builder) {
DLOG(INFO) << "Received HeartbeatRpc";
spdlog::debug("Received HeartbeatRpc");
this->HeartbeatHandler(req_reader, res_builder);
});
rpc_server_->Register<AppendDeltasRpc>(
[this](auto *req_reader, auto *res_builder) {
DLOG(INFO) << "Received AppendDeltasRpc:";
spdlog::debug("Received AppendDeltasRpc");
this->AppendDeltasHandler(req_reader, res_builder);
});
rpc_server_->Register<SnapshotRpc>(
[this](auto *req_reader, auto *res_builder) {
DLOG(INFO) << "Received SnapshotRpc";
spdlog::debug("Received SnapshotRpc");
this->SnapshotHandler(req_reader, res_builder);
});
rpc_server_->Register<WalFilesRpc>(
[this](auto *req_reader, auto *res_builder) {
DLOG(INFO) << "Received WalFilesRpc";
spdlog::debug("Received WalFilesRpc");
this->WalFilesHandler(req_reader, res_builder);
});
rpc_server_->Register<CurrentWalRpc>(
[this](auto *req_reader, auto *res_builder) {
DLOG(INFO) << "Received CurrentWalRpc";
spdlog::debug("Received CurrentWalRpc");
this->CurrentWalHandler(req_reader, res_builder);
});
rpc_server_->Start();
@ -70,7 +70,7 @@ void Storage::ReplicationServer::AppendDeltasHandler(
replication::Decoder decoder(req_reader);
auto maybe_epoch_id = decoder.ReadString();
CHECK(maybe_epoch_id) << "Invalid replication message";
MG_ASSERT(maybe_epoch_id, "Invalid replication message");
if (*maybe_epoch_id != storage_->epoch_id_) {
storage_->epoch_history_.emplace_back(std::move(storage_->epoch_id_),
@ -82,7 +82,7 @@ void Storage::ReplicationServer::AppendDeltasHandler(
[&]() -> std::pair<uint64_t, durability::WalDeltaData> {
try {
auto timestamp = ReadWalDeltaHeader(&decoder);
DLOG(INFO) << " Timestamp " << timestamp;
SPDLOG_INFO(" Timestamp {}", timestamp);
auto delta = ReadWalDeltaData(&decoder);
return {timestamp, delta};
} catch (const slk::SlkReaderException &) {
@ -97,7 +97,7 @@ void Storage::ReplicationServer::AppendDeltasHandler(
// Empty the stream
bool transaction_complete = false;
while (!transaction_complete) {
DLOG(INFO) << "Skipping delta";
SPDLOG_INFO("Skipping delta");
const auto [timestamp, delta] = read_delta();
transaction_complete =
durability::IsWalDeltaDataTypeTransactionEnd(delta.type);
@ -115,8 +115,8 @@ void Storage::ReplicationServer::AppendDeltasHandler(
storage_->wal_file_.reset();
storage_->wal_seq_num_ = req.seq_num;
} else {
CHECK(storage_->wal_file_->SequenceNumber() == req.seq_num)
<< "Invalid sequence number of current wal file";
MG_ASSERT(storage_->wal_file_->SequenceNumber() == req.seq_num,
"Invalid sequence number of current wal file");
storage_->wal_seq_num_ = req.seq_num + 1;
}
} else {
@ -141,20 +141,20 @@ void Storage::ReplicationServer::AppendDeltasHandler(
bool transaction_complete = false;
for (uint64_t i = 0; !transaction_complete; ++i) {
DLOG(INFO) << " Delta " << i;
SPDLOG_INFO(" Delta {}", i);
const auto [timestamp, delta] = read_delta();
switch (delta.type) {
case durability::WalDeltaData::Type::VERTEX_CREATE: {
DLOG(INFO) << " Create vertex "
<< delta.vertex_create_delete.gid.AsUint();
spdlog::trace(" Create vertex {}",
delta.vertex_create_delete.gid.AsUint());
auto transaction = get_transaction(timestamp);
transaction->CreateVertex(delta.vertex_create_delete.gid);
break;
}
case durability::WalDeltaData::Type::VERTEX_DELETE: {
DLOG(INFO) << " Delete vertex "
<< delta.vertex_create_delete.gid.AsUint();
spdlog::trace(" Delete vertex {}",
delta.vertex_create_delete.gid.AsUint());
auto transaction = get_transaction(timestamp);
auto vertex = transaction->FindVertex(delta.vertex_create_delete.gid,
storage::View::NEW);
@ -165,9 +165,9 @@ void Storage::ReplicationServer::AppendDeltasHandler(
break;
}
case durability::WalDeltaData::Type::VERTEX_ADD_LABEL: {
DLOG(INFO) << " Vertex "
<< delta.vertex_add_remove_label.gid.AsUint()
<< " add label " << delta.vertex_add_remove_label.label;
spdlog::trace(" Vertex {} add label {}",
delta.vertex_add_remove_label.gid.AsUint(),
delta.vertex_add_remove_label.label);
auto transaction = get_transaction(timestamp);
auto vertex = transaction->FindVertex(delta.vertex_add_remove_label.gid,
storage::View::NEW);
@ -179,9 +179,9 @@ void Storage::ReplicationServer::AppendDeltasHandler(
break;
}
case durability::WalDeltaData::Type::VERTEX_REMOVE_LABEL: {
DLOG(INFO) << " Vertex "
<< delta.vertex_add_remove_label.gid.AsUint()
<< " remove label " << delta.vertex_add_remove_label.label;
spdlog::trace(" Vertex {} remove label {}",
delta.vertex_add_remove_label.gid.AsUint(),
delta.vertex_add_remove_label.label);
auto transaction = get_transaction(timestamp);
auto vertex = transaction->FindVertex(delta.vertex_add_remove_label.gid,
storage::View::NEW);
@ -193,11 +193,10 @@ void Storage::ReplicationServer::AppendDeltasHandler(
break;
}
case durability::WalDeltaData::Type::VERTEX_SET_PROPERTY: {
DLOG(INFO) << " Vertex "
<< delta.vertex_edge_set_property.gid.AsUint()
<< " set property "
<< delta.vertex_edge_set_property.property << " to "
<< delta.vertex_edge_set_property.value;
spdlog::trace(" Vertex {} set property {} to {}",
delta.vertex_edge_set_property.gid.AsUint(),
delta.vertex_edge_set_property.property,
delta.vertex_edge_set_property.value);
auto transaction = get_transaction(timestamp);
auto vertex = transaction->FindVertex(
delta.vertex_edge_set_property.gid, storage::View::NEW);
@ -210,12 +209,12 @@ void Storage::ReplicationServer::AppendDeltasHandler(
break;
}
case durability::WalDeltaData::Type::EDGE_CREATE: {
DLOG(INFO) << " Create edge "
<< delta.edge_create_delete.gid.AsUint() << " of type "
<< delta.edge_create_delete.edge_type << " from vertex "
<< delta.edge_create_delete.from_vertex.AsUint()
<< " to vertex "
<< delta.edge_create_delete.to_vertex.AsUint();
spdlog::trace(
" Create edge {} of type {} from vertex {} to vertex {}",
delta.edge_create_delete.gid.AsUint(),
delta.edge_create_delete.edge_type,
delta.edge_create_delete.from_vertex.AsUint(),
delta.edge_create_delete.to_vertex.AsUint());
auto transaction = get_transaction(timestamp);
auto from_vertex = transaction->FindVertex(
delta.edge_create_delete.from_vertex, storage::View::NEW);
@ -232,12 +231,12 @@ void Storage::ReplicationServer::AppendDeltasHandler(
break;
}
case durability::WalDeltaData::Type::EDGE_DELETE: {
DLOG(INFO) << " Delete edge "
<< delta.edge_create_delete.gid.AsUint() << " of type "
<< delta.edge_create_delete.edge_type << " from vertex "
<< delta.edge_create_delete.from_vertex.AsUint()
<< " to vertex "
<< delta.edge_create_delete.to_vertex.AsUint();
spdlog::trace(
" Delete edge {} of type {} from vertex {} to vertex {}",
delta.edge_create_delete.gid.AsUint(),
delta.edge_create_delete.edge_type,
delta.edge_create_delete.from_vertex.AsUint(),
delta.edge_create_delete.to_vertex.AsUint());
auto transaction = get_transaction(timestamp);
auto from_vertex = transaction->FindVertex(
delta.edge_create_delete.from_vertex, storage::View::NEW);
@ -259,11 +258,10 @@ void Storage::ReplicationServer::AppendDeltasHandler(
break;
}
case durability::WalDeltaData::Type::EDGE_SET_PROPERTY: {
DLOG(INFO) << " Edge "
<< delta.vertex_edge_set_property.gid.AsUint()
<< " set property "
<< delta.vertex_edge_set_property.property << " to "
<< delta.vertex_edge_set_property.value;
spdlog::trace(" Edge {} set property {} to {}",
delta.vertex_edge_set_property.gid.AsUint(),
delta.vertex_edge_set_property.property,
delta.vertex_edge_set_property.value);
if (!storage_->config_.items.properties_on_edges)
throw utils::BasicException(
@ -334,7 +332,7 @@ void Storage::ReplicationServer::AppendDeltasHandler(
}
case durability::WalDeltaData::Type::TRANSACTION_END: {
DLOG(INFO) << " Transaction end";
spdlog::trace(" Transaction end");
if (!commit_timestamp_and_accessor ||
commit_timestamp_and_accessor->first != timestamp)
throw utils::BasicException("Invalid data!");
@ -346,8 +344,8 @@ void Storage::ReplicationServer::AppendDeltasHandler(
}
case durability::WalDeltaData::Type::LABEL_INDEX_CREATE: {
DLOG(INFO) << " Create label index on :"
<< delta.operation_label.label;
spdlog::trace(" Create label index on :{}",
delta.operation_label.label);
// Need to send the timestamp
if (commit_timestamp_and_accessor)
throw utils::BasicException("Invalid transaction!");
@ -357,8 +355,8 @@ void Storage::ReplicationServer::AppendDeltasHandler(
break;
}
case durability::WalDeltaData::Type::LABEL_INDEX_DROP: {
DLOG(INFO) << " Drop label index on :"
<< delta.operation_label.label;
spdlog::trace(" Drop label index on :{}",
delta.operation_label.label);
if (commit_timestamp_and_accessor)
throw utils::BasicException("Invalid transaction!");
if (!storage_->DropIndex(
@ -367,9 +365,9 @@ void Storage::ReplicationServer::AppendDeltasHandler(
break;
}
case durability::WalDeltaData::Type::LABEL_PROPERTY_INDEX_CREATE: {
DLOG(INFO) << " Create label+property index on :"
<< delta.operation_label_property.label << " ("
<< delta.operation_label_property.property << ")";
spdlog::trace(" Create label+property index on :{} ({})",
delta.operation_label_property.label,
delta.operation_label_property.property);
if (commit_timestamp_and_accessor)
throw utils::BasicException("Invalid transaction!");
if (!storage_->CreateIndex(
@ -381,9 +379,9 @@ void Storage::ReplicationServer::AppendDeltasHandler(
break;
}
case durability::WalDeltaData::Type::LABEL_PROPERTY_INDEX_DROP: {
DLOG(INFO) << " Drop label+property index on :"
<< delta.operation_label_property.label << " ("
<< delta.operation_label_property.property << ")";
spdlog::trace(" Drop label+property index on :{} ({})",
delta.operation_label_property.label,
delta.operation_label_property.property);
if (commit_timestamp_and_accessor)
throw utils::BasicException("Invalid transaction!");
if (!storage_->DropIndex(
@ -395,9 +393,9 @@ void Storage::ReplicationServer::AppendDeltasHandler(
break;
}
case durability::WalDeltaData::Type::EXISTENCE_CONSTRAINT_CREATE: {
DLOG(INFO) << " Create existence constraint on :"
<< delta.operation_label_property.label << " ("
<< delta.operation_label_property.property << ")";
spdlog::trace(" Create existence constraint on :{} ({})",
delta.operation_label_property.label,
delta.operation_label_property.property);
if (commit_timestamp_and_accessor)
throw utils::BasicException("Invalid transaction!");
auto ret = storage_->CreateExistenceConstraint(
@ -409,9 +407,9 @@ void Storage::ReplicationServer::AppendDeltasHandler(
break;
}
case durability::WalDeltaData::Type::EXISTENCE_CONSTRAINT_DROP: {
DLOG(INFO) << " Drop existence constraint on :"
<< delta.operation_label_property.label << " ("
<< delta.operation_label_property.property << ")";
spdlog::trace(" Drop existence constraint on :{} ({})",
delta.operation_label_property.label,
delta.operation_label_property.property);
if (commit_timestamp_and_accessor)
throw utils::BasicException("Invalid transaction!");
if (!storage_->DropExistenceConstraint(
@ -425,9 +423,8 @@ void Storage::ReplicationServer::AppendDeltasHandler(
case durability::WalDeltaData::Type::UNIQUE_CONSTRAINT_CREATE: {
std::stringstream ss;
utils::PrintIterable(ss, delta.operation_label_properties.properties);
DLOG(INFO) << " Create unique constraint on :"
<< delta.operation_label_properties.label << " (" << ss.str()
<< ")";
spdlog::trace(" Create unique constraint on :{} ({})",
delta.operation_label_properties.label, ss.str());
if (commit_timestamp_and_accessor)
throw utils::BasicException("Invalid transaction!");
std::set<PropertyId> properties;
@ -445,9 +442,8 @@ void Storage::ReplicationServer::AppendDeltasHandler(
case durability::WalDeltaData::Type::UNIQUE_CONSTRAINT_DROP: {
std::stringstream ss;
utils::PrintIterable(ss, delta.operation_label_properties.properties);
DLOG(INFO) << " Drop unique constraint on :"
<< delta.operation_label_properties.label << " (" << ss.str()
<< ")";
spdlog::trace(" Drop unique constraint on :{} ({})",
delta.operation_label_properties.label, ss.str());
if (commit_timestamp_and_accessor)
throw utils::BasicException("Invalid transaction!");
std::set<PropertyId> properties;
@ -484,8 +480,8 @@ void Storage::ReplicationServer::SnapshotHandler(slk::Reader *req_reader,
const auto maybe_snapshot_path =
decoder.ReadFile(storage_->snapshot_directory_);
CHECK(maybe_snapshot_path) << "Failed to load snapshot!";
DLOG(INFO) << "Received snapshot saved to " << *maybe_snapshot_path;
MG_ASSERT(maybe_snapshot_path, "Failed to load snapshot!");
spdlog::info("Received snapshot saved to {}", *maybe_snapshot_path);
std::unique_lock<utils::RWLock> storage_guard(storage_->main_lock_);
// Clear the database
@ -498,12 +494,12 @@ void Storage::ReplicationServer::SnapshotHandler(slk::Reader *req_reader,
storage_->indices_.label_property_index = LabelPropertyIndex(
&storage_->indices_, &storage_->constraints_, storage_->config_.items);
try {
DLOG(INFO) << "Loading snapshot";
spdlog::debug("Loading snapshot");
auto recovered_snapshot = durability::LoadSnapshot(
*maybe_snapshot_path, &storage_->vertices_, &storage_->edges_,
&storage_->epoch_history_, &storage_->name_id_mapper_,
&storage_->edge_count_, storage_->config_.items);
DLOG(INFO) << "Snapshot loaded successfully";
spdlog::debug("Snapshot loaded successfully");
// If this step is present it should always be the first step of
// the recovery so we use the UUID we read from the snapshot
storage_->uuid_ = std::move(recovered_snapshot.snapshot_info.uuid);
@ -518,7 +514,7 @@ void Storage::ReplicationServer::SnapshotHandler(slk::Reader *req_reader,
recovered_snapshot.indices_constraints, &storage_->indices_,
&storage_->constraints_, &storage_->vertices_);
} catch (const durability::RecoveryFailure &e) {
LOG(FATAL) << "Couldn't load the snapshot because of: " << e.what();
LOG_FATAL("Couldn't load the snapshot because of: {}", e.what());
}
storage_guard.unlock();
@ -551,7 +547,7 @@ void Storage::ReplicationServer::WalFilesHandler(slk::Reader *req_reader,
slk::Load(&req, req_reader);
const auto wal_file_number = req.file_number;
DLOG(INFO) << "Received WAL files: " << wal_file_number;
spdlog::debug("Received WAL files: {}", wal_file_number);
replication::Decoder decoder(req_reader);
@ -609,8 +605,8 @@ void Storage::ReplicationServer::CurrentWalHandler(slk::Reader *req_reader,
// Delete the old wal file
storage_->file_retainer_.DeleteFile(storage_->wal_file_->Path());
}
CHECK(storage_->config_.durability.snapshot_wal_mode ==
Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL);
MG_ASSERT(storage_->config_.durability.snapshot_wal_mode ==
Config::Durability::SnapshotWalMode::PERIODIC_SNAPSHOT_WITH_WAL);
storage_->wal_file_.emplace(std::move(path), storage_->config_.items,
&storage_->name_id_mapper_, wal_info.seq_num,
wal_info.from_timestamp, wal_info.to_timestamp,
@ -629,8 +625,8 @@ Storage::ReplicationServer::LoadWal(
replication::Decoder *decoder,
durability::RecoveredIndicesAndConstraints *indices_constraints) {
auto maybe_wal_path = decoder->ReadFile(storage_->wal_directory_, "_MAIN");
CHECK(maybe_wal_path) << "Failed to load WAL!";
DLOG(INFO) << "Received WAL saved to " << *maybe_wal_path;
MG_ASSERT(maybe_wal_path, "Failed to load WAL!");
spdlog::trace("Received WAL saved to {}", *maybe_wal_path);
try {
auto wal_info = durability::ReadWalInfo(*maybe_wal_path);
if (wal_info.epoch_id != storage_->epoch_id_) {
@ -653,11 +649,11 @@ Storage::ReplicationServer::LoadWal(
if (info.last_commit_timestamp) {
storage_->last_commit_timestamp_ = *info.last_commit_timestamp;
}
DLOG(INFO) << *maybe_wal_path << " loaded successfully";
spdlog::debug("{} loaded successfully", *maybe_wal_path);
return {std::move(wal_info), std::move(*maybe_wal_path)};
} catch (const durability::RecoveryFailure &e) {
LOG(FATAL) << "Couldn't recover WAL deltas from " << *maybe_wal_path
<< " because of: " << e.what();
LOG_FATAL("Couldn't recover WAL deltas from {} because of: {}",
*maybe_wal_path, e.what());
}
}
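The delta-processing loop above mixes the SPDLOG_INFO macro with direct spdlog::trace/debug calls. The macro form can additionally be compiled out by defining SPDLOG_ACTIVE_LEVEL above the message's level before including spdlog, which is presumably why it was chosen for the hottest path; a minimal sketch:

// Anything below info is removed at compile time; what remains is still
// filtered by the runtime level.
#define SPDLOG_ACTIVE_LEVEL SPDLOG_LEVEL_INFO
#include <cstdint>
#include <spdlog/spdlog.h>

int main() {
  uint64_t timestamp = 42;
  SPDLOG_DEBUG(" Timestamp {}", timestamp);  // compiled out by SPDLOG_ACTIVE_LEVEL
  SPDLOG_INFO(" Timestamp {}", timestamp);   // compiled in, subject to the runtime level
  spdlog::trace("Skipping delta");           // always compiled, runtime-filtered only
}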

View File

@ -48,8 +48,8 @@ void Encoder::WriteFileData(utils::InputFile *file) {
void Encoder::WriteFile(const std::filesystem::path &path) {
utils::InputFile file;
CHECK(file.Open(path)) << "Failed to open file " << path;
CHECK(path.has_filename()) << "Path does not have a filename!";
MG_ASSERT(file.Open(path), "Failed to open file {}", path);
MG_ASSERT(path.has_filename(), "Path does not have a filename!");
const auto &filename = path.filename().generic_string();
WriteString(filename);
auto file_size = file.GetSize();
@ -130,18 +130,18 @@ bool Decoder::SkipPropertyValue() {
std::optional<std::filesystem::path> Decoder::ReadFile(
const std::filesystem::path &directory, const std::string &suffix) {
CHECK(std::filesystem::exists(directory) &&
std::filesystem::is_directory(directory))
<< "Sent path for streamed files should be a valid directory!";
MG_ASSERT(std::filesystem::exists(directory) &&
std::filesystem::is_directory(directory),
"Sent path for streamed files should be a valid directory!");
utils::OutputFile file;
const auto maybe_filename = ReadString();
CHECK(maybe_filename) << "Filename missing for the file";
MG_ASSERT(maybe_filename, "Filename missing for the file");
const auto filename = *maybe_filename + suffix;
auto path = directory / filename;
file.Open(path, utils::OutputFile::Mode::OVERWRITE_EXISTING);
std::optional<size_t> maybe_file_size = ReadUint();
CHECK(maybe_file_size) << "File size missing";
MG_ASSERT(maybe_file_size, "File size missing");
auto file_size = *maybe_file_size;
uint8_t buffer[utils::kFileBufferSize];
while (file_size > 0) {

View File
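Several messages in this change format std::filesystem::path values directly (snapshot and WAL paths above, the storage directory below). Whether that works out of the box depends on fmt's ostream support or a formatter being visible through utils/logging.hpp, which this diff doesn't show; the sketch below uses the explicit .string() conversion that is safe either way.

#include <filesystem>
#include <spdlog/spdlog.h>

int main() {
  const std::filesystem::path path{"/var/lib/memgraph/snapshots/example"};
  // Explicit conversion works regardless of which fmt extensions are enabled.
  spdlog::info("Received snapshot saved to {}", path.string());
  // Passing the path object itself, as the surrounding code does, additionally
  // requires fmt's ostream support or a fmt::formatter for std::filesystem::path.
}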

@ -6,7 +6,6 @@
#include <variant>
#include <gflags/gflags.h>
#include <glog/logging.h>
#include "io/network/endpoint.hpp"
#include "storage/v2/durability/durability.hpp"
@ -18,6 +17,7 @@
#include "storage/v2/mvcc.hpp"
#include "storage/v2/replication/config.hpp"
#include "utils/file.hpp"
#include "utils/logging.hpp"
#include "utils/rw_lock.hpp"
#include "utils/spin_lock.hpp"
#include "utils/stat.hpp"
@ -32,7 +32,7 @@
namespace storage {
namespace {
constexpr uint16_t kEpochHistoryRetention = 1000;
[[maybe_unused]] constexpr uint16_t kEpochHistoryRetention = 1000;
} // namespace
auto AdvanceToVisibleVertex(utils::SkipList<Vertex>::Iterator it,
@ -348,12 +348,12 @@ Storage::Storage(Config config)
// holding the file opened.
lock_file_handle_.Open(lock_file_path_,
utils::OutputFile::Mode::OVERWRITE_EXISTING);
CHECK(lock_file_handle_.AcquireLock())
<< "Couldn't acquire lock on the storage directory "
<< config_.durability.storage_directory
<< "!\nAnother Memgraph process is currently running with the same "
"storage directory, please stop it first before starting this "
"process!";
MG_ASSERT(lock_file_handle_.AcquireLock(),
"Couldn't acquire lock on the storage directory {}"
"!\nAnother Memgraph process is currently running with the same "
"storage directory, please stop it first before starting this "
"process!",
config_.durability.storage_directory);
}
if (config_.durability.recover_on_startup) {
auto info = durability::RecoverData(
@ -390,19 +390,20 @@ Storage::Storage(Config config)
std::error_code item_error_code;
std::filesystem::rename(
item.path(), backup_curr / item.path().filename(), item_error_code);
CHECK(!item_error_code)
<< "Couldn't move " << what << " file " << item.path()
<< " because of: " << item_error_code.message();
MG_ASSERT(!item_error_code, "Couldn't move {} file {} because of: {}",
what, item.path(), item_error_code.message());
files_moved = true;
}
CHECK(!error_code) << "Couldn't backup " << what
<< " files because of: " << error_code.message();
MG_ASSERT(!error_code, "Couldn't backup {} files because of: {}", what,
error_code.message());
}
if (files_moved) {
spdlog::warn(
"Since Memgraph was not supposed to recover on startup and "
"durability is enabled, your current durability files will likely "
"be overridden. To prevent important data loss, Memgraph has stored "
"those files into a .backup directory inside the storage directory.");
}
LOG_IF(WARNING, files_moved)
<< "Since Memgraph was not supposed to recover on startup and "
"durability is enabled, your current durability files will likely "
"be overridden. To prevent important data loss, Memgraph has stored "
"those files into a .backup directory inside the storage directory.";
}
if (config_.durability.snapshot_wal_mode !=
Config::Durability::SnapshotWalMode::DISABLED) {
@ -469,8 +470,8 @@ VertexAccessor Storage::Accessor::CreateVertex() {
auto acc = storage_->vertices_.access();
auto delta = CreateDeleteObjectDelta(&transaction_);
auto [it, inserted] = acc.insert(Vertex{storage::Gid::FromUint(gid), delta});
CHECK(inserted) << "The vertex must be inserted here!";
CHECK(it != acc.end()) << "Invalid Vertex accessor!";
MG_ASSERT(inserted, "The vertex must be inserted here!");
MG_ASSERT(it != acc.end(), "Invalid Vertex accessor!");
delta->prev.Set(&*it);
return VertexAccessor(&*it, &transaction_, &storage_->indices_,
&storage_->constraints_, config_);
@ -491,8 +492,8 @@ VertexAccessor Storage::Accessor::CreateVertex(storage::Gid gid) {
auto acc = storage_->vertices_.access();
auto delta = CreateDeleteObjectDelta(&transaction_);
auto [it, inserted] = acc.insert(Vertex{gid, delta});
CHECK(inserted) << "The vertex must be inserted here!";
CHECK(it != acc.end()) << "Invalid Vertex accessor!";
MG_ASSERT(inserted, "The vertex must be inserted here!");
MG_ASSERT(it != acc.end(), "Invalid Vertex accessor!");
delta->prev.Set(&*it);
return VertexAccessor(&*it, &transaction_, &storage_->indices_,
&storage_->constraints_, config_);
@ -509,9 +510,9 @@ std::optional<VertexAccessor> Storage::Accessor::FindVertex(Gid gid,
}
Result<bool> Storage::Accessor::DeleteVertex(VertexAccessor *vertex) {
CHECK(vertex->transaction_ == &transaction_)
<< "VertexAccessor must be from the same transaction as the storage "
"accessor when deleting a vertex!";
MG_ASSERT(vertex->transaction_ == &transaction_,
"VertexAccessor must be from the same transaction as the storage "
"accessor when deleting a vertex!");
auto vertex_ptr = vertex->vertex_;
std::lock_guard<utils::SpinLock> guard(vertex_ptr->lock);
@ -531,9 +532,9 @@ Result<bool> Storage::Accessor::DeleteVertex(VertexAccessor *vertex) {
}
Result<bool> Storage::Accessor::DetachDeleteVertex(VertexAccessor *vertex) {
CHECK(vertex->transaction_ == &transaction_)
<< "VertexAccessor must be from the same transaction as the storage "
"accessor when deleting a vertex!";
MG_ASSERT(vertex->transaction_ == &transaction_,
"VertexAccessor must be from the same transaction as the storage "
"accessor when deleting a vertex!");
auto vertex_ptr = vertex->vertex_;
std::vector<std::tuple<EdgeTypeId, Vertex *, EdgeRef>> in_edges;
@ -557,8 +558,8 @@ Result<bool> Storage::Accessor::DetachDeleteVertex(VertexAccessor *vertex) {
&storage_->indices_, &storage_->constraints_, config_);
auto ret = DeleteEdge(&e);
if (ret.HasError()) {
CHECK(ret.GetError() == Error::SERIALIZATION_ERROR)
<< "Invalid database state!";
MG_ASSERT(ret.GetError() == Error::SERIALIZATION_ERROR,
"Invalid database state!");
return ret;
}
}
@ -568,8 +569,8 @@ Result<bool> Storage::Accessor::DetachDeleteVertex(VertexAccessor *vertex) {
&storage_->indices_, &storage_->constraints_, config_);
auto ret = DeleteEdge(&e);
if (ret.HasError()) {
CHECK(ret.GetError() == Error::SERIALIZATION_ERROR)
<< "Invalid database state!";
MG_ASSERT(ret.GetError() == Error::SERIALIZATION_ERROR,
"Invalid database state!");
return ret;
}
}
@ -583,7 +584,7 @@ Result<bool> Storage::Accessor::DetachDeleteVertex(VertexAccessor *vertex) {
if (!PrepareForWrite(&transaction_, vertex_ptr))
return Error::SERIALIZATION_ERROR;
CHECK(!vertex_ptr->deleted) << "Invalid database state!";
MG_ASSERT(!vertex_ptr->deleted, "Invalid database state!");
CreateAndLinkDelta(&transaction_, vertex_ptr, Delta::RecreateObjectTag());
vertex_ptr->deleted = true;
@ -594,12 +595,12 @@ Result<bool> Storage::Accessor::DetachDeleteVertex(VertexAccessor *vertex) {
Result<EdgeAccessor> Storage::Accessor::CreateEdge(VertexAccessor *from,
VertexAccessor *to,
EdgeTypeId edge_type) {
CHECK(from->transaction_ == to->transaction_)
<< "VertexAccessors must be from the same transaction when creating "
"an edge!";
CHECK(from->transaction_ == &transaction_)
<< "VertexAccessors must be from the same transaction in when "
"creating an edge!";
MG_ASSERT(from->transaction_ == to->transaction_,
"VertexAccessors must be from the same transaction when creating "
"an edge!");
MG_ASSERT(from->transaction_ == &transaction_,
"VertexAccessors must be from the same transaction in when "
"creating an edge!");
auto from_vertex = from->vertex_;
auto to_vertex = to->vertex_;
@ -636,8 +637,8 @@ Result<EdgeAccessor> Storage::Accessor::CreateEdge(VertexAccessor *from,
auto acc = storage_->edges_.access();
auto delta = CreateDeleteObjectDelta(&transaction_);
auto [it, inserted] = acc.insert(Edge(gid, delta));
CHECK(inserted) << "The edge must be inserted here!";
CHECK(it != acc.end()) << "Invalid Edge accessor!";
MG_ASSERT(inserted, "The edge must be inserted here!");
MG_ASSERT(it != acc.end(), "Invalid Edge accessor!");
edge = EdgeRef(&*it);
delta->prev.Set(&*it);
}
@ -662,12 +663,12 @@ Result<EdgeAccessor> Storage::Accessor::CreateEdge(VertexAccessor *from,
VertexAccessor *to,
EdgeTypeId edge_type,
storage::Gid gid) {
CHECK(from->transaction_ == to->transaction_)
<< "VertexAccessors must be from the same transaction when creating "
"an edge!";
CHECK(from->transaction_ == &transaction_)
<< "VertexAccessors must be from the same transaction in when "
"creating an edge!";
MG_ASSERT(from->transaction_ == to->transaction_,
"VertexAccessors must be from the same transaction when creating "
"an edge!");
MG_ASSERT(from->transaction_ == &transaction_,
"VertexAccessors must be from the same transaction in when "
"creating an edge!");
auto from_vertex = from->vertex_;
auto to_vertex = to->vertex_;
@ -713,8 +714,8 @@ Result<EdgeAccessor> Storage::Accessor::CreateEdge(VertexAccessor *from,
auto acc = storage_->edges_.access();
auto delta = CreateDeleteObjectDelta(&transaction_);
auto [it, inserted] = acc.insert(Edge(gid, delta));
CHECK(inserted) << "The edge must be inserted here!";
CHECK(it != acc.end()) << "Invalid Edge accessor!";
MG_ASSERT(inserted, "The edge must be inserted here!");
MG_ASSERT(it != acc.end(), "Invalid Edge accessor!");
edge = EdgeRef(&*it);
delta->prev.Set(&*it);
}
@ -736,9 +737,9 @@ Result<EdgeAccessor> Storage::Accessor::CreateEdge(VertexAccessor *from,
#endif
Result<bool> Storage::Accessor::DeleteEdge(EdgeAccessor *edge) {
CHECK(edge->transaction_ == &transaction_)
<< "EdgeAccessor must be from the same transaction as the storage "
"accessor when deleting an edge!";
MG_ASSERT(edge->transaction_ == &transaction_,
"EdgeAccessor must be from the same transaction as the storage "
"accessor when deleting an edge!");
auto edge_ref = edge->edge_;
auto edge_type = edge->edge_type_;
@ -773,12 +774,12 @@ Result<bool> Storage::Accessor::DeleteEdge(EdgeAccessor *edge) {
if (!PrepareForWrite(&transaction_, from_vertex))
return Error::SERIALIZATION_ERROR;
CHECK(!from_vertex->deleted) << "Invalid database state!";
MG_ASSERT(!from_vertex->deleted, "Invalid database state!");
if (to_vertex != from_vertex) {
if (!PrepareForWrite(&transaction_, to_vertex))
return Error::SERIALIZATION_ERROR;
CHECK(!to_vertex->deleted) << "Invalid database state!";
MG_ASSERT(!to_vertex->deleted, "Invalid database state!");
}
auto delete_edge_from_storage = [&edge_type, &edge_ref, this](auto *vertex,
@ -786,7 +787,7 @@ Result<bool> Storage::Accessor::DeleteEdge(EdgeAccessor *edge) {
std::tuple<EdgeTypeId, Vertex *, EdgeRef> link(edge_type, vertex, edge_ref);
auto it = std::find(edges->begin(), edges->end(), link);
if (config_.properties_on_edges) {
CHECK(it != edges->end()) << "Invalid database state!";
MG_ASSERT(it != edges->end(), "Invalid database state!");
} else if (it == edges->end()) {
return false;
}
@ -799,9 +800,9 @@ Result<bool> Storage::Accessor::DeleteEdge(EdgeAccessor *edge) {
auto op2 = delete_edge_from_storage(from_vertex, &to_vertex->in_edges);
if (config_.properties_on_edges) {
CHECK((op1 && op2)) << "Invalid database state!";
MG_ASSERT((op1 && op2), "Invalid database state!");
} else {
CHECK((op1 && op2) || (!op1 && !op2)) << "Invalid database state!";
MG_ASSERT((op1 && op2) || (!op1 && !op2), "Invalid database state!");
if (!op1 && !op2) {
// The edge is already deleted.
return false;
@ -855,8 +856,8 @@ void Storage::Accessor::AdvanceCommand() { ++transaction_.command_id; }
utils::BasicResult<ConstraintViolation, void> Storage::Accessor::Commit(
const std::optional<uint64_t> desired_commit_timestamp) {
CHECK(is_transaction_active_) << "The transaction is already terminated!";
CHECK(!transaction_.must_abort) << "The transaction can't be committed!";
MG_ASSERT(is_transaction_active_, "The transaction is already terminated!");
MG_ASSERT(!transaction_.must_abort, "The transaction can't be committed!");
if (transaction_.deltas.empty()) {
// We don't have to update the commit timestamp here because no one reads
@ -950,8 +951,8 @@ utils::BasicResult<ConstraintViolation, void> Storage::Accessor::Commit(
[&](auto &committed_transactions) {
// TODO: release lock, and update all deltas to have a local copy
// of the commit timestamp
CHECK(transaction_.commit_timestamp != nullptr)
<< "Invalid database state!";
MG_ASSERT(transaction_.commit_timestamp != nullptr,
"Invalid database state!");
transaction_.commit_timestamp->store(commit_timestamp,
std::memory_order_release);
#ifdef MG_ENTERPRISE
@ -986,7 +987,7 @@ utils::BasicResult<ConstraintViolation, void> Storage::Accessor::Commit(
}
void Storage::Accessor::Abort() {
CHECK(is_transaction_active_) << "The transaction is already terminated!";
MG_ASSERT(is_transaction_active_, "The transaction is already terminated!");
// We collect vertices and edges we've created here and then splice them into
// `deleted_vertices_` and `deleted_edges_` lists, instead of adding them one
@ -1008,7 +1009,7 @@ void Storage::Accessor::Abort() {
case Delta::Action::REMOVE_LABEL: {
auto it = std::find(vertex->labels.begin(), vertex->labels.end(),
current->label);
CHECK(it != vertex->labels.end()) << "Invalid database state!";
MG_ASSERT(it != vertex->labels.end(), "Invalid database state!");
std::swap(*it, *vertex->labels.rbegin());
vertex->labels.pop_back();
break;
@ -1016,7 +1017,7 @@ void Storage::Accessor::Abort() {
case Delta::Action::ADD_LABEL: {
auto it = std::find(vertex->labels.begin(), vertex->labels.end(),
current->label);
CHECK(it == vertex->labels.end()) << "Invalid database state!";
MG_ASSERT(it == vertex->labels.end(), "Invalid database state!");
vertex->labels.push_back(current->label);
break;
}
@ -1031,7 +1032,8 @@ void Storage::Accessor::Abort() {
current->vertex_edge.edge};
auto it = std::find(vertex->in_edges.begin(),
vertex->in_edges.end(), link);
CHECK(it == vertex->in_edges.end()) << "Invalid database state!";
MG_ASSERT(it == vertex->in_edges.end(),
"Invalid database state!");
vertex->in_edges.push_back(link);
break;
}
@ -1041,7 +1043,8 @@ void Storage::Accessor::Abort() {
current->vertex_edge.edge};
auto it = std::find(vertex->out_edges.begin(),
vertex->out_edges.end(), link);
CHECK(it == vertex->out_edges.end()) << "Invalid database state!";
MG_ASSERT(it == vertex->out_edges.end(),
"Invalid database state!");
vertex->out_edges.push_back(link);
// Increment edge count. We only increment the count here because
// the information in `ADD_IN_EDGE` and `Edge/RECREATE_OBJECT` is
@ -1056,7 +1059,8 @@ void Storage::Accessor::Abort() {
current->vertex_edge.edge};
auto it = std::find(vertex->in_edges.begin(),
vertex->in_edges.end(), link);
CHECK(it != vertex->in_edges.end()) << "Invalid database state!";
MG_ASSERT(it != vertex->in_edges.end(),
"Invalid database state!");
std::swap(*it, *vertex->in_edges.rbegin());
vertex->in_edges.pop_back();
break;
@ -1067,7 +1071,8 @@ void Storage::Accessor::Abort() {
current->vertex_edge.edge};
auto it = std::find(vertex->out_edges.begin(),
vertex->out_edges.end(), link);
CHECK(it != vertex->out_edges.end()) << "Invalid database state!";
MG_ASSERT(it != vertex->out_edges.end(),
"Invalid database state!");
std::swap(*it, *vertex->out_edges.rbegin());
vertex->out_edges.pop_back();
// Decrement edge count. We only decrement the count here because
@ -1124,7 +1129,7 @@ void Storage::Accessor::Abort() {
case Delta::Action::ADD_OUT_EDGE:
case Delta::Action::REMOVE_IN_EDGE:
case Delta::Action::REMOVE_OUT_EDGE: {
LOG(FATAL) << "Invalid database state!";
LOG_FATAL("Invalid database state!");
break;
}
}
@ -1549,7 +1554,7 @@ void Storage::CollectGarbage() {
guard = std::unique_lock<utils::SpinLock>(parent.edge->lock);
break;
case PreviousPtr::Type::DELTA:
LOG(FATAL) << "Invalid database state!";
LOG_FATAL("Invalid database state!");
}
}
if (delta.prev.Get() != prev) {
@ -1621,15 +1626,15 @@ void Storage::CollectGarbage() {
auto vertex_acc = vertices_.access();
while (!garbage_vertices_.empty() &&
garbage_vertices_.front().first < oldest_active_start_timestamp) {
CHECK(vertex_acc.remove(garbage_vertices_.front().second))
<< "Invalid database state!";
MG_ASSERT(vertex_acc.remove(garbage_vertices_.front().second),
"Invalid database state!");
garbage_vertices_.pop_front();
}
}
{
auto edge_acc = edges_.access();
for (auto edge : current_deleted_edges) {
CHECK(edge_acc.remove(edge)) << "Invalid database state!";
MG_ASSERT(edge_acc.remove(edge), "Invalid database state!");
}
}
}
@ -1881,7 +1886,7 @@ void Storage::AppendToWal(durability::StorageGlobalOperation operation,
void Storage::CreateSnapshot() {
#ifdef MG_ENTERPRISE
if (replication_role_.load() != ReplicationRole::MAIN) {
LOG(WARNING) << "Snapshots are disabled for replicas!";
spdlog::warn("Snapshots are disabled for replicas!");
return;
}
#endif
@ -1986,8 +1991,8 @@ utils::BasicResult<Storage::RegisterReplicaError> Storage::RegisterReplica(
std::string name, io::network::Endpoint endpoint,
const replication::ReplicationMode replication_mode,
const replication::ReplicationClientConfig &config) {
CHECK(replication_role_.load() == ReplicationRole::MAIN)
<< "Only main instance can register a replica!";
MG_ASSERT(replication_role_.load() == ReplicationRole::MAIN,
"Only main instance can register a replica!");
const bool name_exists = replication_clients_.WithLock([&](auto &clients) {
return std::any_of(clients.begin(), clients.end(),
@ -1998,9 +2003,9 @@ utils::BasicResult<Storage::RegisterReplicaError> Storage::RegisterReplica(
return RegisterReplicaError::NAME_EXISTS;
}
CHECK(replication_mode == replication::ReplicationMode::SYNC ||
!config.timeout)
<< "Only SYNC mode can have a timeout set";
MG_ASSERT(
replication_mode == replication::ReplicationMode::SYNC || !config.timeout,
"Only SYNC mode can have a timeout set");
auto client = std::make_unique<ReplicationClient>(
std::move(name), this, endpoint, replication_mode, config);
@ -2025,8 +2030,8 @@ utils::BasicResult<Storage::RegisterReplicaError> Storage::RegisterReplica(
}
bool Storage::UnregisterReplica(const std::string_view name) {
CHECK(replication_role_.load() == ReplicationRole::MAIN)
<< "Only main instance can unregister a replica!";
MG_ASSERT(replication_role_.load() == ReplicationRole::MAIN,
"Only main instance can unregister a replica!");
return replication_clients_.WithLock([&](auto &clients) {
return std::erase_if(
clients, [&](const auto &client) { return client->Name() == name; });
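
The replacements above follow a single mechanical pattern: glog's streaming CHECK(cond) << "msg" becomes MG_ASSERT(cond, "msg"), and LOG(FATAL) becomes LOG_FATAL(...), both presumably provided by utils/logging.hpp (included further down in this diff). That header is not shown in this excerpt, so the following is only a minimal sketch, assuming spdlog-backed macros with these names and an abort-on-failure policy:

// Hypothetical sketch of spdlog-backed assertion/fatal macros (names and
// behaviour assumed; the real utils/logging.hpp is not part of this excerpt).
#pragma once

#include <cstdlib>

#include <spdlog/spdlog.h>

// Log a critical message through the default spdlog logger and terminate.
#define LOG_FATAL(...)             \
  do {                             \
    spdlog::critical(__VA_ARGS__); \
    std::abort();                  \
  } while (0)

// Check a condition; on failure, log the message and abort the process.
#define MG_ASSERT(expr, ...)  \
  do {                        \
    if (!(expr)) {            \
      LOG_FATAL(__VA_ARGS__); \
    }                         \
  } while (0)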


@ -4,19 +4,18 @@
#include <tuple>
#include <vector>
#include "utils/spin_lock.hpp"
#include "storage/v2/delta.hpp"
#include "storage/v2/edge_ref.hpp"
#include "storage/v2/id_types.hpp"
#include "storage/v2/property_store.hpp"
#include "utils/spin_lock.hpp"
namespace storage {
struct Vertex {
Vertex(Gid gid, Delta *delta) : gid(gid), deleted(false), delta(delta) {
CHECK(delta == nullptr || delta->action == Delta::Action::DELETE_OBJECT)
<< "Vertex must be created with an initial DELETE_OBJECT delta!";
MG_ASSERT(delta == nullptr || delta->action == Delta::Action::DELETE_OBJECT,
"Vertex must be created with an initial DELETE_OBJECT delta!");
}
Gid gid;


@ -6,15 +6,13 @@
#include "storage/v2/id_types.hpp"
#include "storage/v2/indices.hpp"
#include "storage/v2/mvcc.hpp"
#include "utils/logging.hpp"
namespace storage {
std::optional<VertexAccessor> VertexAccessor::Create(Vertex *vertex,
Transaction *transaction,
Indices *indices,
Constraints *constraints,
Config::Items config,
View view) {
std::optional<VertexAccessor> VertexAccessor::Create(
Vertex *vertex, Transaction *transaction, Indices *indices,
Constraints *constraints, Config::Items config, View view) {
bool is_visible = true;
Delta *delta = nullptr;
{
@ -104,14 +102,14 @@ Result<bool> VertexAccessor::HasLabel(LabelId label, View view) const {
switch (delta.action) {
case Delta::Action::REMOVE_LABEL: {
if (delta.label == label) {
CHECK(has_label) << "Invalid database state!";
MG_ASSERT(has_label, "Invalid database state!");
has_label = false;
}
break;
}
case Delta::Action::ADD_LABEL: {
if (delta.label == label) {
CHECK(!has_label) << "Invalid database state!";
MG_ASSERT(!has_label, "Invalid database state!");
has_label = true;
}
break;
@ -155,7 +153,7 @@ Result<std::vector<LabelId>> VertexAccessor::Labels(View view) const {
case Delta::Action::REMOVE_LABEL: {
// Remove the label because we don't see the addition.
auto it = std::find(labels.begin(), labels.end(), delta.label);
CHECK(it != labels.end()) << "Invalid database state!";
MG_ASSERT(it != labels.end(), "Invalid database state!");
std::swap(*it, *labels.rbegin());
labels.pop_back();
break;
@ -163,7 +161,7 @@ Result<std::vector<LabelId>> VertexAccessor::Labels(View view) const {
case Delta::Action::ADD_LABEL: {
// Add the label because we don't see the removal.
auto it = std::find(labels.begin(), labels.end(), delta.label);
CHECK(it == labels.end()) << "Invalid database state!";
MG_ASSERT(it == labels.end(), "Invalid database state!");
labels.push_back(delta.label);
break;
}
@ -335,8 +333,8 @@ Result<std::map<PropertyId, PropertyValue>> VertexAccessor::Properties(
Result<std::vector<EdgeAccessor>> VertexAccessor::InEdges(
View view, const std::vector<EdgeTypeId> &edge_types,
const VertexAccessor *destination) const {
CHECK(!destination || destination->transaction_ == transaction_)
<< "Invalid accessor!";
MG_ASSERT(!destination || destination->transaction_ == transaction_,
"Invalid accessor!");
bool exists = true;
bool deleted = false;
std::vector<std::tuple<EdgeTypeId, Vertex *, EdgeRef>> in_edges;
@ -376,7 +374,7 @@ Result<std::vector<EdgeAccessor>> VertexAccessor::InEdges(
delta.vertex_edge.edge_type, delta.vertex_edge.vertex,
delta.vertex_edge.edge};
auto it = std::find(in_edges.begin(), in_edges.end(), link);
CHECK(it == in_edges.end()) << "Invalid database state!";
MG_ASSERT(it == in_edges.end(), "Invalid database state!");
in_edges.push_back(link);
break;
}
@ -392,7 +390,7 @@ Result<std::vector<EdgeAccessor>> VertexAccessor::InEdges(
delta.vertex_edge.edge_type, delta.vertex_edge.vertex,
delta.vertex_edge.edge};
auto it = std::find(in_edges.begin(), in_edges.end(), link);
CHECK(it != in_edges.end()) << "Invalid database state!";
MG_ASSERT(it != in_edges.end(), "Invalid database state!");
std::swap(*it, *in_edges.rbegin());
in_edges.pop_back();
break;
@ -428,8 +426,8 @@ Result<std::vector<EdgeAccessor>> VertexAccessor::InEdges(
Result<std::vector<EdgeAccessor>> VertexAccessor::OutEdges(
View view, const std::vector<EdgeTypeId> &edge_types,
const VertexAccessor *destination) const {
CHECK(!destination || destination->transaction_ == transaction_)
<< "Invalid accessor!";
MG_ASSERT(!destination || destination->transaction_ == transaction_,
"Invalid accessor!");
bool exists = true;
bool deleted = false;
std::vector<std::tuple<EdgeTypeId, Vertex *, EdgeRef>> out_edges;
@ -469,7 +467,7 @@ Result<std::vector<EdgeAccessor>> VertexAccessor::OutEdges(
delta.vertex_edge.edge_type, delta.vertex_edge.vertex,
delta.vertex_edge.edge};
auto it = std::find(out_edges.begin(), out_edges.end(), link);
CHECK(it == out_edges.end()) << "Invalid database state!";
MG_ASSERT(it == out_edges.end(), "Invalid database state!");
out_edges.push_back(link);
break;
}
@ -485,7 +483,7 @@ Result<std::vector<EdgeAccessor>> VertexAccessor::OutEdges(
delta.vertex_edge.edge_type, delta.vertex_edge.vertex,
delta.vertex_edge.edge};
auto it = std::find(out_edges.begin(), out_edges.end(), link);
CHECK(it != out_edges.end()) << "Invalid database state!";
MG_ASSERT(it != out_edges.end(), "Invalid database state!");
std::swap(*it, *out_edges.rbegin());
out_edges.pop_back();
break;
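
Note that the old CHECK macros appended their message with operator<< streaming, while the new MG_ASSERT calls pass the message as an additional argument; with spdlog/fmt underneath, that argument would presumably be a format string, so extra values could be interpolated with {} placeholders. A hypothetical example (no call in this diff actually uses placeholders; the variable names below are made up):

// Hypothetical formatted assertion message with fmt-style placeholders.
MG_ASSERT(found_edges == expected_edges, "Expected {} edges but found {}",
          expected_edges, found_edges);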


@ -4,4 +4,4 @@ set(telemetry_src_files
system_info.cpp)
add_library(telemetry_lib STATIC ${telemetry_src_files})
target_link_libraries(telemetry_lib glog mg-requests mg-kvstore)
target_link_libraries(telemetry_lib mg-requests mg-kvstore mg-utils)
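
Dropping glog from telemetry_lib and linking mg-utils instead presumably pulls the spdlog-based logging helpers in through the utils target. As a point of reference only (this snippet is not part of the Memgraph sources), the spdlog free functions used elsewhere in this diff, such as spdlog::warn, go through the default logger, whose verbosity can be tuned at runtime:

// Standalone spdlog usage sketch: runtime log-level filtering on the
// default logger (for illustration; not taken from this repository).
#include <spdlog/spdlog.h>

int main() {
  // Emit only warnings and above.
  spdlog::set_level(spdlog::level::warn);

  spdlog::info("This message is filtered out at the current level.");
  spdlog::warn("Snapshots are disabled for replicas!");  // emitted
  return 0;
}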

Some files were not shown because too many files have changed in this diff.