Use glog (Google logging) instead of the broken memgraph Logger.

Summary:
http://rpg.ifi.uzh.ch/docs/glog.html

Second phase, before the tests are complete.

Delete logging test.

Finish release logging.
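For readers who haven't used glog before, a minimal sketch of the API this commit switches to (my illustration, not part of the change; the program and messages are made up):

```cpp
// Minimal glog usage sketch. LOG(severity) is always compiled in;
// DLOG(severity) is compiled out in NDEBUG builds, which is why the old
// logger.trace()/logger.debug() calls map onto it throughout the diff below.
#include <glog/logging.h>

int main(int argc, char **argv) {
  google::InitGoogleLogging(argv[0]);  // one-time setup, typically at the top of main()
  FLAGS_logtostderr = true;            // glog runtime flag: log to stderr instead of files

  LOG(INFO) << "Starting " << 8 << " workers";         // release logging
  DLOG(WARNING) << "Chunk isn't complete!";            // debug-build-only logging
  LOG_IF(ERROR, argc < 1) << "Impossible argc value";  // conditional logging
  return 0;
}
```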

Reviewers: mislav.bradac, teon.banek, buda

Reviewed By: teon.banek

Subscribers: buda, pullbot

Differential Revision: https://phabricator.memgraph.io/D500
Dominik Gleich 2017-06-21 11:29:13 +02:00
parent 19c0dfe084
commit e2f3aba332
103 changed files with 583 additions and 1252 deletions

View File

@ -21,6 +21,7 @@ BASE_FLAGS = [
'-I./include',
'-I./libs/fmt',
'-I./libs/yaml-cpp',
'-I./libs/glog/include',
'-I./libs/googletest/googletest/include',
'-I./libs/googletest/googlemock/include',
'-I./libs/benchmark/include',

View File

@ -158,56 +158,6 @@ if(CLANG_TIDY)
endif()
# -----------------------------------------------------------------------------
# logging levels --------------------------------------------------------------
option (LOG_NO_STDOUT
"Disable logging to stdout. (Docker has a bug with large logs.)"
OFF)
if (LOG_NO_STDOUT)
add_definitions(-DLOG_NO_STDOUT)
endif()
message(STATUS "LOG_NO_STDOUT: ${LOG_NO_STDOUT}")
option(LOG_NO_TRACE "Disable trace logging" OFF)
message(STATUS "LOG_NO_TRACE: ${LOG_NO_TRACE}")
if (LOG_NO_TRACE)
add_definitions(-DLOG_NO_TRACE)
endif()
option(LOG_NO_DEBUG "Disable debug logging" OFF)
message(STATUS "LOG_NO_DEBUG: ${LOG_NO_DEBUG}")
if (LOG_NO_DEBUG)
add_definitions(-DLOG_NO_DEBUG)
endif()
option(LOG_NO_INFO "Disable info logging" OFF)
message(STATUS "LOG_NO_INFO: ${LOG_NO_INFO}")
if (LOG_NO_INFO)
add_definitions(-DLOG_NO_INFO)
endif()
option(LOG_NO_WARN "Disable warn logging" OFF)
message(STATUS "LOG_NO_WARN: ${LOG_NO_WARN}")
if (LOG_NO_WARN)
add_definitions(-DLOG_NO_WARN)
endif()
option(LOG_NO_ERROR "Disable error logging" OFF)
message(STATUS "LOG_NO_ERROR: ${LOG_NO_ERROR}")
if (LOG_NO_ERROR)
add_definitions(-DLOG_NO_ERROR)
endif()
# -----------------------------------------------------------------------------
# logger type
# the default logger is sync logger
# on: cmake ... -DSYNC_LOGGER=OFF ... async logger is going to be used
option(SYNC_LOGGER "Sync logger" ON)
message(STATUS "SYNC_LOGGER: ${SYNC_LOGGER}")
if (SYNC_LOGGER)
add_definitions(-DSYNC_LOGGER)
endif()
# -----------------------------------------------------------------------------
# custom assert control parameters
# Debug assert, if value is OFF debug asserts will be inactive.
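The compile-time LOG_NO_* and SYNC_LOGGER switches removed above get no direct replacement in the build system; a hedged sketch of the closest glog equivalent, runtime flags set in code (or on the command line once gflags is linked in), with illustrative values:

```cpp
// Sketch only: standard glog runtime flags that roughly cover the removed
// CMake options. The concrete values here are illustrative.
#include <glog/logging.h>

void ConfigureLogging(const char *program_name) {
  FLAGS_minloglevel = 1;      // 0=INFO, 1=WARNING, 2=ERROR, 3=FATAL; drops INFO output
  FLAGS_logtostderr = false;  // write log files rather than stderr
  FLAGS_log_dir = "/tmp";     // illustrative log directory
  google::InitGoogleLogging(program_name);
}
```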
@ -340,13 +290,6 @@ set(memgraph_src_files
${src_dir}/io/network/addrinfo.cpp
${src_dir}/io/network/network_endpoint.cpp
${src_dir}/io/network/socket.cpp
${src_dir}/logging/default.cpp
${src_dir}/logging/levels.cpp
${src_dir}/logging/log.cpp
${src_dir}/logging/logs/async_log.cpp
${src_dir}/logging/logs/sync_log.cpp
${src_dir}/logging/streams/stderr.cpp
${src_dir}/logging/streams/stdout.cpp
${src_dir}/query/common.cpp
${src_dir}/query/console.cpp
${src_dir}/query/engine.cpp
@ -374,8 +317,8 @@ set(memgraph_src_files
# -----------------------------------------------------------------------------
# memgraph_lib and memgraph_pic depend on these libraries
set(MEMGRAPH_ALL_LIBS gflags stdc++fs Threads::Threads fmt
antlr_opencypher_parser_lib dl ${CMAKE_SOURCE_DIR}/libs/glog/lib/libglog.a)
set(MEMGRAPH_ALL_LIBS stdc++fs Threads::Threads fmt
antlr_opencypher_parser_lib dl ${CMAKE_SOURCE_DIR}/libs/glog/lib/libglog.a gflags)
if (READLINE_FOUND)
list(APPEND MEMGRAPH_ALL_LIBS ${READLINE_LIBRARY})
endif()
@ -385,12 +328,6 @@ add_library(memgraph_lib STATIC ${memgraph_src_files})
target_link_libraries(memgraph_lib ${MEMGRAPH_ALL_LIBS})
add_dependencies(memgraph_lib generate_opencypher_parser
glog)
# STATIC PIC library used by query engine
add_library(memgraph_pic STATIC ${memgraph_src_files})
target_link_libraries(memgraph_pic ${MEMGRAPH_ALL_LIBS})
add_dependencies(memgraph_pic generate_opencypher_parser)
set_property(TARGET memgraph_pic PROPERTY POSITION_INDEPENDENT_CODE TRUE)
# -----------------------------------------------------------------------------
# proof of concepts
@ -406,8 +343,8 @@ if (ALL_TESTS OR BENCHMARK_TESTS OR CONCURRENT_TEST OR INTEGRATION_TEST
endif()
# -----------------------------------------------------------------------------
execute_process(
COMMAND ./recursive_include --roots ${src_dir} ${libs_dir} ${CMAKE_BINARY_DIR}/libs/gflags/include --start ${src_dir}/query/plan_template_cpp --copy ${CMAKE_BINARY_DIR}/include
add_custom_command(TARGET memgraph_lib
COMMAND ./recursive_include --roots ${src_dir} ${libs_dir} ${CMAKE_BINARY_DIR}/libs/gflags/include ${libs_dir}/glog/include --start ${src_dir}/query/plan_template_cpp --copy ${CMAKE_BINARY_DIR}/include
WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}/cmake
)

View File

@ -19,7 +19,11 @@ add_subdirectory(googletest)
# setup google flags
set(GFLAGS_BUILD_gflags_nothreads_LIB OFF)
set(GFLAGS_BUILD_gflags_LIB ON)
# Gflags has to be position independent, otherwise Glog complains.
set(CMAKE_CXX_FLAGS_SAVED ${CMAKE_CXX_FLAGS})
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fPIC")
add_subdirectory(gflags)
set(CMAKE_CXX_FLAGS ${CMAKE_CXX_FLAGS_SAVED})
# Setup google logging after gflags (so that glog can use it).
# We need to use `ExternalProject`, because currently glog's CMakeLists.txt

View File

@ -7,7 +7,6 @@
#include "communication/bolt/v1/constants.hpp"
#include "io/network/stream_buffer.hpp"
#include "logging/loggable.hpp"
#include "utils/assert.hpp"
#include "utils/bswap.hpp"
@ -26,12 +25,12 @@ namespace communication::bolt {
* size of a chunk in the Bolt protocol
*/
template <size_t Size = WHOLE_CHUNK_SIZE>
class Buffer : public Loggable {
class Buffer {
private:
using StreamBufferT = io::network::StreamBuffer;
public:
Buffer() : Loggable("Buffer") {}
Buffer() = default;
/**
* Allocates a new StreamBuffer from the internal buffer.

View File

@ -7,7 +7,6 @@
#include "communication/bolt/v1/constants.hpp"
#include "communication/bolt/v1/decoder/buffer.hpp"
#include "logging/loggable.hpp"
#include "utils/assert.hpp"
namespace communication::bolt {
@ -38,13 +37,12 @@ enum class ChunkState : uint8_t {
* chunk for validity and then copies only data from the chunk. The headers
* aren't copied so that the decoder can read only the raw encoded data.
*/
class ChunkedDecoderBuffer : public Loggable {
class ChunkedDecoderBuffer {
private:
using StreamBufferT = io::network::StreamBuffer;
public:
ChunkedDecoderBuffer(Buffer<> &buffer)
: Loggable("ChunkedDecoderBuffer"), buffer_(buffer) {}
ChunkedDecoderBuffer(Buffer<> &buffer) : buffer_(buffer) {}
/**
* Reads data from the internal buffer.
@ -76,7 +74,7 @@ class ChunkedDecoderBuffer : public Loggable {
uint8_t *data = buffer_.data();
size_t size = buffer_.size();
if (size < 2) {
logger.trace("Size < 2");
DLOG(WARNING) << "Size < 2";
return ChunkState::Partial;
}
@ -84,13 +82,13 @@ class ChunkedDecoderBuffer : public Loggable {
chunk_size <<= 8;
chunk_size += data[1];
if (size < chunk_size + 4) {
logger.trace("Chunk size is {} but only have {} data bytes.", chunk_size,
size);
DLOG(WARNING) << fmt::format(
"Chunk size is {} but only have {} data bytes.", chunk_size, size);
return ChunkState::Partial;
}
if (data[chunk_size + 2] != 0 || data[chunk_size + 3] != 0) {
logger.trace("Invalid chunk!");
DLOG(WARNING) << "Invalid chunk!";
buffer_.Shift(chunk_size + 4);
return ChunkState::Invalid;
}
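For context on what GetChunk is validating above, a hedged sketch (my illustration, not lines from the diff) of a well-formed chunk: a two-byte big-endian payload size, the payload itself, then a two-byte zero terminator.

```cpp
// Builds a chunk in the layout the decoder above checks: it requires at
// least size + 4 bytes and zero bytes at positions size + 2 and size + 3.
#include <cstdint>
#include <vector>

std::vector<uint8_t> MakeChunk(const std::vector<uint8_t> &payload) {
  std::vector<uint8_t> chunk;
  const uint16_t size = static_cast<uint16_t>(payload.size());
  chunk.push_back(static_cast<uint8_t>(size >> 8));    // high byte of the size header
  chunk.push_back(static_cast<uint8_t>(size & 0xFF));  // low byte of the size header
  chunk.insert(chunk.end(), payload.begin(), payload.end());
  chunk.push_back(0x00);  // terminator byte at data[size + 2]
  chunk.push_back(0x00);  // terminator byte at data[size + 3]
  return chunk;
}
```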

View File

@ -1,15 +1,15 @@
#pragma once
#include <string>
#include <glog/logging.h>
#include "communication/bolt/v1/codes.hpp"
#include "database/graph_db_accessor.hpp"
#include "logging/default.hpp"
#include "logging/logger.hpp"
#include "query/typed_value.hpp"
#include "utils/bswap.hpp"
#include "utils/underlying_cast.hpp"
#include <string>
namespace communication::bolt {
/**
@ -43,10 +43,9 @@ struct DecodedEdge {
* @tparam Buffer the input buffer that should be used
*/
template <typename Buffer>
class Decoder : public Loggable {
class Decoder {
public:
Decoder(Buffer &buffer)
: Loggable("communication::bolt::Decoder"), buffer_(buffer) {}
Decoder(Buffer &buffer) : buffer_(buffer) {}
/**
* Reads a TypedValue from the available data in the buffer.
@ -59,10 +58,10 @@ class Decoder : public Loggable {
bool ReadTypedValue(query::TypedValue *data) {
uint8_t value;
logger.trace("[ReadTypedValue] Start");
DLOG(INFO) << "[ReadTypedValue] Start";
if (!buffer_.Read(&value, 1)) {
logger.debug("[ReadTypedValue] Marker data missing!");
DLOG(WARNING) << "[ReadTypedValue] Marker data missing!";
return false;
}
@ -125,11 +124,11 @@ class Decoder : public Loggable {
*/
bool ReadTypedValue(query::TypedValue *data, query::TypedValue::Type type) {
if (!ReadTypedValue(data)) {
logger.debug("[ReadTypedValue] ReadTypedValue call failed!");
DLOG(WARNING) << "[ReadTypedValue] ReadTypedValue call failed!";
return false;
}
if (data->type() != type) {
logger.debug("[ReadTypedValue] Typed value has wrong type!");
DLOG(WARNING) << "[ReadTypedValue] Typed value has wrong type!";
return false;
}
return true;
@ -147,16 +146,16 @@ class Decoder : public Loggable {
bool ReadMessageHeader(Signature *signature, Marker *marker) {
uint8_t values[2];
logger.trace("[ReadMessageHeader] Start");
DLOG(INFO) << "[ReadMessageHeader] Start";
if (!buffer_.Read(values, 2)) {
logger.debug("[ReadMessageHeader] Marker data missing!");
DLOG(WARNING) << "[ReadMessageHeader] Marker data missing!";
return false;
}
*marker = (Marker)values[0];
*signature = (Signature)values[1];
logger.trace("[ReadMessageHeader] Success");
DLOG(WARNING) << "[ReadMessageHeader] Success";
return true;
}
@ -172,40 +171,40 @@ class Decoder : public Loggable {
uint8_t value[2];
query::TypedValue tv;
logger.trace("[ReadVertex] Start");
DLOG(INFO) << "[ReadVertex] Start";
if (!buffer_.Read(value, 2)) {
logger.debug("[ReadVertex] Missing marker and/or signature data!");
DLOG(WARNING) << "[ReadVertex] Missing marker and/or signature data!";
return false;
}
// check header
if (value[0] != underlying_cast(Marker::TinyStruct) + 3) {
logger.debug("[ReadVertex] Received invalid marker ({})!", value[0]);
DLOG(WARNING) << "[ReadVertex] Received invalid marker " << value[0];
return false;
}
if (value[1] != underlying_cast(Signature::Node)) {
logger.debug("[ReadVertex] Received invalid signature ({})!", value[1]);
DLOG(WARNING) << "[ReadVertex] Received invalid signature " << value[1];
return false;
}
// read ID
if (!ReadTypedValue(&tv, query::TypedValue::Type::Int)) {
logger.debug("[ReadVertex] Couldn't read ID!");
DLOG(WARNING) << "[ReadVertex] Couldn't read ID!";
return false;
}
data->id = tv.Value<int64_t>();
// read labels
if (!ReadTypedValue(&tv, query::TypedValue::Type::List)) {
logger.debug("[ReadVertex] Couldn't read labels!");
DLOG(WARNING) << "[ReadVertex] Couldn't read labels!";
return false;
}
auto &labels = tv.Value<std::vector<query::TypedValue>>();
data->labels.resize(labels.size());
for (size_t i = 0; i < labels.size(); ++i) {
if (labels[i].type() != query::TypedValue::Type::String) {
logger.debug("[ReadVertex] Label has wrong type!");
DLOG(WARNING) << "[ReadVertex] Label has wrong type!";
return false;
}
data->labels[i] = labels[i].Value<std::string>();
@ -213,12 +212,12 @@ class Decoder : public Loggable {
// read properties
if (!ReadTypedValue(&tv, query::TypedValue::Type::Map)) {
logger.debug("[ReadVertex] Couldn't read properties!");
DLOG(WARNING) << "[ReadVertex] Couldn't read properties!";
return false;
}
data->properties = tv.Value<std::map<std::string, query::TypedValue>>();
logger.trace("[ReadVertex] Success");
DLOG(INFO) << "[ReadVertex] Success";
return true;
}
@ -235,59 +234,59 @@ class Decoder : public Loggable {
uint8_t value[2];
query::TypedValue tv;
logger.trace("[ReadEdge] Start");
DLOG(INFO) << "[ReadEdge] Start";
if (!buffer_.Read(value, 2)) {
logger.debug("[ReadEdge] Missing marker and/or signature data!");
DLOG(WARNING) << "[ReadEdge] Missing marker and/or signature data!";
return false;
}
// check header
if (value[0] != underlying_cast(Marker::TinyStruct) + 5) {
logger.debug("[ReadEdge] Received invalid marker ({})!", value[0]);
DLOG(WARNING) << "[ReadEdge] Received invalid marker " << value[0];
return false;
}
if (value[1] != underlying_cast(Signature::Relationship)) {
logger.debug("[ReadEdge] Received invalid signature ({})!", value[1]);
DLOG(WARNING) << "[ReadEdge] Received invalid signature " << value[1];
return false;
}
// read ID
if (!ReadTypedValue(&tv, query::TypedValue::Type::Int)) {
logger.debug("[ReadEdge] couldn't read ID!");
DLOG(WARNING) << "[ReadEdge] couldn't read ID!";
return false;
}
data->id = tv.Value<int64_t>();
// read from
if (!ReadTypedValue(&tv, query::TypedValue::Type::Int)) {
logger.debug("[ReadEdge] Couldn't read from_id!");
DLOG(WARNING) << "[ReadEdge] Couldn't read from_id!";
return false;
}
data->from = tv.Value<int64_t>();
// read to
if (!ReadTypedValue(&tv, query::TypedValue::Type::Int)) {
logger.debug("[ReadEdge] Couldn't read to_id!");
DLOG(WARNING) << "[ReadEdge] Couldn't read to_id!";
return false;
}
data->to = tv.Value<int64_t>();
// read type
if (!ReadTypedValue(&tv, query::TypedValue::Type::String)) {
logger.debug("[ReadEdge] Couldn't read type!");
DLOG(WARNING) << "[ReadEdge] Couldn't read type!";
return false;
}
data->type = tv.Value<std::string>();
// read properties
if (!ReadTypedValue(&tv, query::TypedValue::Type::Map)) {
logger.debug("[ReadEdge] Couldn't read properties!");
DLOG(WARNING) << "[ReadEdge] Couldn't read properties!";
return false;
}
data->properties = tv.Value<std::map<std::string, query::TypedValue>>();
logger.trace("[ReadEdge] Success");
DLOG(INFO) << "ReadEdge] Success";
return true;
}
@ -297,15 +296,15 @@ class Decoder : public Loggable {
private:
bool ReadNull(const Marker &marker, query::TypedValue *data) {
logger.trace("[ReadNull] Start");
DLOG(INFO) << "[ReadNull] Start";
debug_assert(marker == Marker::Null, "Received invalid marker!");
*data = query::TypedValue::Null;
logger.trace("[ReadNull] Success");
DLOG(INFO) << "[ReadNull] Success";
return true;
}
bool ReadBool(const Marker &marker, query::TypedValue *data) {
logger.trace("[ReadBool] Start");
DLOG(INFO) << "[ReadBool] Start";
debug_assert(marker == Marker::False || marker == Marker::True,
"Received invalid marker!");
if (marker == Marker::False) {
@ -313,7 +312,7 @@ class Decoder : public Loggable {
} else {
*data = query::TypedValue(true);
}
logger.trace("[ReadBool] Success");
DLOG(INFO) << "[ReadBool] Success";
return true;
}
@ -321,50 +320,50 @@ class Decoder : public Loggable {
uint8_t value = underlying_cast(marker);
bool success = true;
int64_t ret;
logger.trace("[ReadInt] Start");
DLOG(INFO) << "[ReadInt] Start";
if (value >= 240 || value <= 127) {
logger.trace("[ReadInt] Found a TinyInt");
DLOG(INFO) << "[ReadInt] Found a TinyInt";
ret = value;
if (value >= 240) ret -= 256;
} else if (marker == Marker::Int8) {
logger.trace("[ReadInt] Found an Int8");
DLOG(INFO) << "[ReadInt] Found an Int8";
int8_t tmp;
if (!buffer_.Read(reinterpret_cast<uint8_t *>(&tmp), sizeof(tmp))) {
logger.debug("[ReadInt] Int8 missing data!");
DLOG(WARNING) << "[ReadInt] Int8 missing data!";
return false;
}
ret = tmp;
} else if (marker == Marker::Int16) {
logger.trace("[ReadInt] Found an Int16");
DLOG(INFO) << "[ReadInt] Found an Int16";
int16_t tmp;
if (!buffer_.Read(reinterpret_cast<uint8_t *>(&tmp), sizeof(tmp))) {
logger.debug("[ReadInt] Int16 missing data!");
DLOG(WARNING) << "[ReadInt] Int16 missing data!";
return false;
}
ret = bswap(tmp);
} else if (marker == Marker::Int32) {
logger.trace("[ReadInt] Found an Int32");
DLOG(INFO) << "[ReadInt] Found an Int32";
int32_t tmp;
if (!buffer_.Read(reinterpret_cast<uint8_t *>(&tmp), sizeof(tmp))) {
logger.debug("[ReadInt] Int32 missing data!");
DLOG(WARNING) << "[ReadInt] Int32 missing data!";
return false;
}
ret = bswap(tmp);
} else if (marker == Marker::Int64) {
logger.trace("[ReadInt] Found an Int64");
DLOG(INFO) << "[ReadInt] Found an Int64";
if (!buffer_.Read(reinterpret_cast<uint8_t *>(&ret), sizeof(ret))) {
logger.debug("[ReadInt] Int64 missing data!");
DLOG(WARNING) << "[ReadInt] Int64 missing data!";
return false;
}
ret = bswap(ret);
} else {
logger.debug("[ReadInt] Received invalid marker ({})!",
underlying_cast(marker));
DLOG(WARNING) << "[ReadInt] Received invalid marker "
<< underlying_cast(marker);
return false;
}
if (success) {
*data = query::TypedValue(ret);
logger.trace("[ReadInt] Success");
DLOG(INFO) << "[ReadInt] Success";
}
return success;
}
@ -372,99 +371,99 @@ class Decoder : public Loggable {
bool ReadDouble(const Marker marker, query::TypedValue *data) {
uint64_t value;
double ret;
logger.trace("[ReadDouble] Start");
DLOG(INFO) << "[ReadDouble] Start";
debug_assert(marker == Marker::Float64, "Received invalid marker!");
if (!buffer_.Read(reinterpret_cast<uint8_t *>(&value), sizeof(value))) {
logger.debug("[ReadDouble] Missing data!");
DLOG(WARNING) << "[ReadDouble] Missing data!";
return false;
}
value = bswap(value);
ret = *reinterpret_cast<double *>(&value);
*data = query::TypedValue(ret);
logger.trace("[ReadDouble] Success");
DLOG(INFO) << "[ReadDouble] Success";
return true;
}
int64_t ReadTypeSize(const Marker &marker, const uint8_t type) {
uint8_t value = underlying_cast(marker);
if ((value & 0xF0) == underlying_cast(MarkerTiny[type])) {
logger.trace("[ReadTypeSize] Found a TinyType");
DLOG(INFO) << "[ReadTypeSize] Found a TinyType";
return value & 0x0F;
} else if (marker == Marker8[type]) {
logger.trace("[ReadTypeSize] Found a Type8");
DLOG(INFO) << "[ReadTypeSize] Found a Type8";
uint8_t tmp;
if (!buffer_.Read(reinterpret_cast<uint8_t *>(&tmp), sizeof(tmp))) {
logger.debug("[ReadTypeSize] Type8 missing data!");
DLOG(WARNING) << "[ReadTypeSize] Type8 missing data!";
return -1;
}
return tmp;
} else if (marker == Marker16[type]) {
logger.trace("[ReadTypeSize] Found a Type16");
DLOG(INFO) << "[ReadTypeSize] Found a Type16";
uint16_t tmp;
if (!buffer_.Read(reinterpret_cast<uint8_t *>(&tmp), sizeof(tmp))) {
logger.debug("[ReadTypeSize] Type16 missing data!");
DLOG(WARNING) << "[ReadTypeSize] Type16 missing data!";
return -1;
}
tmp = bswap(tmp);
return tmp;
} else if (marker == Marker32[type]) {
logger.trace("[ReadTypeSize] Found a Type32");
DLOG(INFO) << "[ReadTypeSize] Found a Type32";
uint32_t tmp;
if (!buffer_.Read(reinterpret_cast<uint8_t *>(&tmp), sizeof(tmp))) {
logger.debug("[ReadTypeSize] Type32 missing data!");
DLOG(WARNING) << "[ReadTypeSize] Type32 missing data!";
return -1;
}
tmp = bswap(tmp);
return tmp;
} else {
logger.debug("[ReadTypeSize] Received invalid marker ({})!",
underlying_cast(marker));
DLOG(WARNING) << "[ReadTypeSize] Received invalid marker "
<< underlying_cast(marker);
return -1;
}
}
bool ReadString(const Marker &marker, query::TypedValue *data) {
logger.trace("[ReadString] Start");
DLOG(INFO) << "[ReadString] Start";
auto size = ReadTypeSize(marker, MarkerString);
if (size == -1) {
logger.debug("[ReadString] Couldn't get size!");
DLOG(WARNING) << "[ReadString] Couldn't get size!";
return false;
}
std::unique_ptr<uint8_t[]> ret(new uint8_t[size]);
if (!buffer_.Read(ret.get(), size)) {
logger.debug("[ReadString] Missing data!");
DLOG(WARNING) << "[ReadString] Missing data!";
return false;
}
*data = query::TypedValue(
std::string(reinterpret_cast<char *>(ret.get()), size));
logger.trace("[ReadString] Success");
DLOG(INFO) << "[ReadString] Success";
return true;
}
bool ReadList(const Marker &marker, query::TypedValue *data) {
logger.trace("[ReadList] Start");
DLOG(INFO) << "[ReadList] Start";
auto size = ReadTypeSize(marker, MarkerList);
if (size == -1) {
logger.debug("[ReadList] Couldn't get size!");
DLOG(WARNING) << "[ReadList] Couldn't get size!";
return false;
}
std::vector<query::TypedValue> ret(size);
for (int64_t i = 0; i < size; ++i) {
if (!ReadTypedValue(&ret[i])) {
logger.debug("[ReadList] Couldn't read element {}", i);
DLOG(WARNING) << "[ReadList] Couldn't read element {}", i;
return false;
}
}
*data = query::TypedValue(ret);
logger.trace("[ReadList] Success");
DLOG(INFO) << "[ReadList] Success";
return true;
}
bool ReadMap(const Marker &marker, query::TypedValue *data) {
logger.trace("[ReadMap] Start");
DLOG(INFO) << "[ReadMap] Start";
auto size = ReadTypeSize(marker, MarkerMap);
if (size == -1) {
logger.debug("[ReadMap] Couldn't get size!");
DLOG(WARNING) << "[ReadMap] Couldn't get size!";
return false;
}
@ -473,29 +472,29 @@ class Decoder : public Loggable {
std::map<std::string, query::TypedValue> ret;
for (int64_t i = 0; i < size; ++i) {
if (!ReadTypedValue(&tv)) {
logger.debug("[ReadMap] Couldn't read index {}", i);
DLOG(WARNING) << "[ReadMap] Couldn't read index " << i;
return false;
}
if (tv.type() != query::TypedValue::Type::String) {
logger.debug("[ReadMap] Index {} isn't a string!", i);
DLOG(WARNING) << "[ReadMap] Index " << i << " isn't a string!";
return false;
}
str = tv.Value<std::string>();
if (!ReadTypedValue(&tv)) {
logger.debug("[ReadMap] Couldn't read element {}", i);
DLOG(WARNING) << "[ReadMap] Couldn't read element " << i;
return false;
}
ret.insert(std::make_pair(str, tv));
}
if (ret.size() != size) {
logger.debug(
"[ReadMap] The client sent multiple objects with same indexes!");
DLOG(WARNING)
<< "[ReadMap] The client sent multiple objects with same indexes!";
return false;
}
*data = query::TypedValue(ret);
logger.trace("[ReadMap] Success");
DLOG(INFO) << "[ReadMap] Success";
return true;
}
};

View File

@ -2,8 +2,6 @@
#include "communication/bolt/v1/codes.hpp"
#include "database/graph_db_accessor.hpp"
#include "logging/default.hpp"
#include "logging/logger.hpp"
#include "query/typed_value.hpp"
#include "utils/bswap.hpp"
@ -35,12 +33,10 @@ namespace communication::bolt {
* @tparam Buffer the output buffer that should be used
*/
template <typename Buffer>
class BaseEncoder : public Loggable {
class BaseEncoder {
public:
BaseEncoder(Buffer &buffer, bool encode_ids = false)
: Loggable("communication::bolt::BaseEncoder"),
buffer_(buffer),
encode_ids_(encode_ids) {}
: buffer_(buffer), encode_ids_(encode_ids) {}
void WriteRAW(const uint8_t *data, uint64_t len) { buffer_.Write(data, len); }

View File

@ -5,8 +5,9 @@
#include <memory>
#include <vector>
#include <glog/logging.h>
#include "communication/bolt/v1/constants.hpp"
#include "logging/loggable.hpp"
#include "utils/bswap.hpp"
namespace communication::bolt {
@ -36,10 +37,9 @@ namespace communication::bolt {
* @tparam Socket the output socket that should be used
*/
template <class Socket>
class ChunkedEncoderBuffer : public Loggable {
class ChunkedEncoderBuffer {
public:
ChunkedEncoderBuffer(Socket &socket)
: Loggable("Chunked Encoder Buffer"), socket_(socket) {}
ChunkedEncoderBuffer(Socket &socket) : socket_(socket) {}
/**
* Writes n values into the buffer. If n is bigger than whole chunk size
@ -119,7 +119,7 @@ class ChunkedEncoderBuffer : public Loggable {
// Flush the whole buffer.
if (!socket_.Write(buffer_.data() + offset_, size_ - offset_)) return false;
logger.trace("Flushed {} bytes.", size_);
DLOG(INFO) << "Flushed << " << size_ << " bytes.";
// Cleanup.
Clear();
@ -143,7 +143,7 @@ class ChunkedEncoderBuffer : public Loggable {
// Flush the first chunk
if (!socket_.Write(buffer_.data(), first_chunk_size_)) return false;
logger.trace("Flushed {} bytes.", first_chunk_size_);
DLOG(INFO) << "Flushed << " << first_chunk_size_ << " bytes.";
// Cleanup.
// Here we use offset as a method of deleting from the front of the

View File

@ -15,16 +15,13 @@ namespace communication::bolt {
template <typename Buffer>
class Encoder : private BaseEncoder<Buffer> {
private:
using Loggable::logger;
using BaseEncoder<Buffer>::WriteRAW;
using BaseEncoder<Buffer>::WriteList;
using BaseEncoder<Buffer>::WriteMap;
using BaseEncoder<Buffer>::buffer_;
public:
Encoder(Buffer &buffer) : BaseEncoder<Buffer>(buffer) {
logger = logging::log->logger("communication::bolt::Encoder");
}
Encoder(Buffer &buffer) : BaseEncoder<Buffer>(buffer) {}
/**
* Writes a Record message. This method only stores data in the Buffer.

View File

@ -4,8 +4,6 @@
#include "communication/bolt/v1/encoder/encoder.hpp"
#include "query/typed_value.hpp"
#include "logging/default.hpp"
namespace communication::bolt {
/**

View File

@ -20,8 +20,6 @@
#include "io/network/stream_buffer.hpp"
#include "logging/loggable.hpp"
namespace communication::bolt {
/**
@ -32,15 +30,14 @@ namespace communication::bolt {
* @tparam Socket type of socket (could be a network socket or test socket)
*/
template <typename Socket>
class Session : public Loggable {
class Session {
private:
using OutputStream = ResultStream<Encoder<ChunkedEncoderBuffer<Socket>>>;
using StreamBuffer = io::network::StreamBuffer;
public:
Session(Socket &&socket, Dbms &dbms, QueryEngine<OutputStream> &query_engine)
: Loggable("communication::bolt::Session"),
socket_(std::move(socket)),
: socket_(std::move(socket)),
dbms_(dbms),
query_engine_(query_engine),
encoder_buffer_(socket_),
@ -70,23 +67,25 @@ class Session : public Loggable {
// while there is data in the buffers
while (buffer_.size() > 0 || decoder_buffer_.Size() > 0) {
if (LIKELY(connected_)) {
logger.debug("Decoding chunk of size {}", buffer_.size());
DLOG(INFO) << fmt::format("Decoding chunk of size {}", buffer_.size());
auto chunk_state = decoder_buffer_.GetChunk();
if (chunk_state == ChunkState::Partial) {
logger.trace("Chunk isn't complete!");
DLOG(WARNING) << "Chunk isn't complete!";
return;
} else if (chunk_state == ChunkState::Invalid) {
logger.trace("Chunk is invalid!");
DLOG(WARNING) << "Chunk is invalid!";
ClientFailureInvalidData();
return;
}
// if chunk_state == ChunkState::Whole then we continue with
// execution of the select below
} else if (buffer_.size() < HANDSHAKE_SIZE) {
logger.debug("Received partial handshake of size {}", buffer_.size());
DLOG(WARNING) << fmt::format("Received partial handshake of size {}",
buffer_.size());
return;
} else {
logger.debug("Decoding handshake of size {}", buffer_.size());
DLOG(WARNING) << fmt::format("Decoding handshake of size {}",
buffer_.size());
}
switch (state_) {
@ -115,8 +114,9 @@ class Session : public Loggable {
return;
}
logger.trace("Buffer size: {}", buffer_.size());
logger.trace("Decoder buffer size: {}", decoder_buffer_.Size());
DLOG(INFO) << fmt::format("Buffer size: {}", buffer_.size());
DLOG(INFO) << fmt::format("Decoder buffer size: {}",
decoder_buffer_.Size());
}
}
@ -140,7 +140,7 @@ class Session : public Loggable {
* Closes the session (client socket).
*/
void Close() {
logger.debug("Closing session");
DLOG(INFO) << "Closing session";
this->socket_.Close();
}
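A note on the DLOG/LOG split used in this session code: DLOG statements disappear entirely in NDEBUG builds (their arguments are not even evaluated), so they stand in for the old trace/debug levels, while LOG is always emitted. A small hedged sketch:

```cpp
// Illustrative only; shows which calls survive a release (NDEBUG) build.
#include <glog/logging.h>

void Example(size_t buffer_size) {
  DLOG(INFO) << "Decoding chunk of size " << buffer_size;  // debug builds only
  DLOG(WARNING) << "Chunk isn't complete!";                // debug builds only
  LOG(ERROR) << "Error occurred in this session";          // present in every build
}
```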

View File

@ -1,8 +1,10 @@
#pragma once
#include <fmt/format.h>
#include <glog/logging.h>
#include "communication/bolt/v1/codes.hpp"
#include "communication/bolt/v1/state.hpp"
#include "logging/default.hpp"
namespace communication::bolt {
@ -14,28 +16,27 @@ namespace communication::bolt {
*/
template <typename Session>
State StateErrorRun(Session &session) {
static Logger logger = logging::log->logger("State ERROR");
Marker marker;
Signature signature;
if (!session.decoder_.ReadMessageHeader(&signature, &marker)) {
logger.debug("Missing header data!");
DLOG(INFO) << "Missing header data!";
return State::Close;
}
logger.trace("Message signature is: 0x{:02X}", underlying_cast(signature));
DLOG(INFO) << fmt::format("Message signature is: 0x{:02X}",
underlying_cast(signature));
// clear the data buffer if it has any leftover data
session.encoder_buffer_.Clear();
if (signature == Signature::AckFailure || signature == Signature::Reset) {
if (signature == Signature::AckFailure)
logger.trace("AckFailure received");
DLOG(INFO) << "AckFailure received";
else
logger.trace("Reset received");
DLOG(INFO) << "Reset received";
if (!session.encoder_.MessageSuccess()) {
logger.debug("Couldn't send success message!");
DLOG(WARNING) << "Couldn't send success message!";
return State::Close;
}
return State::Executor;
@ -45,7 +46,8 @@ State StateErrorRun(Session &session) {
// all bolt client messages have less than 15 parameters
// so if we receive anything than a TinyStruct it's an error
if ((value & 0xF0) != underlying_cast(Marker::TinyStruct)) {
logger.debug("Expected TinyStruct marker, but received 0x{:02X}!", value);
DLOG(WARNING) << fmt::format(
"Expected TinyStruct marker, but received 0x{:02X}!", value);
return State::Close;
}
@ -54,14 +56,15 @@ State StateErrorRun(Session &session) {
query::TypedValue tv;
for (int i = 0; i < value; ++i) {
if (!session.decoder_.ReadTypedValue(&tv)) {
logger.debug("Couldn't clean up parameter {} / {}!", i, value);
DLOG(WARNING) << fmt::format("Couldn't clean up parameter {} / {}!", i,
value);
return State::Close;
}
}
// ignore this message
if (!session.encoder_.MessageIgnored()) {
logger.debug("Couldn't send ignored message!");
DLOG(WARNING) << "Couldn't send ignored message!";
return State::Close;
}

View File

@ -2,9 +2,10 @@
#include <string>
#include <glog/logging.h>
#include "communication/bolt/v1/codes.hpp"
#include "communication/bolt/v1/state.hpp"
#include "logging/default.hpp"
#include "query/exceptions.hpp"
#include "utils/exceptions.hpp"
@ -18,41 +19,39 @@ namespace communication::bolt {
*/
template <typename Session>
State StateExecutorRun(Session &session) {
// initialize logger
static Logger logger = logging::log->logger("State EXECUTOR");
Marker marker;
Signature signature;
if (!session.decoder_.ReadMessageHeader(&signature, &marker)) {
logger.debug("Missing header data!");
DLOG(WARNING) << "Missing header data!";
return State::Close;
}
if (signature == Signature::Run) {
if (marker != Marker::TinyStruct2) {
logger.debug("Expected TinyStruct2 marker, but received 0x{:02X}!",
underlying_cast(marker));
DLOG(WARNING) << fmt::format(
"Expected TinyStruct2 marker, but received 0x{:02X}!",
underlying_cast(marker));
return State::Close;
}
query::TypedValue query, params;
if (!session.decoder_.ReadTypedValue(&query,
query::TypedValue::Type::String)) {
logger.debug("Couldn't read query string!");
DLOG(WARNING) << "Couldn't read query string!";
return State::Close;
}
if (!session.decoder_.ReadTypedValue(&params,
query::TypedValue::Type::Map)) {
logger.debug("Couldn't read parameters!");
DLOG(WARNING) << "Couldn't read parameters!";
return State::Close;
}
auto db_accessor = session.dbms_.active();
logger.debug("[ActiveDB] '{}'", db_accessor->name());
DLOG(INFO) << fmt::format("[ActiveDB] '{}'", db_accessor->name());
try {
logger.trace("[Run] '{}'", query.Value<std::string>());
DLOG(INFO) << fmt::format("[Run] '{}'", query.Value<std::string>());
auto is_successfully_executed = session.query_engine_.Run(
query.Value<std::string>(), *db_accessor, session.output_stream_);
@ -73,10 +72,10 @@ State StateExecutorRun(Session &session) {
"concurrent access)"}});
if (!exec_fail_sent) {
logger.debug("Couldn't send failure message!");
DLOG(WARNING) << "Couldn't send failure message!";
return State::Close;
} else {
logger.debug("Query execution failed!");
DLOG(WARNING) << "Query execution failed!";
return State::Error;
}
} else {
@ -86,7 +85,7 @@ State StateExecutorRun(Session &session) {
// message which contains header data. The rest of this data (records
// and summary) will be sent after a PULL_ALL command from the client.
if (!session.encoder_buffer_.FlushFirstChunk()) {
logger.debug("Couldn't flush header data from the buffer!");
DLOG(WARNING) << "Couldn't flush header data from the buffer!";
return State::Close;
}
return State::Executor;
@ -98,9 +97,9 @@ State StateExecutorRun(Session &session) {
db_accessor->abort();
bool fail_sent = session.encoder_.MessageFailure(
{{"code", "Memgraph.Exception"}, {"message", e.what()}});
logger.debug("Error message: {}", e.what());
DLOG(WARNING) << fmt::format("Error message: {}", e.what());
if (!fail_sent) {
logger.debug("Couldn't send failure message!");
DLOG(WARNING) << "Couldn't send failure message!";
return State::Close;
}
return State::Error;
@ -111,10 +110,10 @@ State StateExecutorRun(Session &session) {
db_accessor->abort();
bool fail_sent = session.encoder_.MessageFailure(
{{"code", "Memgraph.Exception"}, {"message", e.what()}});
logger.debug("Error message: {}", e.what());
logger.debug("Error trace: {}", e.trace());
DLOG(WARNING) << fmt::format("Error message: {}", e.what());
DLOG(WARNING) << fmt::format("Error trace: {}", e.trace());
if (!fail_sent) {
logger.debug("Couldn't send failure message!");
DLOG(WARNING) << "Couldn't send failure message!";
return State::Close;
}
return State::Error;
@ -128,19 +127,20 @@ State StateExecutorRun(Session &session) {
{"message",
"An unknown exception occured, please contact your database "
"administrator."}});
logger.debug("std::exception {}", e.what());
DLOG(WARNING) << fmt::format("std::exception {}", e.what());
if (!fail_sent) {
logger.debug("Couldn't send failure message!");
DLOG(WARNING) << "Couldn't send failure message!";
return State::Close;
}
return State::Error;
}
} else if (signature == Signature::PullAll) {
logger.trace("[PullAll]");
DLOG(INFO) << "[PullAll]";
if (marker != Marker::TinyStruct) {
logger.debug("Expected TinyStruct marker, but received 0x{:02X}!",
underlying_cast(marker));
DLOG(WARNING) << fmt::format(
"Expected TinyStruct marker, but received 0x{:02X}!",
underlying_cast(marker));
return State::Close;
}
if (!session.encoder_buffer_.HasData()) {
@ -151,7 +151,7 @@ State StateExecutorRun(Session &session) {
"There is no data to "
"send, you have to execute a RUN command before a PULL_ALL!"}});
if (!data_fail_sent) {
logger.debug("Couldn't send failure message!");
DLOG(WARNING) << "Couldn't send failure message!";
return State::Close;
}
return State::Error;
@ -159,22 +159,23 @@ State StateExecutorRun(Session &session) {
// flush pending data to the client, the success message is streamed
// from the query engine, it contains statistics from the query run
if (!session.encoder_buffer_.Flush()) {
logger.debug("Couldn't flush data from the buffer!");
DLOG(WARNING) << "Couldn't flush data from the buffer!";
return State::Close;
}
return State::Executor;
} else if (signature == Signature::DiscardAll) {
logger.trace("[DiscardAll]");
DLOG(INFO) << "[DiscardAll]";
if (marker != Marker::TinyStruct) {
logger.debug("Expected TinyStruct marker, but received 0x{:02X}!",
underlying_cast(marker));
DLOG(WARNING) << fmt::format(
"Expected TinyStruct marker, but received 0x{:02X}!",
underlying_cast(marker));
return State::Close;
}
// clear all pending data and send a success message
session.encoder_buffer_.Clear();
if (!session.encoder_.MessageSuccess()) {
logger.debug("Couldn't send success message!");
DLOG(WARNING) << "Couldn't send success message!";
return State::Close;
}
return State::Executor;
@ -190,21 +191,22 @@ State StateExecutorRun(Session &session) {
// now this command only resets the session to a clean state. It
// does not IGNORE running and pending commands as it should.
if (marker != Marker::TinyStruct) {
logger.debug("Expected TinyStruct marker, but received 0x{:02X}!",
underlying_cast(marker));
DLOG(WARNING) << fmt::format(
"Expected TinyStruct marker, but received 0x{:02X}!",
underlying_cast(marker));
return State::Close;
}
// clear all pending data and send a success message
session.encoder_buffer_.Clear();
if (!session.encoder_.MessageSuccess()) {
logger.debug("Couldn't send success message!");
DLOG(WARNING) << "Couldn't send success message!";
return State::Close;
}
return State::Executor;
} else {
logger.debug("Unrecognized signature recieved (0x{:02X})!",
underlying_cast(signature));
DLOG(WARNING) << fmt::format("Unrecognized signature recieved (0x{:02X})!",
underlying_cast(signature));
return State::Close;
}
}

View File

@ -1,7 +1,8 @@
#pragma once
#include <glog/logging.h>
#include "communication/bolt/v1/state.hpp"
#include "logging/default.hpp"
namespace communication::bolt {
@ -15,11 +16,9 @@ static constexpr uint8_t protocol[4] = {0x00, 0x00, 0x00, 0x01};
*/
template <typename Session>
State StateHandshakeRun(Session &session) {
static Logger logger = logging::log->logger("State HANDSHAKE");
auto precmp = memcmp(session.buffer_.data(), preamble, sizeof(preamble));
if (UNLIKELY(precmp != 0)) {
logger.debug("Received a wrong preamble!");
DLOG(WARNING) << "Received a wrong preamble!";
return State::Close;
}
@ -28,7 +27,7 @@ State StateHandshakeRun(Session &session) {
// this will change in the future
if (!session.socket_.Write(protocol, sizeof(protocol))) {
logger.debug("Couldn't write handshake response!");
DLOG(WARNING) << "Couldn't write handshake response!";
return State::Close;
}
session.connected_ = true;

View File

@ -1,9 +1,11 @@
#pragma once
#include <fmt/format.h>
#include <glog/logging.h>
#include "communication/bolt/v1/codes.hpp"
#include "communication/bolt/v1/encoder/result_stream.hpp"
#include "communication/bolt/v1/state.hpp"
#include "logging/default.hpp"
#include "utils/likely.hpp"
namespace communication::bolt {
@ -15,49 +17,52 @@ namespace communication::bolt {
*/
template <typename Session>
State StateInitRun(Session &session) {
static Logger logger = logging::log->logger("State INIT");
logger.debug("Parsing message");
DLOG(INFO) << "Parsing message";
Marker marker;
Signature signature;
if (!session.decoder_.ReadMessageHeader(&signature, &marker)) {
logger.debug("Missing header data!");
DLOG(WARNING) << "Missing header data!";
return State::Close;
}
if (UNLIKELY(signature != Signature::Init)) {
logger.debug("Expected Init signature, but received 0x{:02X}!",
underlying_cast(signature));
DLOG(WARNING) << fmt::format(
"Expected Init signature, but received 0x{:02X}!",
underlying_cast(signature));
return State::Close;
}
if (UNLIKELY(marker != Marker::TinyStruct2)) {
logger.debug("Expected TinyStruct2 marker, but received 0x{:02X}!",
underlying_cast(marker));
logger.warn("The client sent malformed data, but we are continuing "
"because the official Neo4j Java driver sends malformed "
"data. D'oh!");
DLOG(WARNING) << fmt::format(
"Expected TinyStruct2 marker, but received 0x{:02X}!",
underlying_cast(marker));
DLOG(WARNING) << "The client sent malformed data, but we are continuing "
"because the official Neo4j Java driver sends malformed "
"data. D'oh!";
// TODO: this should be uncommented when the Neo4j Java driver is fixed
//return State::Close;
// return State::Close;
}
query::TypedValue client_name;
if (!session.decoder_.ReadTypedValue(&client_name,
query::TypedValue::Type::String)) {
logger.debug("Couldn't read client name!");
DLOG(WARNING) << "Couldn't read client name!";
return State::Close;
}
query::TypedValue metadata;
if (!session.decoder_.ReadTypedValue(&metadata,
query::TypedValue::Type::Map)) {
logger.debug("Couldn't read metadata!");
DLOG(WARNING) << "Couldn't read metadata!";
return State::Close;
}
logger.debug("Client connected '{}'", client_name.Value<std::string>());
LOG(INFO) << fmt::format("Client connected '{}'",
client_name.Value<std::string>())
<< std::endl;
if (!session.encoder_.MessageSuccess()) {
logger.debug("Couldn't send success message to the client!");
DLOG(WARNING) << "Couldn't send success message to the client!";
return State::Close;
}

View File

@ -6,12 +6,14 @@
#include <thread>
#include <vector>
#include <fmt/format.h>
#include <glog/logging.h>
#include "dbms/dbms.hpp"
#include "query/engine.hpp"
#include "communication/worker.hpp"
#include "io/network/event_listener.hpp"
#include "logging/default.hpp"
#include "utils/assert.hpp"
namespace communication {
@ -44,8 +46,7 @@ class Server
Server(Socket &&socket, Dbms &dbms, QueryEngine<OutputStream> &query_engine)
: socket_(std::forward<Socket>(socket)),
dbms_(dbms),
query_engine_(query_engine),
logger_(logging::log->logger("communication::Server")) {
query_engine_(query_engine) {
event_.data.fd = socket_;
// TODO: EPOLLET is hard to use -> figure out how should EPOLLET be used
@ -56,7 +57,7 @@ class Server
}
void Start(size_t n) {
logger_.info("Starting {} workers", n);
std::cout << fmt::format("Starting {} workers", n) << std::endl;
workers_.reserve(n);
for (size_t i = 0; i < n; ++i) {
workers_.push_back(
@ -64,24 +65,16 @@ class Server
dbms_, query_engine_));
workers_.back()->Start(alive_);
}
#ifdef LOG_NO_STDOUT
// TODO: Remove this when we switch to glog.
std::cout << "Server is fully armed and operational" << std::endl
<< "Listening on " << socket_.endpoint().address() << " at "
<< socket_.endpoint().port() << std::endl;
#endif
logger_.info("Server is fully armed and operational");
logger_.info("Listening on {} at {}", socket_.endpoint().address(),
socket_.endpoint().port());
std::cout << "Server is fully armed and operational" << std::endl;
std::cout << fmt::format("Listening on {} at {}",
socket_.endpoint().address(),
socket_.endpoint().port())
<< std::endl;
while (alive_) {
this->WaitAndProcessEvents();
}
#ifdef LOG_NO_STDOUT
// TODO: Remove this when we switch to glog.
std::cout << "Shutting down..." << std::endl;
#endif
logger_.info("Shutting down...");
for (auto &worker : workers_) worker->thread_.join();
}
@ -94,7 +87,7 @@ class Server
void OnConnect() {
debug_assert(idx_ < workers_.size(), "Invalid worker id.");
logger_.trace("on connect");
DLOG(INFO) << "on connect";
if (UNLIKELY(!workers_[idx_]->Accept(socket_))) return;
@ -112,7 +105,7 @@ class Server
template <class... Args>
void OnExceptionEvent(Event &event, Args &&... args) {
// TODO: Do something about it
logger_.warn("epoll exception");
DLOG(WARNING) << "epoll exception";
}
void OnCloseEvent(Event &event) { close(event.data.fd); }
@ -128,7 +121,6 @@ class Server
Dbms &dbms_;
QueryEngine<OutputStream> &query_engine_;
Event event_;
Logger logger_;
};
} // namespace communication

View File

@ -7,13 +7,14 @@
#include <sstream>
#include <thread>
#include <glog/logging.h>
#include "dbms/dbms.hpp"
#include "query/engine.hpp"
#include "communication/bolt/v1/session.hpp"
#include "io/network/network_error.hpp"
#include "io/network/stream_reader.hpp"
#include "logging/default.hpp"
namespace communication {
@ -44,12 +45,10 @@ class Worker
using sptr = std::shared_ptr<Worker<Session, OutputStream, Socket>>;
Worker(Dbms &dbms, QueryEngine<OutputStream> &query_engine)
: dbms_(dbms),
query_engine_(query_engine),
logger_(logging::log->logger("communication::Worker")) {}
: dbms_(dbms), query_engine_(query_engine) {}
Session &OnConnect(Socket &&socket) {
logger_.trace("Accepting connection on socket {}", socket.id());
DLOG(INFO) << "Accepting connection on socket " << socket.id();
// TODO fix session lifecycle handling
// dangling pointers are not cool :)
@ -58,28 +57,29 @@ class Worker
}
void OnError(Session &session) {
logger_.error("Error occured in this session");
LOG(ERROR) << "Error occured in this session";
OnClose(session);
}
void OnWaitTimeout() {}
void OnRead(Session &session) {
logger_.trace("OnRead");
DLOG(INFO) << "OnRead";
try {
session.Execute();
} catch (const std::exception &e) {
logger_.error("Error occured while executing statement.");
logger_.error("{}", e.what());
LOG(ERROR) << "Error occured while executing statement. " << std::endl
<< e.what();
// TODO: report to client
}
}
void OnClose(Session &session) {
logger_.info("Client {}:{} closed the connection.",
session.socket_.endpoint().address(),
session.socket_.endpoint().port());
std::cout << fmt::format("Client {}:{} closed the connection.",
session.socket_.endpoint().address(),
session.socket_.endpoint().port())
<< std::endl;
// TODO: remove socket from epoll object
session.Close();
delete &session;
@ -87,8 +87,7 @@ class Worker
template <class... Args>
void OnException(Session &session, Args &&... args) {
logger_.error("Error occured in this session");
logger_.error(args...);
LOG(ERROR) << "Error occured in this session";
// TODO: Do something about it
}
@ -104,6 +103,5 @@ class Worker
private:
Dbms &dbms_;
QueryEngine<OutputStream> &query_engine_;
Logger logger_;
};
}

View File

@ -5,8 +5,8 @@
#include <experimental/filesystem>
namespace fs = std::experimental::filesystem;
#include "logging/logger.hpp"
#include "logging/streams/stdout.hpp"
#include <glog/logging.h>
#include "query/frontend/stripped.hpp"
#include "utils/command_line/arguments.hpp"
#include "utils/exceptions.hpp"
@ -48,22 +48,18 @@ std::string ExtractQuery(const fs::path &path) {
}
int main(int argc, char **argv) {
logging::init_sync();
logging::log->pipe(std::make_unique<Stdout>());
auto logger = logging::log->logger("CopyHardcodedQueries");
logger.info("{}", logging::log->type());
google::InitGoogleLogging(argv[0]);
REGISTER_ARGS(argc, argv);
auto src_path = fs::path(
GET_ARG("--src", "tests/integration/hardcoded_queries").get_string());
logger.info("Src path is: {}", src_path);
LOG(INFO) << "Src path is: " << src_path;
permanent_assert(fs::exists(src_path), "src folder must exist");
auto dst_path =
fs::path(GET_ARG("--dst", "build/compiled/hardcode").get_string());
logger.info("Dst path is: {}", dst_path);
LOG(INFO) << "Dst path is: " << dst_path;
fs::create_directories(dst_path);
auto src_files = utils::LoadFilePaths(src_path, "cpp");
@ -73,7 +69,7 @@ int main(int argc, char **argv) {
auto query_hash = query::StrippedQuery(query).hash();
auto dst_file = dst_path / fs::path(std::to_string(query_hash) + ".cpp");
fs::copy(src_file, dst_file, fs::copy_options::overwrite_existing);
logger.info("{} - (copy) -> {}", src_file, dst_file);
LOG(INFO) << src_file << " - (copy) -> " << dst_file;
}
auto hpp_files = utils::LoadFilePaths(src_path, "hpp");

View File

@ -6,10 +6,10 @@
#include <mutex>
#include <utility>
#include "gflags/gflags.h"
#include <gflags/gflags.h>
#include <glog/logging.h>
#include "data_structures/concurrent/push_queue.hpp"
#include "logging/loggable.hpp"
#include "threading/sync/spinlock.hpp"
#include "utils/executioner.hpp"
@ -28,9 +28,9 @@ DECLARE_int32(skiplist_gc_interval);
* collected.
*/
template <class TNode>
class SkipListGC : public Loggable {
class SkipListGC {
public:
explicit SkipListGC() : Loggable("SkipListGc") {
explicit SkipListGC() {
executor_job_id_ = GetExecutioner().RegisterJob(
std::bind(&SkipListGC::GarbageCollect, this));
}
@ -133,7 +133,7 @@ class SkipListGC : public Loggable {
++destroyed;
}
oldest_not_deletable.delete_tail();
if (destroyed) logger.trace("Number of destroyed elements: {}", destroyed);
if (destroyed) DLOG(INFO) << "Number of destroyed elements: " << destroyed;
}
/**

View File

@ -1,12 +1,12 @@
#include <functional>
#include "gflags/gflags.h"
#include <gflags/gflags.h>
#include <glog/logging.h>
#include "database/creation_exception.hpp"
#include "database/graph_db.hpp"
#include "database/graph_db_accessor.hpp"
#include "durability/recovery.hpp"
#include "logging/logger.hpp"
#include "storage/edge.hpp"
#include "storage/garbage_collector.hpp"
@ -18,14 +18,12 @@ DEFINE_int32(max_retained_snapshots, -1,
DEFINE_int32(snapshot_cycle_sec, -1,
"Amount of time between starts of two snapshooters in seconds. -1 "
"to turn off.");
DEFINE_bool(snapshot_on_db_exit, false,
"Snapshot on exiting the database.");
DEFINE_bool(snapshot_on_db_exit, false, "Snapshot on exiting the database.");
DECLARE_string(snapshot_directory);
GraphDb::GraphDb(const std::string &name, const fs::path &snapshot_db_dir)
: Loggable("GraphDb"),
name_(name),
: name_(name),
gc_vertices_(vertices_, vertex_record_deleter_,
vertex_version_list_deleter_),
gc_edges_(edges_, edge_record_deleter_, edge_version_list_deleter_) {
@ -87,13 +85,14 @@ void GraphDb::RecoverDatabase(const fs::path &snapshot_db_dir) {
Recovery recovery;
for (auto &snapshot_file : snapshots) {
GraphDbAccessor db_accessor(*this);
logger.info("Starting database recovery from snapshot {}...",
snapshot_file);
std::cout << "Starting database recovery from snapshot " << snapshot_file
<< std::endl;
if (recovery.Recover(snapshot_file.string(), db_accessor)) {
logger.info("Recovery successful.");
std::cout << "Recovery successful." << std::endl;
return;
} else {
logger.error("Recovery unsuccessful, trying older snapshot...");
LOG(ERROR) << "Recovery unsuccessful, trying older snapshot..."
<< std::endl;
}
}
}
@ -109,14 +108,14 @@ GraphDb::~GraphDb() {
// Create last database snapshot
if (FLAGS_snapshot_on_db_exit == true) {
GraphDbAccessor db_accessor(*this);
logger.info("Creating snapshot on shutdown...");
LOG(INFO) << "Creating snapshot on shutdown..." << std::endl;
const bool status = snapshooter_.MakeSnapshot(
db_accessor, fs::path(FLAGS_snapshot_directory) / name_,
FLAGS_max_retained_snapshots);
if (status) {
logger.info("Snapshot created successfully.");
std::cout << "Snapshot created successfully." << std::endl;
} else {
logger.error("Snapshot creation failed!");
LOG(ERROR) << "Snapshot creation failed!" << std::endl;
}
}

View File

@ -32,7 +32,7 @@ namespace fs = std::experimental::filesystem;
* all the data publicly, and should therefore not be directly
* exposed to client functions. The GraphDbAccessor is used for that.
*/
class GraphDb : public Loggable {
class GraphDb {
public:
/**
* Construct database with a custom name.

View File

@ -1,5 +1,7 @@
#include <algorithm>
#include <glog/logging.h>
#include "communication/bolt/v1/encoder/base_encoder.hpp"
#include "database/graph_db_accessor.hpp"
#include "durability/file_writer_buffer.hpp"
@ -12,7 +14,7 @@ bool Snapshooter::MakeSnapshot(GraphDbAccessor &db_accessor_,
const int max_retained_snapshots) {
if (!fs::exists(snapshot_folder) &&
!fs::create_directories(snapshot_folder)) {
logger.error("Error while creating directory \"{}\"", snapshot_folder);
LOG(ERROR) << "Error while creating directory " << snapshot_folder;
return false;
}
const auto snapshot_file = GetSnapshotFileName(snapshot_folder);
@ -45,8 +47,8 @@ bool Snapshooter::Encode(const fs::path &snapshot_file,
buffer.Close();
} catch (std::ifstream::failure e) {
if (fs::exists(snapshot_file) && !fs::remove(snapshot_file)) {
logger.error("Error while removing corrupted snapshot file \"{}\"",
snapshot_file);
LOG(ERROR) << "Error while removing corrupted snapshot file: "
<< snapshot_file;
}
return false;
}
@ -77,7 +79,7 @@ void Snapshooter::MaintainMaxRetainedFiles(const fs::path &snapshot_folder,
for (int i = 0; i < static_cast<int>(files.size()) - max_retained_snapshots;
++i) {
if (!fs::remove(files[i])) {
logger.error("Error while removing file \"{}\"", files[i]);
LOG(ERROR) << "Error while removing file: " << files[i];
}
}
}

View File

@ -1,7 +1,5 @@
#pragma once
#include "logging/loggable.hpp"
#include <cstring>
#include <experimental/filesystem>
#include <vector>
@ -14,9 +12,9 @@ class GraphDbAccessor;
* Class responsible for making snapshots. Snapshots are stored in folder
* memgraph/build/$snapshot_folder/$db_name using bolt protocol.
*/
class Snapshooter : public Loggable {
class Snapshooter {
public:
Snapshooter() : Loggable("Snapshoter"){};
Snapshooter(){};
/**
* Make snapshot and save it in snapshots folder. Returns true if successful.
* @param db_accessor:

View File

@ -4,7 +4,6 @@
#include <sys/epoll.h>
#include "io/network/socket.hpp"
#include "logging/default.hpp"
#include "utils/exceptions.hpp"
#include "utils/likely.hpp"
@ -24,7 +23,7 @@ class Epoll {
public:
using Event = struct epoll_event;
Epoll(int flags) : logger_(logging::log->logger("io::Epoll")) {
Epoll(int flags) {
epoll_fd_ = epoll_create1(flags);
if (UNLIKELY(epoll_fd_ == -1))
@ -32,14 +31,14 @@ class Epoll {
}
template <class Stream>
void Add(Stream& stream, Event* event) {
void Add(Stream &stream, Event *event) {
auto status = epoll_ctl(epoll_fd_, EPOLL_CTL_ADD, stream, event);
if (UNLIKELY(status))
throw EpollError("Can't add an event to epoll listener.");
}
int Wait(Event* events, int max_events, int timeout) {
int Wait(Event *events, int max_events, int timeout) {
return epoll_wait(epoll_fd_, events, max_events, timeout);
}
@ -47,6 +46,5 @@ class Epoll {
private:
int epoll_fd_;
Logger logger_;
};
}

View File

@ -1,7 +1,8 @@
#pragma once
#include <glog/logging.h>
#include "io/network/epoll.hpp"
#include "logging/default.hpp"
#include "utils/crtp.hpp"
namespace io::network {
@ -15,8 +16,7 @@ class EventListener : public Crtp<Derived> {
public:
using Crtp<Derived>::derived;
EventListener(uint32_t flags = 0)
: listener_(flags), logger_(logging::log->logger("io::EventListener")) {}
EventListener(uint32_t flags = 0) : listener_(flags) {}
void WaitAndProcessEvents() {
// TODO hardcoded a wait timeout because of thread joining
@ -33,7 +33,7 @@ class EventListener : public Crtp<Derived> {
#ifndef NDEBUG
#ifndef LOG_NO_TRACE
if (n > 0) logger_.trace("number of events: {}", n);
DLOG_IF(INFO, n > 0) << "number of events: " << n;
#endif
#endif
@ -79,8 +79,5 @@ class EventListener : public Crtp<Derived> {
protected:
Epoll listener_;
Epoll::Event events_[max_events];
private:
Logger logger_;
};
}

View File

@ -13,21 +13,20 @@ namespace io::network {
template <class Derived, class Stream>
class StreamReader : public StreamListener<Derived, Stream> {
public:
StreamReader(uint32_t flags = 0)
: StreamListener<Derived, Stream>(flags),
logger_(logging::log->logger("io::StreamReader")) {}
StreamReader(uint32_t flags = 0) : StreamListener<Derived, Stream>(flags) {}
bool Accept(Socket &socket) {
logger_.trace("Accept");
DLOG(INFO) << "Accept";
// accept a connection from a socket
Socket s;
if (!socket.Accept(&s)) return false;
logger_.info("Client {}:{} connected.", s.endpoint().address(),
s.endpoint().port());
logger_.trace(
"Accepted a connection: scoket {}, address '{}', family {}, port {}",
std::cout << fmt::format("Client {}:{} connected.", s.endpoint().address(),
s.endpoint().port())
<< std::endl;
DLOG(INFO) << fmt::format(
"Accepted a connection: socket {}, address '{}', family {}, port {}",
s.id(), s.endpoint().address(), s.endpoint().family(),
s.endpoint().port());
@ -47,10 +46,10 @@ class StreamReader : public StreamListener<Derived, Stream> {
}
void OnData(Stream &stream) {
logger_.trace("On data");
DLOG(INFO) << "On data";
if (UNLIKELY(!stream.Alive())) {
logger_.trace("Calling OnClose because the stream isn't alive!");
DLOG(WARNING) << "Calling OnClose because the stream isn't alive!";
this->derived().OnClose(stream);
return;
}
@ -75,7 +74,7 @@ class StreamReader : public StreamListener<Derived, Stream> {
// end of file, the client has closed the connection
if (UNLIKELY(len == 0)) {
logger_.trace("Calling OnClose because the socket is closed!");
DLOG(WARNING) << "Calling OnClose because the socket is closed!";
this->derived().OnClose(stream);
return;
}
@ -85,8 +84,5 @@ class StreamReader : public StreamListener<Derived, Stream> {
this->derived().OnRead(stream);
}
private:
Logger logger_;
};
}

View File

@ -1,37 +0,0 @@
#include "logging/default.hpp"
#include "logging/logs/async_log.hpp"
#include "logging/logs/sync_log.hpp"
#include "logging/streams/stdout.hpp"
namespace logging {
// per object log source
// TODO: discussion
std::unique_ptr<Log> log;
void init_async() { log = std::make_unique<AsyncLog>(); }
void init_sync() { log = std::make_unique<SyncLog>(); }
// "global" DEBUG logger
std::unique_ptr<Log> debug_log = std::make_unique<SyncLog>();
Logger init_debug_logger() {
debug_log->pipe(std::make_unique<Stdout>());
return debug_log->logger("DEBUG");
}
Logger debug_logger = init_debug_logger();
// "global" INFO logger
std::unique_ptr<Log> info_log = std::make_unique<SyncLog>();
Logger init_info_logger() {
info_log->pipe(std::make_unique<Stdout>());
return info_log->logger("INFO");
}
Logger info_logger = init_info_logger();
}

View File

@ -1,26 +0,0 @@
#pragma once
#include "logging/log.hpp"
#include "logging/logger.hpp"
namespace logging {
extern std::unique_ptr<Log> log;
extern Logger debug_logger;
template <class... Args>
void debug(Args&&... args) {
debug_logger.debug(std::forward<Args>(args)...);
}
extern Logger info_logger;
template <class... Args>
void info(Args&&... args) {
info_logger.info(std::forward<Args>(args)...);
}
void init_async();
void init_sync();
}

View File

@ -1,7 +0,0 @@
#include "logging/levels.hpp"
std::string Trace::text = "TRACE";
std::string Debug::text = "DEBUG";
std::string Info::text = "INFO";
std::string Warn::text = "WARN";
std::string Error::text = "ERROR";

View File

@ -1,28 +0,0 @@
#pragma once
#include <string>
struct Trace {
static std::string text;
static constexpr unsigned level = 0;
};
struct Debug {
static std::string text;
static constexpr unsigned level = 10;
};
struct Info {
static std::string text;
static constexpr unsigned level = 20;
};
struct Warn {
static std::string text;
static constexpr unsigned level = 30;
};
struct Error {
static std::string text;
static constexpr unsigned level = 40;
};

View File

@ -1,14 +0,0 @@
#include <iostream>
#include "logging/log.hpp"
#include "logging/logger.hpp"
#include "utils/assert.hpp"
Logger Log::logger(const std::string &name) {
// TODO: once when properties are refactored enable this
// debug_assert(this != nullptr,
// "This shouldn't be null. This method is "
// "called before the log object is created. "
// "E.g. static variables before main method.");
return Logger(this, name);
}

View File

@ -1,60 +0,0 @@
#pragma once
#include <string>
#include <vector>
#include "utils/datetime/timestamp.hpp"
class Logger;
class Log {
public:
using uptr = std::unique_ptr<Log>;
class Record {
public:
using uptr = std::unique_ptr<Record>;
Record() = default;
virtual ~Record() = default;
virtual const Timestamp& when() const = 0;
virtual const std::string& where() const = 0;
virtual unsigned level() const = 0;
virtual const std::string& level_str() const = 0;
virtual const std::string& text() const = 0;
};
class Stream {
public:
using uptr = std::unique_ptr<Stream>;
Stream() = default;
virtual ~Stream() = default;
virtual void emit(const Record&) = 0;
};
virtual ~Log() = default;
Logger logger(const std::string& name);
void pipe(Stream::uptr&& stream) {
streams.emplace_back(std::forward<Stream::uptr>(stream));
}
virtual std::string type() = 0;
protected:
friend class Logger;
virtual void emit(Record::uptr record) = 0;
void dispatch(const Record& record) {
for (auto& stream : streams) stream->emit(record);
}
std::vector<Stream::uptr> streams;
};

View File

@ -1,25 +0,0 @@
#pragma once
#include "logging/default.hpp"
/**
* @class Loggable
*
* @brief Base class that could be used in all classed which need a logging
* functionality.
*/
class Loggable {
public:
/**
* Sets logger name.
*/
Loggable(const std::string &name) : logger(logging::log->logger(name)) {}
virtual ~Loggable() {}
protected:
/**
* Logger instance that can be used only from derived classes.
*/
Logger logger;
};

View File

@ -1,95 +0,0 @@
#pragma once
#include <fmt/format.h>
#include "logging/levels.hpp"
#include "logging/log.hpp"
#include "utils/assert.hpp"
class Logger {
template <class Level>
class Message : public Log::Record {
public:
Message(Timestamp timestamp, std::string location, std::string message)
: timestamp(timestamp), location(location), message(message) {}
const Timestamp &when() const override { return timestamp; }
const std::string &where() const override { return location; }
unsigned level() const override { return Level::level; }
const std::string &level_str() const override { return Level::text; }
const std::string &text() const override { return message; }
private:
Timestamp timestamp;
std::string location;
std::string message;
};
public:
Logger() = default;
Logger(Log *log, const std::string &name) : log(log), name(name) {}
template <class Level, class... Args>
void emit(Args &&... args) {
if (log == nullptr) return;
auto message = std::make_unique<Message<Level>>(
Timestamp::now(), name, fmt::format(std::forward<Args>(args)...));
log->emit(std::move(message));
}
/**
*@brief Return if the logger is initialized.
*@return true if initialized, false otherwise.
*/
bool Initialized() { return log != nullptr; }
template <class... Args>
void trace(Args &&... args) {
#ifndef NDEBUG
#ifndef LOG_NO_TRACE
emit<Trace>(std::forward<Args>(args)...);
#endif
#endif
}
template <class... Args>
void debug(Args &&... args) {
#ifndef NDEBUG
#ifndef LOG_NO_DEBUG
emit<Debug>(std::forward<Args>(args)...);
#endif
#endif
}
template <class... Args>
void info(Args &&... args) {
#ifndef LOG_NO_INFO
emit<Info>(std::forward<Args>(args)...);
#endif
}
template <class... Args>
void warn(Args &&... args) {
#ifndef LOG_NO_WARN
emit<Warn>(std::forward<Args>(args)...);
#endif
}
template <class... Args>
void error(Args &&... args) {
#ifndef LOG_NO_ERROR
emit<Error>(std::forward<Args>(args)...);
#endif
}
private:
Log *log;
std::string name;
};

View File

@ -1,27 +0,0 @@
#include "logging/logs/async_log.hpp"
AsyncLog::~AsyncLog() {
alive.store(false);
worker.join();
}
void AsyncLog::emit(Record::uptr record) { records.push(std::move(record)); }
std::string AsyncLog::type() { return "AsyncLog"; }
void AsyncLog::work() {
using namespace std::chrono_literals;
while (true) {
auto record = records.pop();
if (record != nullptr) {
dispatch(*record);
continue;
}
if (!alive) return;
std::this_thread::sleep_for(10ms);
}
}

View File

@ -1,22 +0,0 @@
#pragma once
#include <thread>
#include "data_structures/queue/mpsc_queue.hpp"
#include "logging/log.hpp"
class AsyncLog : public Log {
public:
~AsyncLog();
protected:
void emit(Record::uptr) override;
std::string type() override;
private:
lockfree::MpscQueue<Record> records;
std::atomic<bool> alive{true};
std::thread worker{[this]() { work(); }};
void work();
};

View File

@ -1,8 +0,0 @@
#include "logging/logs/sync_log.hpp"
void SyncLog::emit(Record::uptr record) {
auto guard = this->acquire_unique();
dispatch(*record);
}
std::string SyncLog::type() { return "SyncLog"; }

View File

@ -1,11 +0,0 @@
#pragma once
#include "logging/log.hpp"
#include "threading/sync/futex.hpp"
#include "threading/sync/lockable.hpp"
class SyncLog : public Log, Lockable<Futex> {
protected:
void emit(Record::uptr) override;
std::string type() override;
};

View File

@ -1,17 +0,0 @@
#pragma once
#include "logging/log.hpp"
#include "logging/streams/format.hpp"
class File : public Log::Stream {
public:
File(const std::string &filename) : file_(filename) {}
void emit(const Log::Record &record) override {
file_ << logging::Formatter::format(logging::format::out, record);
file_.flush();
}
private:
std::ofstream file_;
};

View File

@ -1,22 +0,0 @@
#pragma once
#include <fmt/format.h>
#include <string>
#include "logging/log.hpp"
namespace logging::format {
// TODO: read formats from the config
static const std::string out = "{} {:<5} [{}] {}\n";
static const std::string err = out;
}
namespace logging {
class Formatter {
public:
static std::string format(const std::string &format,
const Log::Record &record) {
return fmt::format(format, static_cast<std::string>(record.when()),
record.level_str(), record.where(), record.text());
}
};
}

View File

@ -1,8 +0,0 @@
#include "logging/streams/stderr.hpp"
#include <iostream>
#include "logging/streams/format.hpp"
void Stderr::emit(const Log::Record& record) {
std::cerr << logging::Formatter::format(logging::format::err, record);
}

View File

@ -1,8 +0,0 @@
#pragma once
#include "logging/log.hpp"
class Stderr : public Log::Stream {
public:
void emit(const Log::Record&) override;
};

View File

@ -1,8 +0,0 @@
#include "logging/streams/stdout.hpp"
#include <iostream>
#include "logging/streams/format.hpp"
void Stdout::emit(const Log::Record& record) {
std::cout << logging::Formatter::format(logging::format::out, record);
}

View File

@ -1,8 +0,0 @@
#pragma once
#include "logging/log.hpp"
class Stdout : public Log::Stream {
public:
void emit(const Log::Record&) override;
};

View File

@ -2,6 +2,7 @@
#include <iostream>
#include "gflags/gflags.h"
#include "glog/logging.h"
#include "dbms/dbms.hpp"
#include "query/engine.hpp"
@ -13,10 +14,6 @@
#include "io/network/network_error.hpp"
#include "io/network/socket.hpp"
#include "logging/default.hpp"
#include "logging/streams/stdout.hpp"
#include "logging/streams/file.hpp"
#include "utils/flag_validation.hpp"
#include "utils/signals/handler.hpp"
#include "utils/stacktrace.hpp"
@ -32,8 +29,6 @@ using result_stream_t =
using bolt_server_t =
communication::Server<session_t, result_stream_t, socket_t>;
Logger logger;
DEFINE_string(interface, "0.0.0.0", "Default interface on which to listen.");
DEFINE_string(port, "7687", "Default port on which to listen.");
DEFINE_VALIDATED_int32(num_workers,
@ -87,20 +82,8 @@ int main(int argc, char **argv) {
fs::current_path(fs::path(argv[0]).parent_path());
load_config(argc, argv);
// Logging init.
#ifdef SYNC_LOGGER
logging::init_sync();
#else
logging::init_async();
#endif
#ifndef LOG_NO_STDOUT
logging::log->pipe(std::make_unique<Stdout>());
#endif
logging::log->pipe(std::make_unique<File>(FLAGS_log_file));
// Get logger.
logger = logging::log->logger("Main");
logger.debug("Using {} logger", logging::log->type());
google::InitGoogleLogging(argv[0]);
google::SetLogDestination(google::INFO, FLAGS_log_file.c_str());
// Unhandled exception handler init.
std::set_terminate(&terminate_handler);
@ -124,23 +107,22 @@ int main(int argc, char **argv) {
try {
endpoint = endpoint_t(FLAGS_interface, FLAGS_port);
} catch (io::network::NetworkEndpointException &e) {
logger.error("{}", e.what());
std::exit(EXIT_FAILURE);
LOG(FATAL) << e.what();
}
// Initialize socket.
socket_t socket;
if (!socket.Bind(endpoint)) {
logger.error("Cannot bind to socket on {} at {}", FLAGS_interface,
FLAGS_port);
LOG(ERROR) << "Cannot bind to socket on " << FLAGS_interface << " at "
<< FLAGS_port;
std::exit(EXIT_FAILURE);
}
if (!socket.SetNonBlocking()) {
logger.error("Cannot set socket to non blocking!");
LOG(ERROR) << "Cannot set socket to non blocking!";
std::exit(EXIT_FAILURE);
}
if (!socket.Listen(1024)) {
logger.error("Cannot listen on socket!");
LOG(ERROR) << "Cannot listen on socket!";
std::exit(EXIT_FAILURE);
}
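
A self-contained sketch of the gflags + glog startup sequence the new main() relies on; the flag name and default below are illustrative, not taken from this diff:

#include <gflags/gflags.h>
#include <glog/logging.h>

DEFINE_string(log_file, "memgraph.log", "Illustrative log file prefix.");

int main(int argc, char **argv) {
  gflags::ParseCommandLineFlags(&argc, &argv, true);
  google::InitGoogleLogging(argv[0]);
  // Base filename for the INFO log; higher severities are also copied into the
  // INFO log by default, so a single prefix yields a combined log.
  google::SetLogDestination(google::INFO, FLAGS_log_file.c_str());
  LOG(INFO) << "server starting";
  LOG_IF(ERROR, argc > 1) << "unexpected positional arguments";
  return 0;
}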

View File

@ -3,14 +3,16 @@
#include <experimental/filesystem>
namespace fs = std::experimental::filesystem;
#include <glog/logging.h>
#include "data_structures/concurrent/concurrent_map.hpp"
#include "database/graph_db.hpp"
#include "logging/loggable.hpp"
#include "query/exceptions.hpp"
#include "query/frontend/opencypher/parser.hpp"
#include "query/interpreter.hpp"
#include "query/plan_compiler.hpp"
#include "query/plan_interface.hpp"
#include "utils/datetime/timestamp.hpp"
#include "utils/dynamic_lib.hpp"
DECLARE_bool(interpret);
@ -28,12 +30,12 @@ DECLARE_string(compile_directory);
* the whole result set)
*/
template <typename Stream>
class QueryEngine : public Loggable {
class QueryEngine {
private:
using QueryPlanLib = utils::DynamicLib<QueryPlanTrait<Stream>>;
public:
QueryEngine() : Loggable("QueryEngine") {}
QueryEngine() {}
/**
* Reloads query plan (plan_path contains compiled query plan).
@ -79,7 +81,7 @@ class QueryEngine : public Loggable {
if (UNLIKELY(!result)) {
// info because it might be something like a deadlock, in which
// case one thread is stopped and the user has to try again
logger.info("Unable to execute query (execution returned false)");
LOG(ERROR) << "Unable to execute query (execution returned false)";
return result;
}

View File

@ -5,7 +5,6 @@
#include <vector>
#include "antlr4-runtime.h"
#include "logging/loggable.hpp"
#include "query/common.hpp"
#include "query/frontend/opencypher/generated/CypherBaseVisitor.h"
#include "query/frontend/opencypher/generated/CypherLexer.h"

View File

@ -1,6 +1,5 @@
#pragma once
#include "logging/loggable.hpp"
#include "query/parameters.hpp"
#include "query/typed_value.hpp"
#include "utils/assert.hpp"

View File

@ -3,7 +3,7 @@
// TODO: Remove this flag. AST caching can be disabled by setting this flag to
// false, which is useful for recreating antlr crashes in highly concurrent tests.
// Once the antlr bugs are fixed, or a real test is written, this flag can be removed.
DEFINE_bool(ast_cache, true, "Use ast caching.");
DEFINE_bool(ast_cache, false, "Use ast caching.");
DEFINE_bool(query_cost_planner, true,
"Use the cost estimator to generate plans for queries.");

View File

@ -3,8 +3,10 @@
#include <ctime>
#include <limits>
#include <gflags/gflags.h>
#include <glog/logging.h>
#include "database/graph_db_accessor.hpp"
#include "gflags/gflags.h"
#include "query/context.hpp"
#include "query/frontend/ast/cypher_main_visitor.hpp"
#include "query/frontend/opencypher/parser.hpp"
@ -20,9 +22,9 @@ DECLARE_bool(query_cost_planner);
namespace query {
class Interpreter : public Loggable {
class Interpreter {
public:
Interpreter() : Loggable("Interpreter") {}
Interpreter() {}
template <typename Stream>
void Interpret(const std::string &query, GraphDbAccessor &db_accessor,
Stream &stream) {
@ -163,7 +165,7 @@ class Interpreter : public Loggable {
// have to be correct (for Bolt clients)
summary["type"] = "rw";
stream.Summary(summary);
logger.info("Execute '{}', {}", query, summary);
LOG(INFO) << "Execute " << query << ", " << summary;
}
private:

View File

@ -1,6 +1,5 @@
#include "logging/loggable.hpp"
#include "query/plan/operator.hpp"
#include "query/frontend/ast/ast.hpp"
#include "query/plan/operator.hpp"
#include "query/typed_value.hpp"
namespace query::plan {
@ -30,7 +29,7 @@ namespace query::plan {
* for a single query part, and query part reordering is
* not allowed.
*/
class CostEstimator : public HierarchicalLogicalOperatorVisitor, Loggable {
class CostEstimator : public HierarchicalLogicalOperatorVisitor {
public:
struct CostParam {
static constexpr double kScanAll{1.0};
@ -55,7 +54,7 @@ class CostEstimator : public HierarchicalLogicalOperatorVisitor, Loggable {
using HierarchicalLogicalOperatorVisitor::PostVisit;
CostEstimator(const GraphDbAccessor &db_accessor)
: Loggable("QueryCostEstimator"), db_accessor_(db_accessor) {}
: db_accessor_(db_accessor) {}
bool PostVisit(ScanAll &) override;
bool PostVisit(ScanAllByLabel &scan_all_by_label) override;

View File

@ -2,8 +2,8 @@
#include <string>
#include "logging/default.hpp"
#include "logging/loggable.hpp"
#include <glog/logging.h>
#include "query/exceptions.hpp"
#include "query/plan_compiler_flags.hpp"
#include "utils/string.hpp"
@ -15,9 +15,9 @@
/**
* Compiles code into shared object (.so)
*/
class PlanCompiler : public Loggable {
class PlanCompiler {
public:
PlanCompiler() : Loggable("PlanCompiler") {}
PlanCompiler() {}
/**
* Compiles in_file into out_file (.cpp -> .so)
@ -40,22 +40,23 @@ class PlanCompiler : public Loggable {
"-shared -fPIC"}, // shared library flags
" ");
logger.debug("compile command -> {}", compile_command);
DLOG(INFO) << "compile command -> " << compile_command;
// synchronous call
auto compile_status = system(compile_command.c_str());
logger.debug("compile status {}", compile_status);
DLOG(INFO) << "compile status " << compile_status;
// if compilation has failed throw exception
if (compile_status != 0) {
logger.debug("FAIL: Query Code Compilation: {} -> {}", in_file, out_file);
DLOG(ERROR) << "FAIL: Query Code Compilation: " << in_file << " -> "
<< out_file;
throw query::PlanCompilationException(
"Code compilation error. Generated code is not compilable or "
"compilation settings are wrong");
}
logger.debug("SUCCESS: Query Code Compilation: {} -> {}", in_file,
out_file);
DLOG(ERROR) << "SUCCESS: Query Code Compilation: " << in_file << " -> "
<< out_file;
}
};
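
The compile step above shells out to a compiler and inspects the exit status; a reduced, self-contained sketch of that pattern (the compiler invocation and flags are illustrative, not the project's real command):

#include <cstdlib>
#include <stdexcept>
#include <string>

#include <glog/logging.h>

void CompilePlanSketch(const std::string &in_file, const std::string &out_file) {
  const std::string command =
      "clang++ -std=c++14 -shared -fPIC " + in_file + " -o " + out_file;
  DLOG(INFO) << "compile command -> " << command;
  const int status = std::system(command.c_str());  // synchronous call
  DLOG(INFO) << "compile status " << status;
  if (status != 0)
    throw std::runtime_error(
        "generated code is not compilable or compilation settings are wrong");
}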

View File

@ -3,6 +3,7 @@
#include "gflags/gflags.h"
#include "gflags/gflags_declare.h"
#include "glog/logging.h"
#include "data_structures/bitset/static_bitset.hpp"
#include "communication/bolt/v1/encoder/result_stream.hpp"
#include "io/network/socket.hpp"

View File

@ -1,7 +1,8 @@
#pragma once
#include <glog/logging.h>
#include "data_structures/concurrent/skiplist.hpp"
#include "logging/loggable.hpp"
#include "mvcc/version_list.hpp"
#include "storage/deferred_deleter.hpp"
#include "transactions/engine.hpp"
@ -11,13 +12,12 @@
* @Tparam T type of underlying record in mvcc
*/
template <typename T>
class GarbageCollector : public Loggable {
class GarbageCollector {
public:
GarbageCollector(SkipList<mvcc::VersionList<T> *> &skiplist,
DeferredDeleter<T> &record_deleter,
DeferredDeleter<mvcc::VersionList<T>> &version_list_deleter)
: Loggable("MvccGc"),
skiplist_(skiplist),
: skiplist_(skiplist),
record_deleter_(record_deleter),
version_list_deleter_(version_list_deleter){};
@ -34,8 +34,6 @@ class GarbageCollector : public Loggable {
uint64_t count = 0;
std::vector<T *> deleted_records;
std::vector<mvcc::VersionList<T> *> deleted_version_lists;
if (logger.Initialized())
logger.trace("GC started cleaning with snapshot: ", snapshot);
for (auto version_list : collection_accessor) {
// If the version_list is empty, i.e. there is nothing else to be read
// from it we can delete it.
@ -46,7 +44,9 @@ class GarbageCollector : public Loggable {
}
if (ret.second != nullptr) deleted_records.push_back(ret.second);
}
if (logger.Initialized()) logger.trace("Destroyed: {}", count);
DLOG_IF(INFO, count > 0)
<< "GC started cleaning with snapshot: " << snapshot;
DLOG_IF(INFO, count > 0) << "Destroyed: " << count;
// Add records to the deleter, with an id larger than or equal to the id of
// the last active transaction.
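
DLOG_IF, used above, adds a run-time condition on top of the debug-build guard; a small illustrative sketch (the function and variable names are hypothetical):

#include <cstdint>

#include <glog/logging.h>

void ReportGcPass(uint64_t destroyed, uint64_t snapshot_id) {
  // Emitted only in debug builds, and only when the pass actually deleted something.
  DLOG_IF(INFO, destroyed > 0) << "GC pass over snapshot " << snapshot_id
                               << " destroyed " << destroyed << " version lists";
  // Throttled variant: at most every 100th call, debug builds only.
  DLOG_EVERY_N(INFO, 100) << "GC pass finished";
}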

View File

@ -1,8 +1,7 @@
#pragma once
#include <algorithm>
#include "logging/default.hpp"
#include <string>
/**
* Goes from first to last item in a container, if an element satisfying the

View File

@ -1,13 +1,15 @@
#pragma once
#include <dlfcn.h>
#include <experimental/filesystem>
namespace fs = std::experimental::filesystem;
#include <stdexcept>
#include <string>
#include "utils/exceptions.hpp"
namespace fs = std::experimental::filesystem;
#include "logging/loggable.hpp"
#include <glog/logging.h>
#include "utils/exceptions.hpp"
namespace utils {
@ -34,7 +36,7 @@ class DynamicLibException : public utils::BasicException {
* of the underlying dynamic object.
*/
template <typename T>
class DynamicLib : public Loggable {
class DynamicLib {
public:
/**
* Initializes dynamic library (loads lib, produce and
@ -43,7 +45,7 @@ class DynamicLib : public Loggable {
* @param lib_path file system path to dynamic library
*/
DynamicLib(const fs::path &lib_path)
: Loggable("DynamicLib"), lib_path(lib_path), lib_object(nullptr) {
: lib_path(lib_path), lib_object(nullptr) {
// load dynamic lib
// I've added the RTLD_DEEPBIND flag when we are opening the dynamic_lib to
// resolve symbols locally instead of globally. For additional information
@ -51,7 +53,7 @@ class DynamicLib : public Loggable {
dynamic_lib = dlopen(lib_path.c_str(), RTLD_NOW | RTLD_DEEPBIND);
if (!dynamic_lib) throw DynamicLibException(dlerror());
dlerror(); /* Clear any existing error */
logger.trace("dynamic lib at ADDRESS({}) was opened", dynamic_lib);
DLOG(INFO) << "dynamic lib at ADDRESS " << dynamic_lib << " was opened";
// load produce method
this->produce_method =
@ -93,15 +95,15 @@ class DynamicLib : public Loggable {
~DynamicLib() {
// first destroy underlying object
if (lib_object != nullptr) {
logger.trace("shared object at ADDRESS({}) will be destroyed",
(void *)lib_object);
DLOG(INFO) << "shared object at ADDRESS " << (void *)lib_object
<< " will be destroyed.";
this->destruct_method(lib_object);
}
// then destroy dynamic lib
logger.trace("unloading lib {}", lib_path.c_str());
DLOG(INFO) << "unloading lib " << lib_path.c_str();
if (dynamic_lib != nullptr) {
logger.trace("closing dynamic lib ADDRESS({})", (void *)dynamic_lib);
DLOG(INFO) << "closing dynamic lib ADDRESS " << (void *)dynamic_lib;
// // IMPORTANT: weird problem, the program SEGFAULTs on dlclose
// // TODO: FIX somehow
// // maybe something similar is:
@ -115,7 +117,7 @@ class DynamicLib : public Loggable {
int closing_status = dlclose(dynamic_lib);
if (closing_status != 0) throw DynamicLibException(dlerror());
} else {
logger.trace("unload lib was called but lib ptr is null");
DLOG(WARNING) << "unload lib was called but lib ptr is null";
}
}
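
For context, the dlopen/dlsym pattern that DynamicLib wraps, reduced to a sketch; the exported symbol name "produce" and its return type are assumptions for illustration only:

#include <dlfcn.h>

#include <stdexcept>
#include <string>

void *LoadAndProduce(const std::string &lib_path) {
  // RTLD_DEEPBIND resolves the library's symbols locally instead of globally.
  void *handle = dlopen(lib_path.c_str(), RTLD_NOW | RTLD_DEEPBIND);
  if (!handle) throw std::runtime_error(dlerror());
  dlerror();  // clear any stale error state before dlsym
  using ProduceFn = void *(*)();
  auto produce = reinterpret_cast<ProduceFn>(dlsym(handle, "produce"));
  if (const char *error = dlerror()) {
    dlclose(handle);
    throw std::runtime_error(error);
  }
  return produce();  // caller is responsible for the matching destruct and dlclose
}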

View File

@ -12,6 +12,7 @@
#include <limits.h>
#include <sys/inotify.h>
#include <unistd.h>
#include <atomic>
#include <chrono>
#include <mutex>
@ -21,7 +22,8 @@
#include <experimental/filesystem>
namespace fs = std::experimental::filesystem;
#include "logging/loggable.hpp"
#include <glog/logging.h>
#include "utils/algorithm.hpp"
#include "utils/assert.hpp"
#include "utils/exceptions.hpp"
@ -163,7 +165,7 @@ class FSWatcherException : public StacktraceException {
* parameters:
* * interval - time between two checks for the new file system events
*/
class FSWatcher : public Loggable {
class FSWatcher {
/**
* callback type (the code that wants to be notified is notified
* through a callback of this type
@ -174,10 +176,9 @@ class FSWatcher : public Loggable {
/**
* Initialize underlying notification system.
*/
FSWatcher(ms check_interval = ms(100))
: Loggable("FSWatcher"), check_interval_(check_interval) {
logger.debug("Inotify header length: {}", IN_HEADER_SIZE);
logger.debug("Inotify buffer length: {}", IN_BUFF_LEN);
FSWatcher(ms check_interval = ms(100)) : check_interval_(check_interval) {
DLOG(INFO) << fmt::format("Inotify header length: {}", IN_HEADER_SIZE);
DLOG(INFO) << fmt::format("Inotify buffer length: {}", IN_BUFF_LEN);
inotify_fd_ = inotify_init();
if (inotify_fd_ == -1)
throw FSWatcherException("Unable to initialize inotify\n");
@ -185,7 +186,7 @@ class FSWatcher : public Loggable {
}
~FSWatcher() {
logger.debug("destructor call");
DLOG(INFO) << "destructor call";
unwatchAll();
}
@ -257,8 +258,9 @@ class FSWatcher : public Loggable {
entries_.emplace_back(Entry(wd, descriptor, callback));
}
logger.debug("REGISTERED: wd({}) for path({}) and mask ({})", wd,
descriptor.path.c_str(), (os_mask_t)(descriptor));
DLOG(INFO) << fmt::format("REGISTERED: wd({}) for path({}) and mask ({})",
wd, descriptor.path.c_str(),
(os_mask_t)(descriptor));
start();
}
@ -309,7 +311,7 @@ class FSWatcher : public Loggable {
// run separate thread
dispatch_thread_ = std::thread([this]() {
logger.debug("dispatch thread - start");
DLOG(INFO) << "dispatch thread - start";
while (is_running_.load()) {
std::this_thread::sleep_for(check_interval_);
@ -321,7 +323,7 @@ class FSWatcher : public Loggable {
if (n == 0) throw FSWatcherException("read() -> 0.");
if (n == -1) continue;
logger.info("Read {} bytes from inotify fd", (long)n);
DLOG(INFO) << fmt::format("Read {} bytes from inotify fd", (long)n);
// process all of the events in buffer returned by read()
for (auto p = buffer_; p < buffer_ + n;) {
@ -348,8 +350,9 @@ class FSWatcher : public Loggable {
continue;
}
logger.info("LEN: {}, MASK: {}, NAME: {}", in_event_length,
in_event->mask, in_event->name);
DLOG(INFO) << fmt::format("LEN: {}, MASK: {}, NAME: {}",
in_event_length, in_event->mask,
in_event->name);
// find our watch descriptor
auto entry = find_if(
@ -366,7 +369,7 @@ class FSWatcher : public Loggable {
}
}
logger.debug("dispatch thread - finish");
DLOG(INFO) << "dispatch thread - finish";
});
}
@ -413,8 +416,8 @@ class FSWatcher : public Loggable {
if (status == -1)
throw FSWatcherException("Unable to remove underlaying watch.");
else
logger.info("UNREGISTER: fd({}), wd({}), status({})", inotify_fd_,
entry.os_wd, status);
DLOG(INFO) << fmt::format("UNREGISTER: fd({}), wd({}), status({})",
inotify_fd_, entry.os_wd, status);
}
/**

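A condensed, illustrative sketch of the inotify loop that FSWatcher's dispatch thread runs; the watched directory and the event mask are assumptions, not values from this diff:

#include <sys/inotify.h>
#include <unistd.h>

#include <cstdio>

int WatchOnce(const char *directory) {
  int fd = inotify_init();
  if (fd == -1) return -1;
  int wd = inotify_add_watch(fd, directory, IN_CREATE | IN_MODIFY | IN_DELETE);
  if (wd == -1) { close(fd); return -1; }
  alignas(struct inotify_event) char buffer[4096];
  ssize_t n = read(fd, buffer, sizeof(buffer));  // blocks until events arrive
  for (char *p = buffer; n > 0 && p < buffer + n;) {
    auto *event = reinterpret_cast<struct inotify_event *>(p);
    std::printf("mask=%u len=%u name=%s\n", static_cast<unsigned>(event->mask),
                static_cast<unsigned>(event->len),
                event->len ? event->name : "");
    p += sizeof(struct inotify_event) + event->len;
  }
  inotify_rm_watch(fd, wd);
  close(fd);
  return 0;
}
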
View File

@ -6,7 +6,7 @@
#include <set>
#include <thread>
#include "logging/default.hpp"
#include <glog/logging.h>
namespace utils {
@ -134,7 +134,7 @@ class TimerScheduler {
while (is_running.load()) {
std::this_thread::sleep_for(delta_time_type(delta_time));
timer_container.process();
logging::info("timer_container.process()");
DLOG(INFO) << "timer_container.process()";
}
});
}

View File

@ -1,11 +1,10 @@
#include <random>
#include <thread>
#include "benchmark/benchmark_api.h"
#include <benchmark/benchmark_api.h>
#include <glog/logging.h>
#include "data_structures/bloom/bloom_filter.hpp"
#include "logging/default.hpp"
#include "logging/streams/stdout.hpp"
#include "utils/command_line/arguments.hpp"
#include "utils/hashing/fnv64.hpp"
#include "utils/random/random_generator.hpp"
@ -31,8 +30,7 @@ auto BM_Bloom = [](benchmark::State &state, auto *bloom, const auto &elements) {
};
int main(int argc, char **argv) {
logging::init_async();
logging::log->pipe(std::make_unique<Stdout>());
google::InitGoogleLogging(argv[0]);
StringGenerator generator(4);
@ -52,4 +50,5 @@ int main(int argc, char **argv) {
benchmark::Initialize(&argc, argv);
benchmark::RunSpecifiedBenchmarks();
return 0;
}

View File

@ -1,16 +1,15 @@
#include <random>
#include <thread>
#include <benchmark/benchmark_api.h>
#include <glog/logging.h>
#include "data_structures/bloom/bloom_filter.hpp"
#include "data_structures/concurrent/concurrent_bloom_map.hpp"
#include "logging/default.hpp"
#include "logging/streams/stdout.hpp"
#include "utils/command_line/arguments.hpp"
#include "utils/hashing/fnv64.hpp"
#include "utils/random/random_generator.hpp"
#include "benchmark/benchmark_api.h"
/*
ConcurrentMap Benchmark Test:
- tests time of Insertion, Contain and Delete operations
@ -104,8 +103,7 @@ void parse_arguments(int argc, char **argv) {
}
int main(int argc, char **argv) {
logging::init_async();
logging::log->pipe(std::make_unique<Stdout>());
google::InitGoogleLogging(argv[0]);
parse_arguments(argc, argv);

View File

@ -1,14 +1,13 @@
#include <random>
#include <thread>
#include <benchmark/benchmark_api.h>
#include <glog/logging.h>
#include "data_structures/concurrent/concurrent_map.hpp"
#include "logging/default.hpp"
#include "logging/streams/stdout.hpp"
#include "utils/command_line/arguments.hpp"
#include "utils/random/random_generator.hpp"
#include "benchmark/benchmark_api.h"
/*
ConcurrentMap Benchmark Test:
- tests time of Insertion, Contain and Delete operations
@ -120,8 +119,7 @@ void parse_arguments(int argc, char **argv) {
}
int main(int argc, char **argv) {
logging::init_async();
logging::log->pipe(std::make_unique<Stdout>());
google::InitGoogleLogging(argv[0]);
parse_arguments(argc, argv);

View File

@ -1,14 +1,13 @@
#include <random>
#include <thread>
#include <benchmark/benchmark_api.h>
#include <glog/logging.h>
#include "data_structures/concurrent/concurrent_map.hpp"
#include "logging/default.hpp"
#include "logging/streams/stdout.hpp"
#include "utils/command_line/arguments.hpp"
#include "utils/random/random_generator.hpp"
#include "benchmark/benchmark_api.h"
/*
ConcurrentMap Benchmark Test:
- tests time of Insertion, Deletion and Find
@ -98,8 +97,7 @@ void parse_arguments(int argc, char **argv) {
}
int main(int argc, char **argv) {
logging::init_async();
logging::log->pipe(std::make_unique<Stdout>());
google::InitGoogleLogging(argv[0]);
parse_arguments(argc, argv);

View File

@ -6,15 +6,15 @@
#include <thread>
#include <vector>
#include "benchmark/benchmark.h"
#include "benchmark/benchmark_api.h"
#include <benchmark/benchmark.h>
#include <benchmark/benchmark_api.h>
#include <glog/logging.h>
#include "data_structures/concurrent/skiplist.hpp"
#include "logging/default.hpp"
#include "logging/streams/stderr.hpp"
#include "skiplist_helper.hpp"
#include "utils/assert.hpp"
void Insert(benchmark::State& state) {
void Insert(benchmark::State &state) {
SkipList<int> skiplist;
while (state.KeepRunning()) {
const int count = SkipListHelper::InsertConcurrentSkiplistTimed(
@ -35,7 +35,7 @@ void Insert(benchmark::State& state) {
* Invokes the test function with two arguments, time and number of threads.
* Time is specified in microseconds.
*/
static void CustomArguments(benchmark::internal::Benchmark* b) {
static void CustomArguments(benchmark::internal::Benchmark *b) {
for (int i = (1 << 18); i <= (1 << 20); i *= 2)
for (int j = 1; j <= 8; ++j) b->Args({i, j});
}
@ -53,10 +53,8 @@ BENCHMARK(Insert)
->Repetitions(3)
->ReportAggregatesOnly(1);
int main(int argc, char** argv) {
logging::init_async();
logging::log->pipe(std::make_unique<Stderr>());
int main(int argc, char **argv) {
google::InitGoogleLogging(argv[0]);
::benchmark::Initialize(&argc, argv);
::benchmark::RunSpecifiedBenchmarks();
return 0;

View File

@ -10,10 +10,10 @@
#include <thread>
#include <vector>
#include <glog/logging.h>
#include "benchmark/benchmark_api.h"
#include "data_structures/concurrent/skiplist.hpp"
#include "logging/default.hpp"
#include "logging/streams/stdout.hpp"
#include "skiplist_helper.hpp"
#include "utils/random/random_generator.hpp"
@ -69,8 +69,7 @@ auto BM_ReverseFromRBegin = [](benchmark::State &state) {
auto BM_FindFromRBegin = [](benchmark::State &state) { FindFromRBegin(state); };
int main(int argc, char **argv) {
logging::init_async();
logging::log->pipe(std::make_unique<Stdout>());
google::InitGoogleLogging(argv[0]);
benchmark::RegisterBenchmark("ReverseFromRBegin", BM_ReverseFromRBegin)
->RangeMultiplier(2)

View File

@ -1,7 +1,7 @@
#include "benchmark/benchmark.h"
#include "benchmark/benchmark_api.h"
#include "logging/default.hpp"
#include "logging/streams/stderr.hpp"
#include <benchmark/benchmark.h>
#include <benchmark/benchmark_api.h>
#include <glog/logging.h>
#include "mvcc/record.hpp"
#include "mvcc/version_list.hpp"
@ -55,9 +55,9 @@ BENCHMARK(MvccMix)
->Range(1 << 14, 1 << 23) // 1<<14, 1<<15, 1<<16, ...
->Unit(benchmark::kMillisecond);
DEFINE_string(hehehe, "bok", "ne");
int main(int argc, char **argv) {
logging::init_async();
logging::log->pipe(std::make_unique<Stderr>());
google::InitGoogleLogging(argv[0]);
::benchmark::Initialize(&argc, argv);
::benchmark::RunSpecifiedBenchmarks();

View File

@ -5,9 +5,9 @@
#include <string>
#include <vector>
#include "benchmark/benchmark_api.h"
#include "logging/default.hpp"
#include "logging/streams/stdout.hpp"
#include <benchmark/benchmark_api.h>
#include <glog/logging.h>
#include "query/frontend/stripped.hpp"
auto BM_Strip = [](benchmark::State &state, auto &function, std::string query) {
@ -20,8 +20,7 @@ auto BM_Strip = [](benchmark::State &state, auto &function, std::string query) {
};
int main(int argc, char *argv[]) {
logging::init_async();
logging::log->pipe(std::make_unique<Stdout>());
google::InitGoogleLogging(argv[0]);
auto preprocess = [](const std::string &query) {
return query::StrippedQuery(query);

View File

@ -4,14 +4,14 @@
#include <random>
#include <thread>
#include <glog/logging.h>
#include "data_structures/bitset/dynamic_bitset.hpp"
#include "data_structures/concurrent/concurrent_list.hpp"
#include "data_structures/concurrent/concurrent_map.hpp"
#include "data_structures/concurrent/concurrent_set.hpp"
#include "data_structures/concurrent/skiplist.hpp"
#include "data_structures/static_array.hpp"
#include "logging/default.hpp"
#include "logging/streams/stdout.hpp"
#include "utils/assert.hpp"
#include "utils/sysinfo/memory.hpp"
@ -73,9 +73,9 @@ void check_size_list(S &acc, long long size) {
for ([[gnu::unused]] auto elem : acc) {
++iterator_counter;
}
permanent_assert(iterator_counter == size, "Iterator count should be "
<< size << ", but size is "
<< iterator_counter);
permanent_assert(iterator_counter == size,
"Iterator count should be " << size << ", but size is "
<< iterator_counter);
}
template <typename S>
void check_size(typename S::Accessor &acc, long long size) {
@ -91,9 +91,9 @@ void check_size(typename S::Accessor &acc, long long size) {
for ([[gnu::unused]] auto elem : acc) {
++iterator_counter;
}
permanent_assert(iterator_counter == size, "Iterator count should be "
<< size << ", but size is "
<< iterator_counter);
permanent_assert(iterator_counter == size,
"Iterator count should be " << size << ", but size is "
<< iterator_counter);
}
// Checks if order in list is maintained. It expects map
@ -202,7 +202,7 @@ auto insert_try(typename S::Accessor &acc, long long &downcount,
// is approximately equal to memory usage after the function. Memory usage is thread
// sensitive, so no_threads spawned in the function is necessary.
void memory_check(size_t no_threads, std::function<void()> f) {
logging::info("Number of threads: {}", no_threads);
DLOG(INFO) << fmt::format("Number of threads: {}", no_threads);
// TODO: replace vm_size with something more appropriate
// the past implementation was terribly wrong
@ -212,24 +212,18 @@ void memory_check(size_t no_threads, std::function<void()> f) {
// OR
// user Boost.Test
auto start = vm_size();
logging::info("Memory check (used memory at the beginning): {}", start);
DLOG(INFO) << fmt::format("Memory check (used memory at the beginning): {}",
start);
f();
auto end = vm_size();
logging::info("Memory check (used memory at the end): {}", end);
DLOG(INFO) << fmt::format("Memory check (used memory at the end): {}", end);
long long delta = end - start;
logging::info("Delta: {}", delta);
DLOG(INFO) << fmt::format("Delta: {}", delta);
// TODO: do memory check somehow
// the past implementation was wrong
permanent_assert(true, "Memory leak");
}
// TODO: move this inside logging/default
// Initializes loging faccilityes
void init_log() {
logging::init_async();
logging::log->pipe(std::make_unique<Stdout>());
}

View File

@ -12,8 +12,8 @@ constexpr size_t no_insert_for_one_delete = 1;
// Each thread makes a series of finds interleaved with methods which change.
// Exact ratio of finds per change and insert per delete can be regulated with
// no_find_per_change and no_insert_for_one_delete.
int main() {
init_log();
int main(int argc, char **argv) {
google::InitGoogleLogging(argv[0]);
memory_check(THREADS_NO, [] {
ConcurrentList<std::pair<int, int>> list;
permanent_assert(list.size() == 0, "The list isn't empty");

View File

@ -1,9 +1,9 @@
#include <chrono>
#include <iostream>
#include "gtest/gtest.h"
#include <glog/logging.h>
#include <gtest/gtest.h>
#include "logging/default.cpp"
#include "utils/assert.hpp"
#include "utils/timer/timer.hpp"
@ -19,7 +19,7 @@ using namespace utils;
*/
Timer::sptr create_test_timer(int64_t counter) {
return std::make_shared<Timer>(counter,
[]() { logging::info("Timer timeout"); });
[]() { DLOG(INFO) << "Timer timeout"; });
}
TEST(TimerSchedulerTest, TimerSchedulerExecution) {
@ -52,6 +52,7 @@ TEST(TimerSchedulerTest, TimerSchedulerExecution) {
}
int main(int argc, char **argv) {
::google::InitGoogleLogging(argv[0]);
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}

View File

@ -6,10 +6,8 @@
#include <iostream>
#include <vector>
#include "gtest/gtest.h"
#include "logging/default.hpp"
#include "logging/streams/stdout.hpp"
#include <glog/logging.h>
#include <gtest/gtest.h>
#include "communication/bolt/v1/decoder/buffer.hpp"
#include "communication/server.hpp"
@ -28,10 +26,9 @@ class TestOutputStream {};
class TestSession {
public:
TestSession(socket_t&& socket, Dbms& dbms,
QueryEngine<TestOutputStream>& query_engine)
: logger_(logging::log->logger("TestSession")),
socket_(std::move(socket)) {
TestSession(socket_t &&socket, Dbms &dbms,
QueryEngine<TestOutputStream> &query_engine)
: socket_(std::move(socket)) {
event_.data.ptr = this;
}
@ -53,21 +50,16 @@ class TestSession {
buffer_.Shift(size + 2);
}
io::network::StreamBuffer Allocate() {
return buffer_.Allocate();
}
io::network::StreamBuffer Allocate() { return buffer_.Allocate(); }
void Written(size_t len) {
buffer_.Written(len);
}
void Written(size_t len) { buffer_.Written(len); }
void Close() {
logger_.trace("Close session!");
DLOG(INFO) << "Close session!";
this->socket_.Close();
}
communication::bolt::Buffer<SIZE * 2> buffer_;
Logger logger_;
socket_t socket_;
io::network::Epoll::Event event_;
};
@ -75,43 +67,42 @@ class TestSession {
using test_server_t =
communication::Server<TestSession, TestOutputStream, socket_t>;
void server_start(void* serverptr, int num) {
((test_server_t*)serverptr)->Start(num);
void server_start(void *serverptr, int num) {
((test_server_t *)serverptr)->Start(num);
}
void client_run(int num, const char* interface, const char* port,
const unsigned char* data, int lo, int hi) {
void client_run(int num, const char *interface, const char *port,
const unsigned char *data, int lo, int hi) {
std::stringstream name;
name << "Client " << num;
Logger logger = logging::log->logger(name.str());
unsigned char buffer[SIZE * REPLY], head[2];
int have, read;
endpoint_t endpoint(interface, port);
socket_t socket;
ASSERT_TRUE(socket.Connect(endpoint));
ASSERT_TRUE(socket.SetTimeout(2, 0));
logger.trace("Socket create: {}", socket.id());
DLOG(INFO) << "Socket create: " << socket.id();
for (int len = lo; len <= hi; len += 100) {
have = 0;
head[0] = (len >> 8) & 0xff;
head[1] = len & 0xff;
ASSERT_TRUE(socket.Write(head, 2));
ASSERT_TRUE(socket.Write(data, len));
logger.trace("Socket write: {}", socket.id());
DLOG(INFO) << "Socket write: " << socket.id();
while (have < len * REPLY) {
read = socket.Read(buffer + have, SIZE);
logger.trace("Socket read: {}", socket.id());
DLOG(INFO) << "Socket read: " << socket.id();
if (read == -1) break;
have += read;
}
for (int i = 0; i < REPLY; ++i)
for (int j = 0; j < len; ++j) ASSERT_EQ(buffer[i * len + j], data[j]);
}
logger.trace("Socket done: {}", socket.id());
DLOG(INFO) << "Socket done: " << socket.id();
socket.Close();
}
void initialize_data(unsigned char* data, int size) {
void initialize_data(unsigned char *data, int size) {
std::random_device rd;
std::mt19937 gen(rd());
std::uniform_int_distribution<> dis(0, 255);

View File

@ -4,16 +4,14 @@
#include <array>
#include <cassert>
#include <chrono>
#include <cstring>
#include <iostream>
#include <vector>
#include <chrono>
#include <thread>
#include <vector>
#include "gtest/gtest.h"
#include "logging/default.hpp"
#include "logging/streams/stdout.hpp"
#include <glog/logging.h>
#include <gtest/gtest.h>
#include "communication/bolt/v1/decoder/buffer.hpp"
#include "communication/server.hpp"
@ -31,9 +29,9 @@ class TestOutputStream {};
class TestSession {
public:
TestSession(socket_t&& socket, Dbms& dbms,
QueryEngine<TestOutputStream>& query_engine)
: socket_(std::move(socket)) {
TestSession(socket_t &&socket, Dbms &dbms,
QueryEngine<TestOutputStream> &query_engine)
: socket_(std::move(socket)) {
event_.data.ptr = this;
}
@ -41,21 +39,13 @@ class TestSession {
int Id() const { return socket_.id(); }
void Execute() {
this->socket_.Write(buffer_.data(), buffer_.size());
}
void Execute() { this->socket_.Write(buffer_.data(), buffer_.size()); }
io::network::StreamBuffer Allocate() {
return buffer_.Allocate();
}
io::network::StreamBuffer Allocate() { return buffer_.Allocate(); }
void Written(size_t len) {
buffer_.Written(len);
}
void Written(size_t len) { buffer_.Written(len); }
void Close() {
this->socket_.Close();
}
void Close() { this->socket_.Close(); }
socket_t socket_;
communication::bolt::Buffer<> buffer_;
@ -68,7 +58,7 @@ using test_server_t =
test_server_t *serverptr;
std::atomic<bool> run{true};
void client_run(int num, const char* interface, const char* port) {
void client_run(int num, const char *interface, const char *port) {
endpoint_t endpoint(interface, port);
socket_t socket;
uint8_t data = 0x00;
@ -79,13 +69,12 @@ void client_run(int num, const char* interface, const char* port) {
ASSERT_TRUE(socket.Read(&data, 1));
fprintf(stderr, "CLIENT %d READ 0x%02X!\n", num, data);
ASSERT_EQ(data, 0xAA);
while (run)
std::this_thread::sleep_for(std::chrono::milliseconds(100));
while (run) std::this_thread::sleep_for(std::chrono::milliseconds(100));
socket.Close();
}
void server_run(void* serverptr, int num) {
((test_server_t*)serverptr)->Start(num);
void server_run(void *serverptr, int num) {
((test_server_t *)serverptr)->Start(num);
}
TEST(Network, SocketReadHangOnConcurrentConnections) {
@ -114,8 +103,7 @@ TEST(Network, SocketReadHangOnConcurrentConnections) {
// start clients
std::vector<std::thread> clients;
for (int i = 0; i < Nc; ++i)
clients.push_back(
std::thread(client_run, i, interface, ep.port_str()));
clients.push_back(std::thread(client_run, i, interface, ep.port_str()));
// wait for 2s and stop clients
std::this_thread::sleep_for(std::chrono::seconds(2));
@ -130,8 +118,7 @@ TEST(Network, SocketReadHangOnConcurrentConnections) {
}
int main(int argc, char **argv) {
logging::init_async();
logging::log->pipe(std::make_unique<Stdout>());
google::InitGoogleLogging(argv[0]);
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}

View File

@ -38,8 +38,8 @@ TEST(Network, Server) {
// start clients
std::vector<std::thread> clients;
for (int i = 0; i < N; ++i)
clients.push_back(
std::thread(client_run, i, interface, ep.port_str(), data, 30000, SIZE));
clients.push_back(std::thread(client_run, i, interface, ep.port_str(), data,
30000, SIZE));
// cleanup clients
for (int i = 0; i < N; ++i) clients[i].join();
@ -50,8 +50,7 @@ TEST(Network, Server) {
}
int main(int argc, char **argv) {
logging::init_async();
logging::log->pipe(std::make_unique<Stdout>());
google::InitGoogleLogging(argv[0]);
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}

View File

@ -44,8 +44,8 @@ TEST(Network, SessionLeak) {
int testlen = 3000;
for (int i = 0; i < N; ++i) {
clients.push_back(
std::thread(client_run, i, interface, ep.port_str(), data, testlen, testlen));
clients.push_back(std::thread(client_run, i, interface, ep.port_str(), data,
testlen, testlen));
std::this_thread::sleep_for(10ms);
}
@ -62,8 +62,7 @@ TEST(Network, SessionLeak) {
// run with "valgrind --leak-check=full ./network_session_leak" to check for
// memory leaks
int main(int argc, char **argv) {
logging::init_sync();
logging::log->pipe(std::make_unique<Stdout>());
google::InitGoogleLogging(argv[0]);
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}

View File

@ -9,8 +9,8 @@ constexpr size_t key_range = elems_per_thread * THREADS_NO * 2;
// This test checks insert_unique method under pressure.
// Test checks for missing data and changed/overwritten data.
int main() {
init_log();
int main(int argc, char **argv) {
google::InitGoogleLogging(argv[0]);
memory_check(THREADS_NO, [] {
map_t skiplist;

View File

@ -9,8 +9,8 @@ constexpr size_t elems_per_thread = 100000;
// Threads will try to insert keys in the same order.
// This will force threads to compete intensely with each other.
// Test checks for missing data and changed/overwritten data.
int main() {
init_log();
int main(int argc, char **argv) {
google::InitGoogleLogging(argv[0]);
memory_check(THREADS_NO, [] {
map_t skiplist;

View File

@ -5,9 +5,8 @@ constexpr size_t elems_per_thread = 1e5;
// TODO: document the test
int main() {
init_log();
int main(int argc, char **argv) {
google::InitGoogleLogging(argv[0]);
memory_check(THREADS_NO, [&] {
ds::static_array<std::thread, THREADS_NO> threads;
map_t skiplist;
@ -55,8 +54,8 @@ int main() {
// check size
{
auto accessor = skiplist.access();
permanent_assert(accessor.size() == 0, "Size should be 0, but size is "
<< accessor.size());
permanent_assert(accessor.size() == 0,
"Size should be 0, but size is " << accessor.size());
}
// check count

View File

@ -7,8 +7,8 @@ constexpr size_t elements = 2e6;
* Put elements number of elements into the skiplist per thread and check
* whether there is any memory leak
*/
int main() {
init_log();
int main(int argc, char **argv) {
google::InitGoogleLogging(argv[0]);
memory_check(THREADS_NO, [] {
map_t skiplist;

View File

@ -9,8 +9,8 @@ constexpr size_t elems_per_thread = 16e5;
// 2. analyse this code
// 3. fix the memory leak
// 4. write proper test
int main() {
init_log();
int main(int argc, char **argv) {
google::InitGoogleLogging(argv[0]);
memory_check(THREADS_NO, [&] {
ds::static_array<std::thread, THREADS_NO> threads;
@ -59,8 +59,8 @@ int main() {
// check size
{
auto accessor = skiplist.access();
permanent_assert(accessor.size() == 0, "Size should be 0, but size is "
<< accessor.size());
permanent_assert(accessor.size() == 0,
"Size should be 0, but size is " << accessor.size());
}
// check count

View File

@ -12,8 +12,8 @@ constexpr size_t no_insert_for_one_delete = 2;
// Threads will try to insert and remove keys approximately in the same order.
// This will force threads to compete intensely with each other.
// Calls of remove method are interleaved with insert calls.
int main() {
init_log();
int main(int argc, char **argv) {
google::InitGoogleLogging(argv[0]);
memory_check(THREADS_NO, [] {
map_t skiplist;

View File

@ -10,8 +10,8 @@ constexpr size_t no_insert_for_one_delete = 1;
// This test checks remove method under pressure.
// Each thread removes its own data. So removes are disjoint.
// Calls of remove method are interleaved with insert calls.
int main() {
init_log();
int main(int argc, char **argv) {
google::InitGoogleLogging(argv[0]);
memory_check(THREADS_NO, [] {
map_t skiplist;

View File

@ -12,8 +12,9 @@ constexpr size_t no_insert_for_one_delete = 2;
// This test checks remove method under pressure.
// Each thread removes random data. So removes are joint.
// Calls of remove method are interleaved with insert calls.
int main() {
init_log();
int main(int argc, char **argv) {
google::InitGoogleLogging(argv[0]);
memory_check(THREADS_NO, [] {
map_t skiplist;

View File

@ -10,8 +10,8 @@ constexpr size_t no_insert_for_one_delete = 2;
// This test checks set.
// Each thread removes random data. So removes are joint.
// Calls of remove method are interleaved with insert calls.
int main() {
init_log();
int main(int argc, char **argv) {
google::InitGoogleLogging(argv[0]);
memory_check(THREADS_NO, [] {
ConcurrentSet<std::string> skiplist;

View File

@ -14,8 +14,8 @@ constexpr size_t no_insert_for_one_delete = 1;
// Each thread makes a series of finds interleaved with methods which change.
// Exact ratio of finds per change and insert per delete can be regulated with
// no_find_per_change and no_insert_for_one_delete.
int main() {
init_log();
int main(int argc, char **argv) {
google::InitGoogleLogging(argv[0]);
memory_check(THREADS_NO, [] {
map_t skiplist;

View File

@ -1,6 +1,6 @@
#define HARDCODED_OUTPUT_STREAM
#include "gflags/gflags.h"
#include <gflags/gflags.h>
#include "dbms/dbms.hpp"
#include "query_engine_common.hpp"
@ -11,8 +11,6 @@ DECLARE_string(compile_directory);
using namespace std::chrono_literals;
using namespace tests::integration;
Logger logger;
/**
* IMPORTANT: tests only compilation and executability of implemented
* hardcoded queries (not correctness of implementation)
@ -24,12 +22,12 @@ int main(int argc, char *argv[]) {
/**
* init arguments
*/
REGISTER_ARGS(argc, argv);
gflags::ParseCommandLineFlags(&argc, &argv, true);
/**
* init engine
*/
auto log = init_logging("IntegrationQueryEngine");
init_logging("IntegrationQueryEngine");
// Manually set config compile_path to avoid loading whole config file with
// the test.
FLAGS_compile_directory = "../compiled/";
@ -42,7 +40,7 @@ int main(int argc, char *argv[]) {
// IMPORTANT: PrintRecordStream can be replaced with a smarter
// object that can test the results
WarmUpEngine(log, query_engine, dbms, stream);
WarmUpEngine(query_engine, dbms, stream);
return 0;
}

View File

@ -3,10 +3,11 @@
#include <experimental/filesystem>
#include <set>
namespace fs = std::experimental::filesystem;
#include <glog/logging.h>
#include "database/graph_db_accessor.hpp"
#include "dbms/dbms.hpp"
#include "logging/default.hpp"
#include "logging/streams/stdout.cpp"
#include "query/engine.hpp"
#include "query/frontend/stripped.hpp"
#include "stream/print_record_stream.hpp"
@ -28,25 +29,20 @@ using StreamT = PrintRecordStream;
*
* @param logger_name the name of a logger
*
* @return logger instance
*/
auto init_logging(const std::string &logger_name) {
logging::init_sync();
logging::log->pipe(std::make_unique<Stdout>());
return logging::log->logger(logger_name);
void init_logging(const std::string &logger_name) {
google::InitGoogleLogging(logger_name.c_str());
}
/**
* Get query hashes from the file defined by a path.
*
* @param log external logger because this function should be called
* from test binaries
* @param path path to a file with queries
*
* @return a set with all query hashes from the file
*/
auto LoadQueryHashes(Logger &log, const fs::path &path) {
log.info("*** Get query hashes from the file defied with path ***");
auto LoadQueryHashes(const fs::path &path) {
DLOG(INFO) << "*** Get query hashes from the file defied with path ***";
// the intention of following block is to get all hashes
// for which query implementations have to be compiled
// calculate all hashes from queries file
@ -60,23 +56,23 @@ auto LoadQueryHashes(Logger &log, const fs::path &path) {
}
permanent_assert(query_hashes.size() > 0,
"At least one hash has to be present");
log.info("{} different query hashes exist", query_hashes.size());
DLOG(INFO) << fmt::format("{} different query hashes exist",
query_hashes.size());
return query_hashes;
}
/**
* Loads query plans into the engine passed by reference.
*
* @param log external logger reference
* @param engine query engine
* @param query_hashes hashes for which plans have to be loaded
* @param path to a folder with query plan implementations
*
* @return void
*/
auto LoadQueryPlans(Logger &log, QueryEngineT &engine,
const QueryHashesT &query_hashes, const fs::path &path) {
log.info("*** Load/compile needed query implementations ***");
auto LoadQueryPlans(QueryEngineT &engine, const QueryHashesT &query_hashes,
const fs::path &path) {
DLOG(INFO) << "*** Load/compile needed query implementations ***";
auto plan_paths = LoadFilePaths(path, "cpp");
// query mark will be used to extract queries from files (because we want
// to be independent of a query hash)
@ -106,7 +102,7 @@ auto LoadQueryPlans(Logger &log, QueryEngineT &engine,
if (query_hashes.find(query::StrippedQuery(query).hash()) ==
query_hashes.end())
continue;
log.info("Path {} will be loaded.", plan_path.c_str());
DLOG(INFO) << fmt::format("Path {} will be loaded.", plan_path.c_str());
engine.ReloadCustom(query, plan_path);
break;
}
@ -116,7 +112,6 @@ auto LoadQueryPlans(Logger &log, QueryEngineT &engine,
/**
* Execute all query plans in file on the path.
*
* @param log external logger reference
* @param engine query engine
* @param dbms a database to execute queries on
* @param path path to a queries file
@ -124,9 +119,9 @@ auto LoadQueryPlans(Logger &log, QueryEngineT &engine,
*
* @return void
*/
auto ExecuteQueryPlans(Logger &log, QueryEngineT &engine, Dbms &dbms,
const fs::path &path, StreamT &stream) {
log.info("*** Execute the queries from the queries_file ***");
auto ExecuteQueryPlans(QueryEngineT &engine, Dbms &dbms, const fs::path &path,
StreamT &stream) {
DLOG(INFO) << "*** Execute the queries from the queries_file ***";
// execute all queries from queries_file
auto queries = utils::ReadLines(path);
for (auto &query : queries) {
@ -146,15 +141,13 @@ auto ExecuteQueryPlans(Logger &log, QueryEngineT &engine, Dbms &dbms,
* -q -> a file with queries
* -i -> a folder with query plans
*
* @param log external logger reference
* @param engine query engine
* @param dbms a database to execute queries on
* @param stream used by query plans to output the results
*
* @return void
*/
auto WarmUpEngine(Logger &log, QueryEngineT &engine, Dbms &dbms,
StreamT &stream) {
auto WarmUpEngine(QueryEngineT &engine, Dbms &dbms, StreamT &stream) {
// path to a file with queries
auto queries_file = fs::path(
GET_ARG("-q", "../data/queries/core/mg_basic_002.txt").get_string());
@ -163,13 +156,13 @@ auto WarmUpEngine(Logger &log, QueryEngineT &engine, Dbms &dbms,
fs::path(GET_ARG("-i", "../integration/hardcoded_query").get_string());
// load all query hashes from queries file
auto query_hashes = LoadQueryHashes(log, queries_file);
auto query_hashes = LoadQueryHashes(queries_file);
// load compile all needed query plans
LoadQueryPlans(log, engine, query_hashes, implementations_folder);
LoadQueryPlans(engine, query_hashes, implementations_folder);
// execute all loaded query plans
ExecuteQueryPlans(log, engine, dbms, queries_file, stream);
ExecuteQueryPlans(engine, dbms, queries_file, stream);
}
}
}
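
A generic illustration of the LoadQueryHashes idea above: collect one hash per query line so that only the plans that are actually needed get compiled. std::hash stands in for the project's stripped-query hash here and is not the hash the real code uses:

#include <fstream>
#include <functional>
#include <set>
#include <string>

std::set<size_t> CollectQueryHashes(const std::string &queries_file) {
  std::set<size_t> hashes;
  std::ifstream input(queries_file);
  for (std::string line; std::getline(input, line);) {
    if (line.empty()) continue;
    hashes.insert(std::hash<std::string>{}(line));  // placeholder hash
  }
  return hashes;
}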

View File

@ -1,15 +1,13 @@
#include <iostream>
#include "gflags/gflags.h"
#include <gflags/gflags.h>
#include <glog/logging.h>
#include "dbms/dbms.hpp"
#include "query/console.hpp"
#include "query/interpreter.hpp"
#include "utils/random_graph_generator.hpp"
#include "logging/default.hpp"
#include "logging/streams/stdout.hpp"
void random_generate(Dbms &dbms, uint node_count, int edge_factor = 5) {
auto dba = dbms.active();
utils::RandomGraphGenerator generator(*dba);
@ -37,10 +35,7 @@ int main(int argc, char *argv[]) {
}
// TODO switch to GFlags, once finally available
if (argc > 2) {
logging::init_sync();
logging::log->pipe(std::make_unique<Stdout>());
}
if (argc > 2) google::InitGoogleLogging(argv[0]);
Dbms dbms;
std::cout << "Generating graph..." << std::endl;

View File

@ -7,7 +7,7 @@
using namespace std::chrono_literals;
using namespace tests::integration;
int main(int argc, char* argv[]) {
int main(int argc, char *argv[]) {
// init arguments
REGISTER_ARGS(argc, argv);
@ -16,14 +16,14 @@ int main(int argc, char* argv[]) {
fs::path(GET_ARG("-i", "tests/integration/hardcoded_query").get_string());
// init engine
auto log = init_logging("ManualQueryEngine");
init_logging("ManualQueryEngine");
Dbms dbms;
StreamT stream(std::cout); // inject path to data queries
QueryEngineT query_engine;
// IMPORTANT: PrintRecordStream can be replaced with a smarter
// object that can test the results
WarmUpEngine(log, query_engine, dbms, stream);
WarmUpEngine(query_engine, dbms, stream);
// init watcher
FSWatcher watcher;
@ -43,7 +43,7 @@ int main(int argc, char* argv[]) {
auto lines = utils::ReadLines(event.path);
for (int i = 0; i < (int)lines.size(); ++i) {
// find query in the line
auto& line = lines[i];
auto &line = lines[i];
auto pos = line.find(query_mark);
// if query doesn't exist pass
if (pos == std::string::npos) continue;
@ -55,19 +55,20 @@ int main(int argc, char* argv[]) {
++i;
}
log.info("Reload: {}", query);
DLOG(INFO) << fmt::format("Reload: {}", query);
query_engine.Unload(query);
try {
query_engine.ReloadCustom(query, event.path);
auto db_accessor = dbms.active();
query_engine.Run(query, *db_accessor, stream);
} catch (query::PlanCompilationException& e) {
log.info("Query compilation failed: {}", e.what());
} catch (std::exception& e) {
log.info("Query execution failed: unknown reason");
} catch (query::PlanCompilationException &e) {
DLOG(ERROR) << fmt::format("Query compilation failed: {}",
e.what());
} catch (std::exception &e) {
DLOG(WARNING) << "Query execution failed: unknown reason";
}
log.info("Number of available query plans: {}",
query_engine.Size());
DLOG(INFO) << fmt::format("Number of available query plans: {}",
query_engine.Size());
}
}
});

View File

@ -1,8 +1,8 @@
#include <iostream>
#include <vector>
#include "logging/default.hpp"
#include "logging/streams/stdout.hpp"
#include <glog/logging.h>
#include "query/frontend/stripped.hpp"
#include "utils/command_line/arguments.hpp"
#include "utils/type_discovery.hpp"
@ -14,8 +14,7 @@
* ./query_hash -q "CREATE (n {name: \"test\n"}) RETURN n"
*/
int main(int argc, char **argv) {
logging::init_sync();
logging::log->pipe(std::make_unique<Stdout>());
google::InitGoogleLogging(argv[0]);
// init args
REGISTER_ARGS(argc, argv);

View File

@ -17,8 +17,7 @@ TEST(BoltBuffer, AllocateAndWritten) {
ASSERT_EQ(buffer.size(), 1000);
uint8_t *tmp = buffer.data();
for (int i = 0; i < 1000; ++i)
EXPECT_EQ(data[i], tmp[i]);
for (int i = 0; i < 1000; ++i) EXPECT_EQ(data[i], tmp[i]);
}
TEST(BoltBuffer, Shift) {
@ -35,21 +34,18 @@ TEST(BoltBuffer, Shift) {
ASSERT_EQ(buffer.size(), 2000);
uint8_t *tmp = buffer.data();
for (int i = 0; i < 1000; ++i)
EXPECT_EQ(data[i], tmp[i]);
for (int i = 0; i < 1000; ++i) EXPECT_EQ(data[i], tmp[i]);
buffer.Shift(1000);
ASSERT_EQ(buffer.size(), 1000);
tmp = buffer.data();
for (int i = 0; i < 1000; ++i)
EXPECT_EQ(data[i + 1000], tmp[i]);
for (int i = 0; i < 1000; ++i) EXPECT_EQ(data[i + 1000], tmp[i]);
}
int main(int argc, char **argv) {
InitializeData(data, SIZE);
logging::init_sync();
logging::log->pipe(std::make_unique<Stdout>());
google::InitGoogleLogging(argv[0]);
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}

View File

@ -16,16 +16,17 @@ TEST(BoltBuffer, CorrectChunk) {
DecoderBufferT decoder_buffer(buffer);
StreamBufferT sb = buffer.Allocate();
sb.data[0] = 0x03; sb.data[1] = 0xe8;
sb.data[0] = 0x03;
sb.data[1] = 0xe8;
memcpy(sb.data + 2, data, 1000);
sb.data[1002] = 0; sb.data[1003] = 0;
sb.data[1002] = 0;
sb.data[1003] = 0;
buffer.Written(1004);
ASSERT_EQ(decoder_buffer.GetChunk(), ChunkStateT::Whole);
ASSERT_EQ(decoder_buffer.Read(tmp, 1000), true);
for (int i = 0; i < 1000; ++i)
EXPECT_EQ(data[i], tmp[i]);
for (int i = 0; i < 1000; ++i) EXPECT_EQ(data[i], tmp[i]);
ASSERT_EQ(buffer.size(), 0);
}
@ -36,21 +37,21 @@ TEST(BoltBuffer, CorrectChunkTrailingData) {
DecoderBufferT decoder_buffer(buffer);
StreamBufferT sb = buffer.Allocate();
sb.data[0] = 0x03; sb.data[1] = 0xe8;
sb.data[0] = 0x03;
sb.data[1] = 0xe8;
memcpy(sb.data + 2, data, 2002);
sb.data[1002] = 0; sb.data[1003] = 0;
sb.data[1002] = 0;
sb.data[1003] = 0;
buffer.Written(2004);
ASSERT_EQ(decoder_buffer.GetChunk(), ChunkStateT::Whole);
ASSERT_EQ(decoder_buffer.Read(tmp, 1000), true);
for (int i = 0; i < 1000; ++i)
EXPECT_EQ(data[i], tmp[i]);
for (int i = 0; i < 1000; ++i) EXPECT_EQ(data[i], tmp[i]);
uint8_t *leftover = buffer.data();
ASSERT_EQ(buffer.size(), 1000);
for (int i = 0; i < 1000; ++i)
EXPECT_EQ(data[i + 1002], leftover[i]);
for (int i = 0; i < 1000; ++i) EXPECT_EQ(data[i + 1002], leftover[i]);
}
TEST(BoltBuffer, InvalidChunk) {
@ -58,9 +59,11 @@ TEST(BoltBuffer, InvalidChunk) {
DecoderBufferT decoder_buffer(buffer);
StreamBufferT sb = buffer.Allocate();
sb.data[0] = 0x03; sb.data[1] = 0xe8;
sb.data[0] = 0x03;
sb.data[1] = 0xe8;
memcpy(sb.data + 2, data, 2002);
sb.data[1002] = 1; sb.data[1003] = 1;
sb.data[1002] = 1;
sb.data[1003] = 1;
buffer.Written(2004);
ASSERT_EQ(decoder_buffer.GetChunk(), ChunkStateT::Invalid);
@ -68,8 +71,7 @@ TEST(BoltBuffer, InvalidChunk) {
ASSERT_EQ(buffer.size(), 1000);
uint8_t *tmp = buffer.data();
for (int i = 0; i < 1000; ++i)
EXPECT_EQ(data[i + 1002], tmp[i]);
for (int i = 0; i < 1000; ++i) EXPECT_EQ(data[i + 1002], tmp[i]);
}
TEST(BoltBuffer, GraduallyPopulatedChunk) {
@ -78,7 +80,8 @@ TEST(BoltBuffer, GraduallyPopulatedChunk) {
DecoderBufferT decoder_buffer(buffer);
StreamBufferT sb = buffer.Allocate();
sb.data[0] = 0x03; sb.data[1] = 0xe8;
sb.data[0] = 0x03;
sb.data[1] = 0xe8;
buffer.Written(2);
ASSERT_EQ(decoder_buffer.GetChunk(), ChunkStateT::Partial);
@ -90,13 +93,13 @@ TEST(BoltBuffer, GraduallyPopulatedChunk) {
}
sb = buffer.Allocate();
sb.data[0] = 0; sb.data[1] = 0;
sb.data[0] = 0;
sb.data[1] = 0;
buffer.Written(2);
ASSERT_EQ(decoder_buffer.GetChunk(), ChunkStateT::Whole);
ASSERT_EQ(decoder_buffer.Read(tmp, 1000), true);
for (int i = 0; i < 1000; ++i)
EXPECT_EQ(data[i], tmp[i]);
for (int i = 0; i < 1000; ++i) EXPECT_EQ(data[i], tmp[i]);
ASSERT_EQ(buffer.size(), 0);
}
@ -107,7 +110,8 @@ TEST(BoltBuffer, GraduallyPopulatedChunkTrailingData) {
DecoderBufferT decoder_buffer(buffer);
StreamBufferT sb = buffer.Allocate();
sb.data[0] = 0x03; sb.data[1] = 0xe8;
sb.data[0] = 0x03;
sb.data[1] = 0xe8;
buffer.Written(2);
ASSERT_EQ(decoder_buffer.GetChunk(), ChunkStateT::Partial);
@ -119,7 +123,8 @@ TEST(BoltBuffer, GraduallyPopulatedChunkTrailingData) {
}
sb = buffer.Allocate();
sb.data[0] = 0; sb.data[1] = 0;
sb.data[0] = 0;
sb.data[1] = 0;
buffer.Written(2);
sb = buffer.Allocate();
@ -129,19 +134,16 @@ TEST(BoltBuffer, GraduallyPopulatedChunkTrailingData) {
ASSERT_EQ(decoder_buffer.GetChunk(), ChunkStateT::Whole);
ASSERT_EQ(decoder_buffer.Read(tmp, 1000), true);
for (int i = 0; i < 1000; ++i)
EXPECT_EQ(data[i], tmp[i]);
for (int i = 0; i < 1000; ++i) EXPECT_EQ(data[i], tmp[i]);
uint8_t *leftover = buffer.data();
ASSERT_EQ(buffer.size(), 1000);
for (int i = 0; i < 1000; ++i)
EXPECT_EQ(data[i], leftover[i]);
for (int i = 0; i < 1000; ++i) EXPECT_EQ(data[i], leftover[i]);
}
int main(int argc, char **argv) {
InitializeData(data, SIZE);
logging::init_sync();
logging::log->pipe(std::make_unique<Stdout>());
google::InitGoogleLogging(argv[0]);
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}

View File

@ -110,8 +110,7 @@ TEST(BoltChunkedEncoderBuffer, OneAndAHalfOfMaxChunk) {
}
int main(int argc, char **argv) {
logging::init_sync();
logging::log->pipe(std::make_unique<Stdout>());
google::InitGoogleLogging(argv[0]);
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}

View File

@ -4,10 +4,10 @@
#include <iostream>
#include <vector>
#include <glog/logging.h>
#include "dbms/dbms.hpp"
#include "gtest/gtest.h"
#include "logging/default.hpp"
#include "logging/streams/stdout.hpp"
/**
* TODO (mferencevic): document
@ -39,9 +39,7 @@ class TestSocket {
return true;
}
void SetWriteSuccess(bool success) {
write_success_ = success;
}
void SetWriteSuccess(bool success) { write_success_ = success; }
std::vector<uint8_t> output;

View File

@ -1,3 +1,5 @@
#include <glog/logging.h>
#include "bolt_common.hpp"
#include "bolt_testdata.hpp"
@ -412,8 +414,7 @@ TEST(BoltDecoder, Edge) {
int main(int argc, char **argv) {
InitializeData(data, SIZE);
logging::init_sync();
logging::log->pipe(std::make_unique<Stdout>());
google::InitGoogleLogging(argv[0]);
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}

View File

@ -15,7 +15,8 @@ using query::TypedValue;
constexpr const int SIZE = 131072;
uint8_t data[SIZE];
uint64_t GetBigEndianInt(std::vector<uint8_t> &v, uint8_t len, uint8_t offset = 1) {
uint64_t GetBigEndianInt(std::vector<uint8_t> &v, uint8_t len,
uint8_t offset = 1) {
uint64_t ret = 0;
v.erase(v.begin(), v.begin() + offset);
for (int i = 0; i < len; ++i) {
@ -217,8 +218,7 @@ TEST(BoltEncoder, BoltV1ExampleMessages) {
int main(int argc, char **argv) {
InitializeData(data, SIZE);
logging::init_sync();
logging::log->pipe(std::make_unique<Stdout>());
google::InitGoogleLogging(argv[0]);
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}

View File

@ -52,8 +52,7 @@ TEST(Bolt, ResultStream) {
}
int main(int argc, char **argv) {
logging::init_sync();
logging::log->pipe(std::make_unique<Stdout>());
google::InitGoogleLogging(argv[0]);
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}

View File

@ -1,4 +1,5 @@
#include "gflags/gflags.h"
#include <gflags/gflags.h>
#include <glog/logging.h>
#include "bolt_common.hpp"
#include "communication/bolt/v1/encoder/result_stream.hpp"
@ -573,8 +574,7 @@ TEST(BoltSession, InvalidChunk) {
}
int main(int argc, char **argv) {
logging::init_sync();
logging::log->pipe(std::make_unique<Stdout>());
google::InitGoogleLogging(argv[0]);
// Set the interpret to true to avoid calling the compiler which only
// supports a limited set of queries.
FLAGS_interpret = true;

View File

@ -1,19 +1,17 @@
#include <iostream>
#include "gtest/gtest.h"
#include <glog/logging.h>
#include <gtest/gtest.h>
#include "data_structures/concurrent/concurrent_map.hpp"
#include "logging/default.hpp"
#include "logging/default.hpp"
#include "logging/streams/stdout.hpp"
#include "logging/streams/stdout.hpp"
#include "utils/assert.hpp"
using concurrent_map_t = ConcurrentMap<int, int>;
void print_skiplist(const concurrent_map_t::Accessor &map) {
logging::info("Map now has: ");
for (auto &kv : map) logging::info(" ({}, {})", kv.first, kv.second);
DLOG(INFO) << "Map now has: ";
for (auto &kv : map)
DLOG(INFO) << fmt::format(" ({}, {})", kv.first, kv.second);
}
TEST(ConcurrentMapSkiplist, Mix) {
@ -63,8 +61,7 @@ TEST(ConcurrentMapSkiplist, Mix) {
}
int main(int argc, char **argv) {
logging::init_async();
logging::log->pipe(std::make_unique<Stdout>());
google::InitGoogleLogging(argv[0]);
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();

View File

@ -1,21 +1,17 @@
#include <iostream>
#include "gtest/gtest.h"
#include <glog/logging.h>
#include <gtest/gtest.h>
#include "data_structures/concurrent/concurrent_set.hpp"
#include "logging/default.hpp"
#include "logging/streams/stdout.hpp"
#include "utils/assert.hpp"
void print_skiplist(const ConcurrentSet<int>::Accessor &skiplist) {
logging::info("Skiplist set now has:");
for (auto &item : skiplist) logging::info("{}", item);
DLOG(INFO) << "Skiplist set now has:";
for (auto &item : skiplist) DLOG(INFO) << item;
}
TEST(ConcurrentSet, Mix) {
logging::init_async();
logging::log->pipe(std::make_unique<Stdout>());
ConcurrentSet<int> set;
auto accessor = set.access();
@ -51,6 +47,7 @@ TEST(ConcurrentSet, Mix) {
}
int main(int argc, char **argv) {
google::InitGoogleLogging(argv[0]);
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}

View File

@ -1,8 +1,9 @@
#include <fstream>
#include <thread>
#include "gtest/gtest.h"
#include "logging/default.cpp"
#include <glog/logging.h>
#include <gtest/gtest.h>
#include "utils/fswatcher.hpp"
#include "utils/signals/handler.hpp"
#include "utils/terminate_handler.hpp"
@ -122,8 +123,7 @@ TEST(FSWatcherTest, ModifiyLoop) {
}
int main(int argc, char **argv) {
logging::init_sync();
logging::log->pipe(std::make_unique<Stdout>());
google::InitGoogleLogging(argv[0]);
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();

View File

@ -1,12 +1,11 @@
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include <memory>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "data_structures/ptr_int.hpp"
#include "database/graph_db_accessor.hpp"
#include "dbms/dbms.hpp"
#include "logging/streams/stdout.hpp"
using testing::UnorderedElementsAreArray;
@ -336,6 +335,7 @@ TEST(GraphDbAccessor, VisibilityAfterDeletion) {
}
int main(int argc, char **argv) {
google::InitGoogleLogging(argv[0]);
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}

Some files were not shown because too many files have changed in this diff