Merge branch 'dev' into mgcore_T159_inotify

This commit is contained in:
Marko Budiselic 2017-01-03 16:01:53 +01:00
commit 782fc05f93
126 changed files with 5751 additions and 2039 deletions

View File

@ -1,8 +0,0 @@
{
"linters": {
"cppcheck": {
"type": "cppcheck",
"include": ["(\\.cpp$)", "(\\.hpp$)"]
}
}
}

View File

@ -1,82 +1,55 @@
# MemGraph CMake configuration
cmake_minimum_required(VERSION 3.1)
# get directory name
get_filename_component(ProjectId ${CMAKE_SOURCE_DIR} NAME)
# replace whitespaces with underscores
string(REPLACE " " "_" ProjectId ${ProjectId})
# !! IMPORTANT !! run ./project_root/init.sh before cmake command
# to download dependencies
# choose a compiler
# NOTE: must be chosen before use of project() or enable_language()
if (UNIX)
set(CMAKE_C_COMPILER "clang")
set(CMAKE_CXX_COMPILER "clang++")
endif (UNIX)
# -----------------------------------------------------------------------------
# set project name
project(${ProjectId})
# get directory name
get_filename_component(project_name ${CMAKE_SOURCE_DIR} NAME)
# replace whitespaces with underscores
string(REPLACE " " "_" project_name ${project_name})
# set project name
project(${project_name})
# -----------------------------------------------------------------------------
# setup CMake module path, defines path for include() and find_package()
# https://cmake.org/cmake/help/latest/variable/CMAKE_MODULE_PATH.html
set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} ${PROJECT_SOURCE_DIR}/cmake)
# -----------------------------------------------------------------------------
# custom function definitions
include(functions)
# -----------------------------------------------------------------------------
# threading
find_package(Threads REQUIRED)
# flags
# -----------------------------------------------------------------------------
# c++14
set(cxx_standard 14)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++1y")
# -----------------------------------------------------------------------------
# functions
# prints all included directories
function(list_includes)
get_property(dirs DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
PROPERTY INCLUDE_DIRECTORIES)
foreach(dir ${dirs})
message(STATUS "dir='${dir}'")
endforeach()
endfunction(list_includes)
# get file names from list of file paths
function(get_file_names file_paths file_names)
set(file_names "")
foreach(file_path ${file_paths})
get_filename_component (file_name ${file_path} NAME_WE)
list(APPEND file_names ${file_name})
endforeach()
set(file_names "${file_names}" PARENT_SCOPE)
endfunction()
MACRO(SUBDIRLIST result curdir)
FILE(GLOB children RELATIVE ${curdir} ${curdir}/*)
SET(dirlist "")
FOREACH(child ${children})
IF(IS_DIRECTORY ${curdir}/${child})
LIST(APPEND dirlist ${child})
ENDIF()
ENDFOREACH()
SET(${result} ${dirlist})
ENDMACRO()
# custom targets
# move test data data to the build directory
if (UNIX)
set(test_data "tests/data")
set(test_data_src "${CMAKE_SOURCE_DIR}/${test_data}")
set(test_data_dst "${CMAKE_BINARY_DIR}/${test_data}")
add_custom_target (test_data
COMMAND rm -rf ${test_data_dst}
COMMAND cp -r ${test_data_src} ${test_data_dst}
)
endif (UNIX)
# external dependencies
# dir variables
set(src_dir ${CMAKE_SOURCE_DIR}/src)
set(libs_dir ${CMAKE_SOURCE_DIR}/libs)
set(include_dir ${CMAKE_SOURCE_DIR}/include)
set(build_include_dir ${CMAKE_BINARY_DIR}/include)
set(test_include_dir ${CMAKE_BINARY_DIR}/tests/include)
set(test_src_dir ${CMAKE_BINARY_DIR}/tests/src)
# -----------------------------------------------------------------------------
# setup external dependencies
# !! IMPORTANT !! run ./libs/setup.sh before cmake command
# TODO: run from execute_process
# lemon & lempar
set(lemon_dir ${libs_dir}/lemon)
# lexertl
@ -90,14 +63,17 @@ set(yaml_include_dir ${yaml_source_dir}/include)
set(yaml_static_lib ${yaml_source_dir}/libyaml-cpp.a)
# Catch (C++ Automated Test Cases in Headers)
set(catch_source_dir "${libs_dir}/Catch")
# -----------------------------------------------------------------------------
# load cmake modules: cmake/*.cmake
include(gtest)
include(gbenchmark)
# -----------------------------------------------------------------------------
# build memgraph's cypher grammar
# copy grammar file to the build directory
FILE(COPY ${include_dir}/query/language/cypher/cypher.y DESTINATION ${CMAKE_BINARY_DIR})
FILE(COPY ${include_dir}/query/language/cypher/cypher.y
DESTINATION ${CMAKE_BINARY_DIR})
# build cypher parser (only c file - cypher.c)
EXECUTE_PROCESS(
COMMAND ${lemon_dir}/lemon ${CMAKE_BINARY_DIR}/cypher.y -s
@ -109,34 +85,22 @@ FILE(RENAME ${CMAKE_BINARY_DIR}/cypher.c ${CMAKE_BINARY_DIR}/cypher.cpp)
SET(cypher_build_include_dir ${build_include_dir}/cypher)
FILE(MAKE_DIRECTORY ${cypher_build_include_dir})
FILE(RENAME ${CMAKE_BINARY_DIR}/cypher.h ${cypher_build_include_dir}/cypher.h)
# -----------------------------------------------------------------------------
# prepare template and destination folders for query engine (tests)
# and memgraph server binary
# copy query_engine's templates file
FILE(COPY ${src_dir}/query_engine/template DESTINATION ${CMAKE_BINARY_DIR}/tests)
FILE(COPY ${src_dir}/query_engine/template
DESTINATION ${CMAKE_BINARY_DIR}/tests)
FILE(COPY ${src_dir}/query_engine/template DESTINATION ${CMAKE_BINARY_DIR})
# create destination folder for compiled queries
FILE(MAKE_DIRECTORY ${CMAKE_BINARY_DIR}/tests/compiled/cpu)
FILE(MAKE_DIRECTORY ${CMAKE_BINARY_DIR}/compiled/cpu)
# -----------------------------------------------------------------------------
# TODO: filter header files, all files don't need to be copied
# they are all copied because query engine needs header files during
# query compilation
# TODO: make a function (REMOVE copy pasted part)
# SUBDIRLIST(source_folders ${src_dir})
# foreach(source_folder ${source_folders})
# file(COPY ${src_dir}/${source_folder} DESTINATION ${build_include_dir})
# endforeach()
SUBDIRLIST(source_folders ${src_dir})
foreach(source_folder ${source_folders})
file(COPY ${src_dir}/${source_folder} DESTINATION ${test_src_dir})
endforeach()
SUBDIRLIST(source_folders ${include_dir})
# NOTE(fix): the loop variable was misspelled "source_foler", so the body's
# ${source_folder} expansion never referred to the loop value and the copy
# of the include subdirectories was broken.
foreach(source_folder ${source_folders})
    file(COPY ${include_dir}/${source_folder} DESTINATION ${test_include_dir})
endforeach()
# copy files needed for query engine (headers)
include(copy_includes)
# -----------------------------------------------------------------------------
# linter setup (clang-tidy)
# all source files for linting
@ -146,7 +110,6 @@ FILE(GLOB_RECURSE LINTER_SRC_FILES
${CMAKE_SOURCE_DIR}/poc/.cpp
)
MESSAGE(STATUS "All cpp files for linting are: ${LINTER_SRC_FILES}")
# linter target clang-tidy
find_program(CLANG_TIDY "clang-tidy")
if(CLANG_TIDY)
@ -160,24 +123,29 @@ if(CLANG_TIDY)
-I${CMAKE_SOURCE_DIR}/include -I${fmt_source_dir} -I${yaml_include_dir}
)
endif()
# linter setup
# -----------------------------------------------------------------------------
# debug flags
# TODO: add specific flags
# release flags
set(CMAKE_CXX_FLAGS_RELEASE "-O2 -DNDEBUG")
#debug flags
set(CMAKE_CXX_FLAGS_DEBUG "-g")
# compiler specific flags
if ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang")
# set(CMAKE_CXX_FLAGS_DEBUG "-Wl,--export-dynamic ${CMAKE_CXX_FLAGS_DEBUG}")
elseif ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU")
# set(CMAKE_CXX_FLAGS_DEBUG "-rdynamic ${CMAKE_CXX_FLAGS_DEBUG}")
endif()
# release flags
set(CMAKE_CXX_FLAGS_RELEASE "-O2 ${CMAKE_CXX_FLAGS_RELEASE}")
# default build type is debug
if ("${CMAKE_BUILD_TYPE}" STREQUAL "")
set(CMAKE_BUILD_TYPE "debug")
endif()
message(STATUS "CMake build type: ${CMAKE_BUILD_TYPE}")
# -----------------------------------------------------------------------------
#debug flags
set(CMAKE_CXX_FLAGS_DEBUG "-g2 ${CMAKE_CXX_FLAGS_DEBUG}")
# TODO: find a way to apply the defines at query compile time
# -- configure defines -- default is ON | true | enabled ----------------------
# -- logging ------------------------------------------------------------------
# logging levels
option(LOG_NO_TRACE "Disable trace logging" OFF)
message(STATUS "LOG_NO_TRACE: ${LOG_NO_TRACE}")
if (LOG_NO_TRACE)
@ -207,15 +175,20 @@ message(STATUS "LOG_NO_ERROR: ${LOG_NO_ERROR}")
if (LOG_NO_ERROR)
add_definitions(-DLOG_NO_ERROR)
endif()
# -- logging ------------------------------------------------------------------
# -- logger -------------------------------------------------------------------
option(SYNC_LOGGER "" OFF)
message(STATUS "SYNC LOGGER: ${SYNC_LOGGER}")
# TODO: find a way to apply those defines at query compile time
# -----------------------------------------------------------------------------
# logger type
# the default logger is sync logger
# on: cmake ... -DSYNC_LOGGER=OFF ... async logger is going to be used
option(SYNC_LOGGER "Sync logger" ON)
message(STATUS "SYNC_LOGGER: ${SYNC_LOGGER}")
if (SYNC_LOGGER)
add_definitions(-DSYNC_LOGGER)
endif()
# -- logger -------------------------------------------------------------------
# -- assert -------------------------------------------------------------------
# -----------------------------------------------------------------------------
# assert
option(RUNTIME_ASSERT "Enable runtime assertions" ON)
message(STATUS "RUNTIME_ASSERT: ${RUNTIME_ASSERT}")
if(RUNTIME_ASSERT)
@ -227,39 +200,52 @@ message(STATUS "THROW_EXCEPTION_ON_ERROR: ${THROW_EXCEPTION_ON_ERROR}")
if(THROW_EXCEPTION_ON_ERROR)
add_definitions(-DTHROW_EXCEPTION_ON_ERROR)
endif()
# -- assert -------------------------------------------------------------------
# -- ndebug -------------------------------------------------------------------
# -----------------------------------------------------------------------------
# ndebug
option(NDEBUG "No debug" OFF)
message(STATUS "NDEBUG: ${NDEBUG} (be careful CMAKE_BUILD_TYPE can also append this flag)")
message(STATUS "NDEBUG: ${NDEBUG} (be careful CMAKE_BUILD_TYPE can also \
append this flag)")
if(NDEBUG)
add_definitions( -DNDEBUG )
endif()
# -- ndebug -------------------------------------------------------------------
# -----------------------------------------------------------------------------
# -- GLIBCXX_DEBUG ------------------------------------------------------------
# glibcxx debug (useful for gdb)
# the problem is that the query engine doesn't work as it should work if
# this flag is present
# this flag is present (TODO: figure out why)
option(GLIBCXX_DEBUG "glibc debug" OFF)
message(STATUS "GLIBCXX_DEBUG: ${GLIBCXX_DEBUG} (solves problem with _M_dataplus member during a debugging process")
message(STATUS "GLIBCXX_DEBUG: ${GLIBCXX_DEBUG} (solves problem with \
_M_dataplus member during a debugging process)")
if(GLIBCXX_DEBUG)
set(CMAKE_CXX_FLAGS_DEBUG "-D_GLIBCXX_DEBUG ${CMAKE_CXX_FLAGS_DEBUG}")
endif()
# -----------------------------------------------------------------------------
# -- binaries -----------------------------------------------------------------
# option binaries
# memgraph
option(MEMGRAPH "Build memgraph binary" ON)
message(STATUS "MEMGRAPH binary: ${MEMGRAPH}")
# proof of concept
option(POC "Build proof of concept binaries" ON)
message(STATUS "POC binaries: ${POC}")
option(TOOLS "Build tool executables" ON)
message(STATUS "TOOLS binaries: ${TOOLS}")
option(TESTS "Build test binaries" ON)
message(STATUS "TESTS binaries: ${TESTS}")
option(BENCHMARK "Build benchmark binaries" ON)
message(STATUS "BENCHMARK binaries: ${BENCHMARK}")
# -- binaries -----------------------------------------------------------------
# -- configure defines --------------------------------------------------------
# tests
option(ALL_TESTS "Add all test binaries" ON)
message(STATUS "Add all test binaries: ${ALL_TESTS}")
option(BENCHMARK_TESTS "Add benchmark test binaries" OFF)
message(STATUS "Add benchmark test binaries: ${BENCHMARK_TESTS}")
option(CONCURRENT_TESTS "Add concurrent test binaries" OFF)
message(STATUS "Add concurrent test binaries: ${CONCURRENT_TESTS}")
option(INTEGRATION_TESTS "Add integration test binaries" OFF)
message(STATUS "Add integration test binaries: ${INTEGRATION_TESTS}")
option(MANUAL_TESTS "Add manual test binaries" OFF)
message(STATUS "Add manual test binaries: ${MANUAL_TESTS}")
option(UNIT_TESTS "Add unit test binaries" OFF)
message(STATUS "Add unit test binaries: ${UNIT_TESTS}")
# -----------------------------------------------------------------------------
# -- includes -----------------------------------------------------------------
# includes
include_directories(${CMAKE_SOURCE_DIR}/include)
include_directories(${src_dir})
include_directories(${build_include_dir})
@ -274,14 +260,17 @@ include_directories(${r3_source_dir}/include)
# creates build/libcypher_lib.a
add_library(cypher_lib STATIC ${CMAKE_BINARY_DIR}/cypher.cpp)
# -----------------------------------------------------------------------------
# REST API preprocessor
EXECUTE_PROCESS(
COMMAND python link_resources.py
WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}/src/api
)
# TODO: remove from here (isolate HTTP server)
# # REST API preprocessor
# EXECUTE_PROCESS(
# COMMAND python link_resources.py
# WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}/src/api
# )
# # ---------------------------------------------------------------------------
# TODO: create separate static library from bolt code
# all memgraph src files
set(memgraph_src_files
${src_dir}/config/config.cpp
${src_dir}/dbms/dbms.cpp
@ -353,29 +342,29 @@ set(memgraph_src_files
${src_dir}/storage/edge_accessor.cpp
${src_dir}/storage/record_accessor.cpp
)
# -----------------------------------------------------------------------------
# STATIC library used by memgraph executables
add_library(memgraph STATIC ${memgraph_src_files})
add_library(memgraph_lib STATIC ${memgraph_src_files})
# -----------------------------------------------------------------------------
# STATIC PIC library used by query engine
add_library(memgraph_pic STATIC ${memgraph_src_files})
set_property(TARGET memgraph_pic PROPERTY POSITION_INDEPENDENT_CODE TRUE)
# tests
if (TESTS)
enable_testing()
add_subdirectory(tests)
endif()
# -----------------------------------------------------------------------------
# proof of concepts
if (POC)
add_subdirectory(poc)
endif()
# -----------------------------------------------------------------------------
# benchmark binaries
if (BENCHMARK)
add_subdirectory(${PROJECT_SOURCE_DIR}/tests/benchmark)
# tests
if (ALL_TESTS OR BENCHMARK_TESTS OR CONCURRENT_TEST OR INTEGRATION_TEST
OR MANUAL_TESTS OR UNIT_TESTS)
add_subdirectory(tests)
endif()
# -----------------------------------------------------------------------------
# memgraph build name
execute_process(
@ -395,19 +384,17 @@ string(STRIP ${COMMIT_NO} COMMIT_NO)
string(STRIP ${COMMIT_HASH} COMMIT_HASH)
set(MEMGRAPH_BUILD_NAME
"memgraph_${COMMIT_NO}_${COMMIT_HASH}_${COMMIT_BRANCH}_${CMAKE_BUILD_TYPE}")
message(STATUS "CMake build type: ${CMAKE_BUILD_TYPE}")
message(STATUS "Debug flags: ${CMAKE_CXX_FLAGS_DEBUG}")
message(STATUS "Release flags: ${CMAKE_CXX_FLAGS_RELEASE}")
# -----------------------------------------------------------------------------
# memgraph main executable
if (MEMGRAPH)
add_executable(${MEMGRAPH_BUILD_NAME} ${src_dir}/memgraph_bolt.cpp)
target_link_libraries(${MEMGRAPH_BUILD_NAME} memgraph)
target_link_libraries(${MEMGRAPH_BUILD_NAME} memgraph_lib)
target_link_libraries(${MEMGRAPH_BUILD_NAME} stdc++fs)
target_link_libraries(${MEMGRAPH_BUILD_NAME} Threads::Threads)
target_link_libraries(${MEMGRAPH_BUILD_NAME} cypher_lib)
if (UNIX)
target_link_libraries(${MEMGRAPH_BUILD_NAME} crypto)
# target_link_libraries(${MEMGRAPH_BUILD_NAME} ssl)
@ -416,3 +403,4 @@ if (MEMGRAPH)
target_link_libraries(${MEMGRAPH_BUILD_NAME} dl)
endif (UNIX)
endif()
# -----------------------------------------------------------------------------

2436
Doxyfile Normal file

File diff suppressed because it is too large Load Diff

BIN
Doxylogo.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 6.6 KiB

View File

@ -118,7 +118,8 @@ FILE(COPY ${include_dir}/utils/char_str.hpp DESTINATION ${build_include_dir}/uti
FILE(COPY ${include_dir}/utils/void.hpp DESTINATION ${build_include_dir}/utils)
FILE(COPY ${include_dir}/utils/array_store.hpp DESTINATION ${build_include_dir}/utils)
FILE(COPY ${include_dir}/utils/bswap.hpp DESTINATION ${build_include_dir}/utils)
FILE(COPY ${include_dir}/utils/stacktrace.hpp DESTINATION ${build_include_dir}/utils)
FILE(COPY ${include_dir}/utils/stacktrace/stacktrace.hpp DESTINATION ${build_include_dir}/utils/stacktrace)
FILE(COPY ${include_dir}/utils/stacktrace/log.hpp DESTINATION ${build_include_dir}/utils/stacktrace)
FILE(COPY ${include_dir}/utils/auto_scope.hpp DESTINATION ${build_include_dir}/utils)
FILE(COPY ${include_dir}/utils/assert.hpp DESTINATION ${build_include_dir}/utils)
FILE(COPY ${include_dir}/utils/reference_wrapper.hpp DESTINATION ${build_include_dir}/utils)
@ -132,7 +133,6 @@ FILE(COPY ${include_dir}/utils/counters/atomic_counter.hpp DESTINATION ${build_i
FILE(COPY ${include_dir}/utils/counters/simple_counter.hpp DESTINATION ${build_include_dir}/utils/counters)
FILE(COPY ${include_dir}/utils/random/fast_binomial.hpp DESTINATION ${build_include_dir}/utils/random)
FILE(COPY ${include_dir}/utils/random/xorshift128plus.hpp DESTINATION ${build_include_dir}/utils/random)
FILE(COPY ${include_dir}/utils/exceptions/basic_exception.hpp DESTINATION ${build_include_dir}/utils/exceptions)
FILE(COPY ${include_dir}/utils/datetime/timestamp.hpp DESTINATION ${build_include_dir}/utils/datetime)
FILE(COPY ${include_dir}/utils/datetime/datetime_error.hpp DESTINATION ${build_include_dir}/utils/datetime)
FILE(COPY ${include_dir}/utils/types/byte.hpp DESTINATION ${build_include_dir}/utils/types)
@ -141,6 +141,10 @@ FILE(COPY ${include_dir}/utils/option.hpp DESTINATION ${build_include_dir}/utils
FILE(COPY ${include_dir}/utils/border.hpp DESTINATION ${build_include_dir}/utils)
FILE(COPY ${include_dir}/utils/order.hpp DESTINATION ${build_include_dir}/utils)
FILE(COPY ${include_dir}/utils/numerics/saturate.hpp DESTINATION ${build_include_dir}/utils/numerics)
FILE(COPY ${include_dir}/utils/memory/stack_allocator.hpp DESTINATION ${build_include_dir}/utils/memory)
FILE(COPY ${include_dir}/utils/memory/block_allocator.hpp DESTINATION ${build_include_dir}/utils/memory)
FILE(COPY ${include_dir}/utils/exceptions/basic_exception.hpp DESTINATION ${build_include_dir}/utils/exceptions)
FILE(COPY ${include_dir}/utils/exceptions/out_of_memory.hpp DESTINATION ${build_include_dir}/utils/exceptions)
FILE(COPY ${include_dir}/utils/iterator/iterator_base.hpp DESTINATION ${build_include_dir}/utils/iterator)
FILE(COPY ${include_dir}/utils/iterator/virtual_iter.hpp DESTINATION ${build_include_dir}/utils/iterator)

29
cmake/functions.cmake Normal file
View File

@ -0,0 +1,29 @@
# Diagnostic helper: prints every directory currently on the include path
# of the current source directory.
function(list_includes)
    get_property(include_dirs
                 DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
                 PROPERTY INCLUDE_DIRECTORIES)
    foreach(include_dir IN LISTS include_dirs)
        message(STATUS "dir='${include_dir}'")
    endforeach()
endfunction()
# get_file_names(<file_paths> <output_var>)
#
# Collects the base name (no directory, no extension) of every path in
# <file_paths> and stores the resulting list into <output_var> in the
# caller's scope.
#
# NOTE(fix): the result is now written to the variable *named by* the second
# argument (set(${file_names} ...)) instead of always to a variable literally
# called "file_names", so any output variable name works for the caller.
function(get_file_names file_paths file_names)
    set(names "")
    foreach(file_path IN LISTS file_paths)
        get_filename_component(name ${file_path} NAME_WE)
        list(APPEND names ${name})
    endforeach()
    set(${file_names} "${names}" PARENT_SCOPE)
endfunction()
# SUBDIRLIST(<result> <curdir>)
# Stores the names of all immediate subdirectories of <curdir> into <result>.
# Implemented as a macro so the result lands directly in the caller's scope.
macro(SUBDIRLIST result curdir)
    file(GLOB children RELATIVE ${curdir} ${curdir}/*)
    set(dirlist "")
    foreach(child ${children})
        if(IS_DIRECTORY ${curdir}/${child})
            list(APPEND dirlist ${child})
        endif()
    endforeach()
    set(${result} ${dirlist})
endmacro()

2
docs/.gitignore vendored Normal file
View File

@ -0,0 +1,2 @@
html/
latex/

7
docs/README.md Normal file
View File

@ -0,0 +1,7 @@
# Memgraph Code Documentation
IMPORTANT: auto-generated (run doxygen Doxyfile in the project root)
* HTML - just open docs/html/index.html
* Latex - run make inside docs/latex

View File

@ -1 +0,0 @@
# TODO

View File

@ -7,8 +7,11 @@ namespace bolt
namespace config
{
static constexpr size_t N = 65535; /* chunk size */
static constexpr size_t C = N + 2; /* end mark */
/** chunk size */
static constexpr size_t N = 65535;
/** end mark */
static constexpr size_t C = N + 2;
}
}

View File

@ -5,17 +5,38 @@ namespace bolt
enum class PackType
{
Null, // denotes absence of a value
Boolean, // denotes a type with two possible values (t/f)
Integer, // 64-bit signed integral number
Float, // 64-bit floating point number
Bytes, // binary data
String, // unicode string
List, // collection of values
Map, // collection of zero or more key/value pairs
Struct, // zero or more packstream values
EndOfStream, // denotes stream value end
Reserved // reserved for future use
/** denotes absence of a value */
Null,
/** denotes a type with two possible values (t/f) */
Boolean,
/** 64-bit signed integral number */
Integer,
/** 64-bit floating point number */
Float,
/** binary data */
Bytes,
/** unicode string */
String,
/** collection of values */
List,
/** collection of zero or more key/value pairs */
Map,
/** zero or more packstream values */
Struct,
/** denotes stream value end */
EndOfStream,
/** reserved for future use */
Reserved
};
}

View File

@ -27,7 +27,7 @@ class BoltSerializer
public:
BoltSerializer(Stream &stream) : encoder(stream) {}
/* Serializes the vertex accessor into the packstream format
/** Serializes the vertex accessor into the packstream format
*
* struct[size = 3] Vertex [signature = 0x4E] {
* Integer node_id;
@ -64,7 +64,7 @@ public:
}
}
/* Serializes the vertex accessor into the packstream format
/** Serializes the vertex accessor into the packstream format
*
* struct[size = 5] Edge [signature = 0x52] {
* Integer edge_id;
@ -79,7 +79,7 @@ public:
void write_null() { encoder.write_null(); }
void write(const Null &v) { encoder.write_null(); }
void write(const Null &) { encoder.write_null(); }
void write(const Bool &prop) { encoder.write_bool(prop.value()); }

View File

@ -10,9 +10,10 @@
namespace bolt
{
// compiled queries have to use this class in order to return results
// query code should not know about bolt protocol
/**
* compiled queries have to use this class in order to return results
* query code should not know about bolt protocol
*/
template <class Socket>
class RecordStream
{

View File

@ -1,4 +1,6 @@
#pragma once
/* Memgraph Communication protocol
/* Memgraph communication protocol
* gate is the first name proposal for the protocol */
// TODO

View File

@ -1,3 +1,3 @@
#pragma once
/* HTTP & HTTPS implementation */
/* TODO: HTTP & HTTPS implementations */

View File

@ -0,0 +1,76 @@
#pragma once
#include <bitset>
#include <cstddef>
#include <cstdint>
#include <functional>
#include <iostream>
#include <vector>
/**
 * Generic Bloom filter.
 *
 * Read more about bloom filters here:
 *   http://en.wikipedia.org/wiki/Bloom_filter
 *   http://www.jasondavies.com/bloomfilter/
 *
 * @tparam Type       type of the stored elements
 * @tparam BucketSize number of bits in the underlying bitset
 */
template <class Type, int BucketSize = 8>
class BloomFilter
{
private:
    using HashFunction = std::function<uint64_t(const Type &)>;
    using CompressionFunction = std::function<int(uint64_t)>;

    std::bitset<BucketSize> filter_;
    std::vector<HashFunction> hashes_;
    CompressionFunction compression_;
    // scratch buffer reused by get_buckets to avoid a reallocation per call
    std::vector<int> buckets;

    // default compression maps a 64-bit hash onto a valid bit index
    int default_compression(uint64_t hash) { return hash % BucketSize; }

    // computes the bit index for every hash function applied to `data`
    void get_buckets(const Type &data)
    {
        // NOTE(fix): use std::size_t to avoid the signed/unsigned
        // comparison the original `int i` loop produced
        for (std::size_t i = 0; i < hashes_.size(); i++)
            buckets[i] = compression_(hashes_[i](data));
    }

    // debugging helper
    // NOTE(fix): was declared with std::vector<uint64_t>& although the only
    // bucket vector in this class is std::vector<int>, so it could never be
    // called with it; the parameter type now matches
    void print_buckets(std::vector<int> &buckets)
    {
        for (std::size_t i = 0; i < buckets.size(); i++)
        {
            std::cout << buckets[i] << " ";
        }
        std::cout << std::endl;
    }

public:
    /**
     * @param funcs       hash functions applied to each element
     * @param compression maps a hash onto a bit index; when empty, the
     *                    default `hash % BucketSize` is used
     */
    BloomFilter(std::vector<HashFunction> funcs,
                CompressionFunction compression = {})
        : hashes_(funcs)
    {
        if (!compression)
            compression_ = std::bind(&BloomFilter::default_compression, this,
                                     std::placeholders::_1);
        else
            compression_ = compression;

        buckets.resize(hashes_.size());
    }

    /**
     * Returns true if `data` might be in the set. A false return is exact:
     * Bloom filters never produce false negatives.
     */
    bool contains(const Type &data)
    {
        get_buckets(data);

        bool contains_element = true;
        for (std::size_t i = 0; i < buckets.size(); i++)
            contains_element &= filter_[buckets[i]];

        return contains_element;
    }

    /** Inserts `data` by setting the bit chosen by every hash function. */
    void insert(const Type &data)
    {
        get_buckets(data);

        for (std::size_t i = 0; i < buckets.size(); i++)
            filter_[buckets[i]] = true;
    }
};

View File

@ -0,0 +1,36 @@
#pragma once
#include "data_structures/concurrent/common.hpp"
#include "data_structures/concurrent/skiplist.hpp"
#include "data_structures/concurrent/concurrent_map.hpp"
using std::pair;
/**
 * Concurrent map whose lookups are pre-filtered by a Bloom filter.
 *
 * contains() consults the Bloom filter first; only on a (possible) hit does
 * it touch the underlying ConcurrentMap. False positives fall through to the
 * map and false negatives cannot occur, so the final answer is exact.
 *
 * @tparam Key         type of the key
 * @tparam Value       type of the stored data
 * @tparam BloomFilter filter type; must provide insert(Key) and contains(Key)
 */
template <class Key, class Value, class BloomFilter>
class ConcurrentBloomMap {
    using item_t = Item<Key, Value>;
    using list_it = typename SkipList<item_t>::Iterator;

private:
    ConcurrentMap<Key, Value> map_;
    BloomFilter filter_;

public:
    ConcurrentBloomMap(BloomFilter filter) : filter_(filter) {}

    /** Inserts (key, data); the key is also recorded in the Bloom filter. */
    std::pair<list_it, bool> insert(const Key &key, const Value &data) {
        filter_.insert(key);

        // NOTE(fix): access() returns by value, so the std::move() that
        // previously wrapped this call was redundant (moving an rvalue).
        auto accessor = map_.access();

        return accessor.insert(key, data);
    }

    /** Returns whether key is present; a filter miss short-circuits the map. */
    bool contains(const Key &key) {
        if (!filter_.contains(key)) return false;

        auto accessor = map_.access();
        return accessor.contains(key);
    }
};

View File

@ -70,7 +70,7 @@ private:
{
assert(list != nullptr);
// Increment number of iterators accessing list.
list->count++;
list->active_threads_no_++;
// Start from the begining of list.
reset();
}
@ -99,7 +99,7 @@ private:
// Fetch could be relaxed
// There exist possibility that no one will delete garbage at this
// time but it will be deleted at some other time.
if (list->count.fetch_sub(1) == 1 && // I am the last one accessing
if (list->active_threads_no_.fetch_sub(1) == 1 && // I am the last one accessing
head_rem != nullptr && // There is some garbage
cas<Node *>(list->removed, head_rem,
nullptr) // No new garbage was added.
@ -177,6 +177,8 @@ private:
store(node->next, next);
// Then try to set as head.
} while (!cas(list->head, next, node));
list->count_.fetch_add(1);
}
// True only if this call removed the element. Only reason for fail is
@ -200,6 +202,7 @@ private:
}
// Add to list of to be garbage collected.
store(curr->next_rem, swap(list->removed, curr));
list->count_.fetch_sub(1);
return true;
}
return false;
@ -321,10 +324,14 @@ public:
ConstIterator cend() { return ConstIterator(); }
std::size_t size() { return count.load(std::memory_order_consume); }
std::size_t active_threads_no() { return active_threads_no_.load(); }
std::size_t size() { return count_.load(); }
private:
std::atomic<std::size_t> count{0};
// TODO: use lazy GC or something else as a garbage collection strategy
// use the same principle as in skiplist
std::atomic<std::size_t> active_threads_no_{0};
std::atomic<std::size_t> count_{0};
std::atomic<Node *> head{nullptr};
std::atomic<Node *> removed{nullptr};
};

View File

@ -5,9 +5,12 @@
using std::pair;
// Multi thread safe map based on skiplist.
// K - type of key.
// T - type of data.
/**
* Multi thread safe map based on skiplist.
*
* @tparam K is a type of key.
* @tparam T is a type of data.
*/
template <typename K, typename T>
class ConcurrentMap
{

View File

@ -5,9 +5,12 @@
using std::pair;
// Multi thread safe multi map based on skiplist.
// K - type of key.
// T - type of data.
/**
* Multi thread safe multi map based on skiplist.
*
* @tparam K is a type of key.
* @tparam T is a type of data.
*/
template <typename K, typename T>
class ConcurrentMultiMap
{

View File

@ -12,7 +12,7 @@
#include "data_structures/concurrent/skiplist_gc.hpp"
/* @brief Concurrent lock-based skiplist with fine grained locking
/** @brief Concurrent lock-based skiplist with fine grained locking
*
* From Wikipedia:
* "A skip list is a data structure that allows fast search within an
@ -97,11 +97,13 @@ template <class T, size_t H = 32, class lock_t = SpinLock>
class SkipList : private Lockable<lock_t>
{
public:
// computes the height for the new node from the interval [1...H]
// with p(k) = (1/2)^k for all k from the interval
/**
* computes the height for the new node from the interval [1...H]
* with p(k) = (1/2)^k for all k from the interval
*/
static thread_local FastBinomial<H> rnd;
/* @brief Wrapper class for flags used in the implementation
/** @brief Wrapper class for flags used in the implementation
*
* MARKED flag is used to logically delete a node.
* FULLY_LINKED is used to mark the node as fully inserted, i.e. linked
@ -224,12 +226,14 @@ public:
Placeholder<T> data;
// this creates an array of the size zero. we can't put any sensible
// value here since we don't know what size it will be untill the
// node is allocated. we could make it a Node** but then we would
// have two memory allocations, one for node and one for the forward
// list. this way we avoid expensive malloc/free calls and also cache
// thrashing when following a pointer on the heap
/**
* this creates an array of the size zero. we can't put any sensible
 * value here since we don't know what size it will be until the
* node is allocated. we could make it a Node** but then we would
* have two memory allocations, one for node and one for the forward
* list. this way we avoid expensive malloc/free calls and also cache
* thrashing when following a pointer on the heap
*/
std::atomic<Node *> tower[0];
};
@ -441,6 +445,7 @@ public:
}
private:
// TODO: figure why start is unused
static int update_path(SkipList *skiplist, int start, const K &item,
Node *preds[], Node *succs[])
{
@ -664,14 +669,18 @@ private:
return (node == nullptr) || item < node->value();
}
// Returns first occurence of item if there exists one.
/**
 * Returns first occurrence of item if there exists one.
*/
template <class K>
ConstIterator find(const K &item) const
{
return const_cast<SkipList *>(this)->find_node<ConstIterator, K>(item);
}
// Returns first occurence of item if there exists one.
/**
 * Returns first occurrence of item if there exists one.
*/
template <class K>
Iterator find(const K &item)
{
@ -689,7 +698,9 @@ private:
}
}
// Returns iterator on searched element or the first larger element.
/**
* Returns iterator on searched element or the first larger element.
*/
template <class It, class K>
It find_or_larger(const K &item)
{
@ -758,8 +769,11 @@ private:
return valid;
}
// Inserts non unique data into list.
// NOTE: Uses modified logic from insert method.
/**
* Inserts non unique data into list.
*
* NOTE: Uses modified logic from insert method.
*/
Iterator insert_non_unique(T &&data, Node *preds[], Node *succs[])
{
while (true) {
@ -823,9 +837,12 @@ private:
}
}
// Insert unique data
// F - type of funct which will create new node if needed. Recieves height
// of node.
/**
* Insert unique data
*
 * F - type of function which will create a new node if needed. Receives height
* of node.
*/
std::pair<Iterator, bool> insert(Node *preds[], Node *succs[], T &&data)
{
while (true) {
@ -857,8 +874,11 @@ private:
}
}
// Insert unique data
// NOTE: This is almost all duplicate code from insert.
/**
* Insert unique data
*
* NOTE: This is almost all duplicate code from insert.
*/
template <class K, class... Args>
std::pair<Iterator, bool> emplace(Node *preds[], Node *succs[], K &key,
Args &&... args)
@ -893,9 +913,11 @@ private:
}
}
// Inserts data to specified locked location.
/**
* Inserts data to specified locked location.
*/
Iterator insert_here(Node *new_node, Node *preds[], Node *succs[],
int height, guard_t guards[])
int height, guard_t guards[]) // TODO: guards unused
{
// Node::create(std::move(data), height)
// link the predecessors and successors, e.g.
@ -921,10 +943,12 @@ private:
!node->flags.is_marked();
}
// Remove item found with fp with arguments skiplist,preds and succs.
// fp has to fill preds and succs which reflect location of item or return
// -1 as in not found otherwise returns level on which the item was first
// found.
/**
* Removes item found with fp with arguments skiplist, preds and succs.
* fp has to fill preds and succs which reflect location of item or return
* -1 as in not found otherwise returns level on which the item was first
* found.
*/
template <class K>
bool remove(const K &item, Node *preds[], Node *succs[],
int (*fp)(SkipList *, int, const K &, Node *[], Node *[]))
@ -966,7 +990,9 @@ private:
}
}
// number of elements
/**
* number of elements
*/
std::atomic<size_t> count{0};
Node *header;
SkiplistGC<Node> gc;

View File

@ -1,46 +0,0 @@
#pragma once
#include <list>
#include "threading/sync/lockable.hpp"
#include "threading/sync/spinlock.hpp"
/**
 * Thread-safe wrapper around std::list. Every operation acquires a unique
 * lock through Lockable, so concurrent access is serialized by lock_type.
 */
template <typename value_type, typename lock_type = SpinLock>
class LinkedList : public Lockable<lock_type>
{
public:
    /** Returns the number of stored elements. */
    std::size_t size() const
    {
        auto guard = this->acquire_unique();
        return data.size();
    }

    /** Copies value to the front of the list. */
    void push_front(const value_type &value)
    {
        auto guard = this->acquire_unique();
        data.push_front(value);
    }

    /** Moves value to the front of the list. */
    void push_front(value_type &&value)
    {
        auto guard = this->acquire_unique();
        // value is a named rvalue-reference parameter, not a forwarding
        // reference, so std::move is the idiomatic cast here
        // (std::forward<value_type> was equivalent but misleading)
        data.push_front(std::move(value));
    }

    /** Removes the first element. */
    void pop_front()
    {
        auto guard = this->acquire_unique();
        data.pop_front();
    }

    // Returns a copy on purpose: handing out value_type& would let the
    // caller touch the element after the lock is released, which
    // would not be concurrency-safe.
    value_type front()
    {
        auto guard = this->acquire_unique();
        return data.front();
    }

private:
    std::list<value_type> data;
};

View File

@ -1,34 +0,0 @@
#pragma once
#include <unordered_map>
#include "threading/sync/lockable.hpp"
#include "threading/sync/spinlock.hpp"
namespace lockfree
{

/**
 * Concurrent map wrapper around std::unordered_map, serialized with a
 * SpinLock through Lockable.
 *
 * NOTE(review): despite the namespace name this implementation is
 * lock-based, not lock-free.
 */
template <class K, class V>
class HashMap : Lockable<SpinLock>
{
public:
    /**
     * Returns a copy of the value stored under key.
     *
     * NOTE: operator[] default-constructs and stores a V when the key is
     * absent, so a miss is not an error — it returns V{}.
     */
    V at(const K &key)
    {
        auto guard = acquire_unique();
        return hashmap[key];
    }

    /**
     * Inserts or overwrites the value stored under key.
     *
     * BUG FIX: the value parameter was declared `const K &` instead of
     * `const V &`, which only compiled when K happened to convert to V.
     */
    void put(const K &key, const V &value)
    {
        auto guard = acquire_unique();
        hashmap[key] = value;
    }

private:
    std::unordered_map<K, V> hashmap;
};
}

View File

@ -10,44 +10,85 @@
class Indexes;
// Main class which represents Database concept in code.
// TODO: Maybe split this in another layer between Db and Dbms. Where the new
// layer would hold SnapshotEngine and his kind of concept objects. Some
// guidelines would be: retain objects which are necessary to implement queries
// in Db, the rest can be moved to the new layer.
/**
* Main class which represents Database concept in code.
*/
class Db
{
public:
using sptr = std::shared_ptr<Db>;
// import_snapshot will in constructor import latest snapshot into the db.
// NOTE: explicit is here to prevent compiler from evaluating const char *
// into a bool.
/**
* This constructor will create a database with the name "default"
*
* NOTE: explicit is here to prevent compiler from evaluating const char *
* into a bool.
*
* @param import_snapshot will in constructor import latest snapshot
* into the db.
*/
explicit Db(bool import_snapshot = true);
// import_snapshot will in constructor import latest snapshot into the db.
/**
* Construct database with a custom name.
*
* @param name database name
* @param import_snapshot will in constructor import latest snapshot
* into the db.
*/
Db(const char *name, bool import_snapshot = true);
// import_snapshot will in constructor import latest snapshot into the db.
/**
* Construct database with a custom name.
*
* @param name database name
* @param import_snapshot will in constructor import latest snapshot
* into the db.
*/
Db(const std::string &name, bool import_snapshot = true);
/**
* Database object can't be copied.
*/
Db(const Db &db) = delete;
private:
/** database name */
const std::string name_;
public:
/** transaction engine related to this database */
tx::Engine tx_engine;
/** graph related to this database */
Graph graph;
/** garbage collector related to this database*/
Garbage garbage = {tx_engine};
// This must be initialized after name.
/**
* snapshot engine related to this database
*
* \b IMPORTANT: has to be initialized after name
* */
SnapshotEngine snap_engine = {*this};
// Creates Indexes for this db.
/**
* Creates Indexes for this database.
*/
Indexes indexes();
// TODO: Indexes should be created only once, somewhere like Db or a layer
// between Db and Dbms.
Indexes indexes();
/**
* Returns a name of the database.
*
* @return database name
*/
std::string const &name() const;
};

View File

@ -1,5 +1,7 @@
#pragma once
#include <experimental/filesystem>
#include "database/db.hpp"
#include "logging/default.hpp"
#include "query/exception/query_engine.hpp"
@ -13,6 +15,8 @@
* -> [code_compiler] -> code_executor
*/
namespace fs = std::experimental::filesystem;
// query engine has to be aware of the Stream because Stream
// is passed to the dynamic shared library
template <typename Stream>
@ -44,6 +48,17 @@ public:
}
}
// preload functionality
auto load(const uint64_t hash, const fs::path& path)
{
program_loader.load(hash, path);
}
auto load(const std::string& query)
{
program_loader.load(query);
}
protected:
Logger logger;

View File

@ -20,9 +20,6 @@ public:
std::string flags;
// TODO: sync this with cmake configuration
#ifdef BARRIER
flags += " -DBARRIER";
#endif
#ifdef NDEBUG
flags += " -DNDEBUG -O2";
#endif
@ -53,9 +50,6 @@ public:
"-I../include",
"-I../libs/fmt", // TODO: load from config
"-I../../libs/fmt", "-L./ -L../",
#ifdef BARRIER
"-lbarrier_pic",
#endif
"-lmemgraph_pic",
"-shared -fPIC" // shared library flags
);
@ -67,6 +61,8 @@ public:
// if compilation has failed throw exception
if (compile_status == -1) {
logger.debug("FAIL: Query Code Compilation: {} -> {}", in_file,
out_file);
throw PlanCompilationException(
"Code compilation error. Generated code is not compilable or "
"compilation settings are wrong");

View File

@ -3,6 +3,7 @@
#include <memory>
#include <string>
#include <unordered_map>
#include <experimental/filesystem>
#include "config/config.hpp"
#include "logging/default.hpp"
@ -16,6 +17,8 @@
#include "utils/file.hpp"
#include "utils/hashing/fnv.hpp"
namespace fs = std::experimental::filesystem;
template <typename Stream>
class ProgramLoader
{
@ -26,6 +29,16 @@ public:
ProgramLoader() : logger(logging::log->logger("PlanLoader")) {}
// TODO: decouple load(query) method
auto load(const uint64_t hash, const fs::path &path)
{
// TODO: get lib path (that same folder as path folder or from config)
// TODO: compile
// TODO: dispose the old lib
// TODO: store the compiled lib
}
auto load(const std::string &query)
{
auto preprocessed = preprocessor.preprocess(query);

View File

@ -8,8 +8,6 @@
auto VertexAccessor::out() const
{
DbTransaction &t = this->db;
std::cout << "VA OUT" << std::endl;
std::cout << record->data.out.size() << std::endl;
return iter::make_map(iter::make_iter_ref(record->data.out),
[&](auto e) -> auto { return EdgeAccessor(*e, t); });
}

View File

@ -5,14 +5,28 @@
#include "storage/label/label_store.hpp"
#include "storage/vertices.hpp"
/**
* Graph storage. Contains vertices and edges, labels and edges.
*/
class Graph
{
public:
Graph() {}
/**
* default constructor
*
* At the beginning the graph is empty.
*/
Graph() = default;
/** storage for all vertices related to this graph */
Vertices vertices;
/** storage for all edges related to this graph */
Edges edges;
/** storage for all labels */
LabelStore label_store;
/** storage for all types related for this graph */
EdgeTypeStore edge_type_store;
};

View File

@ -25,9 +25,12 @@
// permanent assert: the check is always executed, even in release builds
#define permanent_assert(condition, message) \
if (!(condition)) { \
if (!(condition)) \
{ \
std::ostringstream s; \
s << message; \
std::cout << s.str() << std::endl; \
std::exit(EXIT_FAILURE); \
}
// assert_error_handler_(__FILE__, __LINE__, s.str().c_str());

View File

@ -2,33 +2,34 @@
#include <utility>
/* @brief Calls a cleanup function on scope exit
/**
* @brief Calls a cleanup function on scope exit
*
* consider this example:
* consider this example:
*
* void hard_worker()
* {
* resource.enable();
* do_stuff(); // throws exception
* resource.disable();
* }
* void hard_worker()
* {
* resource.enable();
* do_stuff(); // throws exception
* resource.disable();
* }
*
* if do_stuff throws an exception, resource.disable is never called
* and the app is left in an inconsistent state. ideally, you would like
* to call resource.disable regardles of the exception being thrown.
* OnScopeExit makes this possible and very convenient via a 'Auto' macro
* if do_stuff throws an exception, resource.disable is never called
* and the app is left in an inconsistent state. ideally, you would like
 * to call resource.disable regardless of the exception being thrown.
* OnScopeExit makes this possible and very convenient via a 'Auto' macro
*
* void hard_worker()
* {
* resource.enable();
* Auto(resource.disable());
* do_stuff(); // throws exception
* }
* void hard_worker()
* {
* resource.enable();
* Auto(resource.disable());
* do_stuff(); // throws exception
* }
*
* now, resource.disable will be called every time it goes out of scope
* regardless of the exception
* now, resource.disable will be called every time it goes out of scope
* regardless of the exception
*
* @tparam F Lambda which holds a wrapper function around the cleanup code
* @tparam F Lambda which holds a wrapper function around the cleanup code
*/
template <class F>
class OnScopeExit
@ -55,3 +56,10 @@ private:
TOKEN_PASTE(auto_, counter)(TOKEN_PASTE(auto_func_, counter));
#define Auto(Destructor) Auto_INTERNAL(Destructor, __COUNTER__)
// -- example:
// Auto(f());
// -- is expended to:
// auto auto_func_1 = [&]() { f(); };
// OnScopeExit<decltype(auto_func_1)> auto_1(auto_func_1);
// -- f() is called at the end of a scope

View File

@ -4,39 +4,25 @@
#include <stdexcept>
#include "utils/auto_scope.hpp"
#include "utils/stacktrace.hpp"
#include "utils/stacktrace/stacktrace.hpp"
class BasicException : public std::exception {
public:
BasicException(const std::string &message, uint64_t stacktrace_size) noexcept
: message_(message),
stacktrace_size_(stacktrace_size) {
generate_stacktrace();
}
BasicException(const std::string &message) noexcept : message_(message),
stacktrace_size_(10) {
generate_stacktrace();
}
template <class... Args>
BasicException(const std::string &format, Args &&... args) noexcept
: BasicException(fmt::format(format, std::forward<Args>(args)...)) {}
const char *what() const noexcept override { return message_.c_str(); }
private:
std::string message_;
uint64_t stacktrace_size_;
void generate_stacktrace() {
#ifndef NDEBUG
Stacktrace stacktrace;
int size = std::min(stacktrace_size_, stacktrace.size());
for (int i = 0; i < size; i++) {
message_.append(fmt::format("\n at {} ({})", stacktrace[i].function,
stacktrace[i].location));
class BasicException : public std::exception
{
public:
BasicException(const std::string &message) noexcept : message_(message)
{
Stacktrace stacktrace;
message_.append(stacktrace.dump());
}
#endif
}
template <class... Args>
BasicException(const std::string &format, Args &&... args) noexcept
: BasicException(fmt::format(format, std::forward<Args>(args)...))
{
}
const char *what() const noexcept override { return message_.c_str(); }
private:
std::string message_;
};

View File

@ -6,4 +6,6 @@ class NotYetImplemented : public BasicException
{
public:
using BasicException::BasicException;
NotYetImplemented() : BasicException("") {}
};

View File

@ -5,6 +5,9 @@
#include "utils/auto_scope.hpp"
/* @brief Allocates blocks of block_size and stores
* the pointers on allocated blocks inside a vector.
*/
template <size_t block_size>
class BlockAllocator
{
@ -23,29 +26,45 @@ public:
BlockAllocator(size_t capacity = 0)
{
for (size_t i = 0; i < capacity; ++i)
blocks.emplace_back();
unused_.emplace_back();
}
~BlockAllocator()
{
for (auto b : blocks) {
free(b.data);
}
blocks.clear();
for (auto block : unused_)
free(block.data);
unused_.clear();
for (auto block : release_)
free(block.data);
release_.clear();
}
size_t unused_size() const
{
return unused_.size();
}
size_t release_size() const
{
return release_.size();
}
// Returns nullptr on no memory.
void *acquire()
{
if (blocks.size() == 0) blocks.emplace_back();
if (unused_.size() == 0) unused_.emplace_back();
auto ptr = blocks.back().data;
Auto(blocks.pop_back());
auto ptr = unused_.back().data;
Auto(unused_.pop_back());
return ptr;
}
void release(void *ptr) { blocks.emplace_back(ptr); }
void release(void *ptr) { release_.emplace_back(ptr); }
private:
std::vector<Block> blocks;
// TODO: try implement with just one vector
// but consecutive acquire release calls should work
// TODO: measure first!
std::vector<Block> unused_;
std::vector<Block> release_;
};

View File

@ -3,6 +3,7 @@
#include <cmath>
#include "utils/exceptions/out_of_memory.hpp"
#include "utils/likely.hpp"
#include "utils/memory/block_allocator.hpp"
// http://en.cppreference.com/w/cpp/language/new

View File

@ -0,0 +1,47 @@
#include <csignal>
#include <functional>
#include <iostream>
#include <map>
#include <string>
#include <utility>
#include <vector>
// callback type invoked when a registered signal is delivered
using Function = std::function<void()>;

// TODO: align bits so signals can be combined
//       Signal::Terminate | Signal::Interupt
/**
 * POSIX signals this handler can be registered for.
 * Enumerator values are the raw signal numbers.
 */
enum class Signal : int
{
    Terminate = SIGTERM,
    SegmentationFault = SIGSEGV,
    Interupt = SIGINT,
    Quit = SIGQUIT,
    Abort = SIGABRT,
    BusError = SIGBUS,
};

/**
 * Maps raw signal numbers to user supplied callbacks and installs itself
 * as the process-wide handler via std::signal.
 *
 * NOTE(review): callbacks run inside a signal handler, so they should only
 * perform async-signal-safe work — confirm against the registered lambdas.
 */
class SignalHandler
{
private:
    // registered callbacks, keyed by raw signal number
    static std::map<int, std::function<void()>> handlers_;

    // Trampoline installed with std::signal; dispatches to the registered
    // callback. Guarded with find() so a signal with no (or an empty)
    // callback is ignored instead of default-inserting an empty
    // std::function and throwing std::bad_function_call.
    static void handle(int signal)
    {
        auto it = handlers_.find(signal);
        if (it != handlers_.end() && it->second) it->second();
    }

public:
    /**
     * Registers func to run when signal is delivered to the process.
     * Re-registering a signal overwrites its previous callback.
     */
    static void register_handler(Signal signal, Function func)
    {
        int signal_number = static_cast<int>(signal);
        handlers_[signal_number] = std::move(func);
        std::signal(signal_number, SignalHandler::handle);
    }

    // TODO: possible changes if a singleton is needed later
    /*
    static SignalHandler& instance() {
        static SignalHandler instance;
        return instance;
    }
    */
};

std::map<int, std::function<void()>> SignalHandler::handlers_ = {};

View File

@ -0,0 +1,11 @@
#pragma once
#include "logging/default.hpp"
#include "utils/stacktrace/stacktrace.hpp"
void log_stacktrace(const std::string& title)
{
Stacktrace stacktrace;
logging::info(title);
logging::info(stacktrace.dump());
}

View File

@ -1,10 +1,10 @@
#pragma once
#include <cxxabi.h>
#include <stdexcept>
#include <execinfo.h>
#include <fmt/format.h>
#include <stdexcept>
#include "utils/auto_scope.hpp"
class Stacktrace
@ -13,11 +13,13 @@ public:
class Line
{
public:
Line(const std::string& original) : original(original) {}
Line(const std::string &original) : original(original) {}
Line(const std::string& original, const std::string& function,
const std::string& location)
: original(original), function(function), location(location) {}
Line(const std::string &original, const std::string &function,
const std::string &location)
: original(original), function(function), location(location)
{
}
std::string original, function, location;
};
@ -26,17 +28,17 @@ public:
Stacktrace()
{
void* addresses[stacktrace_depth];
void *addresses[stacktrace_depth];
auto depth = backtrace(addresses, stacktrace_depth);
// will this leak if backtrace_symbols throws?
char** symbols = nullptr;
char **symbols = nullptr;
Auto(free(symbols));
symbols = backtrace_symbols(addresses, depth);
// skip the first one since it will be Stacktrace::Stacktrace()
for(int i = 1; i < depth; ++i)
for (int i = 1; i < depth; ++i)
lines.emplace_back(format(symbols[i]));
}
@ -48,40 +50,53 @@ public:
auto end() const { return lines.end(); }
auto cend() const { return lines.cend(); }
const Line& operator[](size_t idx) const
const Line &operator[](size_t idx) const { return lines[idx]; }
size_t size() const { return lines.size(); }
template <class Stream>
void dump(Stream &stream)
{
return lines[idx];
stream << dump();
}
size_t size() const
std::string dump()
{
return lines.size();
std::string message;
for (size_t i = 0; i < size(); i++)
{
message.append(fmt::format("at {} ({}) \n", lines[i].function,
lines[i].location));
}
return message;
}
private:
std::vector<Line> lines;
Line format(const std::string& original)
Line format(const std::string &original)
{
using namespace abi;
auto line = original;
auto begin = line.find('(');
auto end = line.find('+');
auto end = line.find('+');
if(begin == std::string::npos || end == std::string::npos)
if (begin == std::string::npos || end == std::string::npos)
return {original};
line[end] = '\0';
int s;
auto demangled = __cxa_demangle(line.data() + begin + 1, nullptr,
nullptr, &s);
auto demangled =
__cxa_demangle(line.data() + begin + 1, nullptr, nullptr, &s);
auto location = line.substr(0, begin);
auto function = demangled ? std::string(demangled)
: fmt::format("{}()", original.substr(begin + 1, end - begin - 1));
auto function =
demangled ? std::string(demangled)
: fmt::format("{}()", original.substr(begin + 1,
end - begin - 1));
return {original, function, location};
}

View File

@ -1,24 +1,67 @@
#pragma mark
#include "sys/types.h"
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include "sys/sysinfo.h"
#include "sys/types.h"
auto total_virtual_memory()
{
struct sysinfo mem_info;
sysinfo (&mem_info);
long long total_virtual_memory = mem_info.totalram;
total_virtual_memory += mem_info.totalswap;
total_virtual_memory *= mem_info.mem_unit;
return total_virtual_memory;
struct sysinfo mem_info;
sysinfo(&mem_info);
long long total_virtual_memory = mem_info.totalram;
total_virtual_memory += mem_info.totalswap;
total_virtual_memory *= mem_info.mem_unit;
return total_virtual_memory;
}
auto used_virtual_memory()
{
struct sysinfo mem_info;
sysinfo (&mem_info);
struct sysinfo mem_info;
sysinfo(&mem_info);
long long virtual_memory_used = mem_info.totalram - mem_info.freeram;
virtual_memory_used += mem_info.totalswap - mem_info.freeswap;
virtual_memory_used *= mem_info.mem_unit;
return virtual_memory_used;
}
// TODO: OS dependent
/**
 * Parses a memory line from /proc/self/status, e.g. "VmSize:   204084 kB".
 *
 * @param line NUL-terminated status line; mutated (trailing " kB" stripped)
 * @return the numeric value on the line in kB, or 0 when no digit is found
 */
auto parse_vm_size(char *line)
{
    auto i = std::strlen(line);
    const char *p = line;
    // scan for the first digit; stop at the terminator so a line without
    // digits no longer runs past the end of the buffer (was UB before)
    while (*p != '\0' && (*p < '0' || *p > '9'))
        p++;
    // strip the trailing " kB" suffix, but only when the line is long
    // enough — line[i - 3] underflowed for lines shorter than 3 chars
    if (i >= 3) line[i - 3] = '\0';
    return std::atoll(p);
}
/**
 * Returns the VmSize entry of /proc/self/status in kB.
 *
 * @return virtual memory size in kB, or -1 when the file cannot be opened
 *         (non-Linux system, restricted /proc) or the entry is missing
 */
auto vm_size()
{
    std::FILE *file = std::fopen("/proc/self/status", "r");
    // previously the NULL return was not checked and fgets crashed when
    // /proc/self/status was unavailable
    if (file == nullptr) return -1LL;
    auto result = -1LL;
    char line[128];
    while (std::fgets(line, sizeof(line), file) != nullptr)
    {
        if (std::strncmp(line, "VmSize:", 7) == 0)
        {
            result = parse_vm_size(line);
            break;
        }
    }
    std::fclose(file);
    return result;
}

View File

@ -1,30 +1,20 @@
#pragma once
#include "utils/auto_scope.hpp"
#include "utils/stacktrace/stacktrace.hpp"
#include <iostream>
#include <execinfo.h>
#include <iostream>
// TODO: log to local file or remote database
void stacktrace(std::ostream& stream) noexcept
void stacktrace(std::ostream &stream) noexcept
{
void* array[50];
int size = backtrace(array, 50);
stream << __FUNCTION__ << " backtrace returned "
<< size << " frames." << std::endl;
char** messages = backtrace_symbols(array, size);
Auto(free(messages));
for (int i = 0; i < size && messages != NULL; ++i)
stream << "[bt]: (" << i << ") " << messages[i] << std::endl;
stream << std::endl;
Stacktrace stacktrace;
stacktrace.dump(stream);
}
// TODO: log to local file or remote database
void terminate_handler(std::ostream& stream) noexcept
void terminate_handler(std::ostream &stream) noexcept
{
if (auto exc = std::current_exception())
{
@ -32,17 +22,13 @@ void terminate_handler(std::ostream& stream) noexcept
{
std::rethrow_exception(exc);
}
catch(std::exception& ex)
catch (std::exception &ex)
{
stream << ex.what() << std::endl << std::endl;
stacktrace(stream);
}
}
std::abort();
}
void terminate_handler() noexcept
{
terminate_handler(std::cout);
}
void terminate_handler() noexcept { terminate_handler(std::cout); }

View File

@ -0,0 +1,7 @@
#pragma once
#include <chrono>
// pull in the std chrono literal suffixes (e.g. 100ms, 1s)
using namespace std::chrono_literals;
// project-wide shorthand for the millisecond duration type
using ms = std::chrono::milliseconds;

View File

@ -1,14 +1,12 @@
#pragma once
#include <chrono>
#include <iostream>
#include <ratio>
#include <utility>
#define time_now() std::chrono::high_resolution_clock::now()
#include "utils/time/time.hpp"
using ns = std::chrono::nanoseconds;
using ms = std::chrono::milliseconds;
#define time_now() std::chrono::high_resolution_clock::now()
template <typename DurationUnit = std::chrono::nanoseconds>
auto to_duration(const std::chrono::duration<long, std::nano> &delta)

View File

@ -8,13 +8,15 @@
#include "logging/default.hpp"
/** @class Timer
* @brief The timer contains counter and handler.
/**
* @class Timer
*
* With every clock interval the counter should be decresed for
* delta count. Delta count is one for now but it should be a variable in the
* near future. The handler is function that will be called when counter
* becomes zero or smaller than zero.
* @brief The timer contains counter and handler.
*
 * With every clock interval the counter should be decreased by
* delta count. Delta count is one for now but it should be a variable in the
* near future. The handler is function that will be called when counter
* becomes zero or smaller than zero.
*/
struct Timer
{
@ -48,14 +50,16 @@ struct Timer
* the process method.
*/
/** @class TimerSet
* @brief Trivial timer container implementation.
/**
* @class TimerSet
*
* Internal data stucture for storage of timers is std::set. So, the
* related timer complexities are:
* insertion: O(log(n))
* deletion: O(log(n))
* process: O(n)
* @brief Trivial timer container implementation.
*
* Internal data stucture for storage of timers is std::set. So, the
* related timer complexities are:
* insertion: O(log(n))
* deletion: O(log(n))
* process: O(n)
*/
class TimerSet
{
@ -70,6 +74,11 @@ public:
timers.erase(timer);
}
uint64_t size() const
{
return timers.size();
}
void process()
{
for (auto it = timers.begin(); it != timers.end(); ) {
@ -87,10 +96,17 @@ private:
std::set<std::shared_ptr<Timer>> timers;
};
/** @class TimerScheduler
* @brief TimerScheduler is a manager class and its responsibility is to
* take care of the time and call the timer_container process method in the
* appropriate time.
/**
* @class TimerScheduler
*
* @brief TimerScheduler is a manager class and its responsibility is to
* take care of the time and call the timer_container process method in the
* appropriate time.
*
* @tparam timer_container_type implements a strategy how the timers
* are processed
* @tparam delta_time_type type of a time distance between two events
* @tparam delta_time granularity between the two events, default value is 1
*/
template <
typename timer_container_type,
@ -99,19 +115,47 @@ template <
> class TimerScheduler
{
public:
/**
* Adds a timer.
*
* @param timer shared pointer to the timer object \ref Timer
*/
void add(Timer::sptr timer)
{
timer_container.add(timer);
}
/**
* Removes a timer.
*
* @param timer shared pointer to the timer object \ref Timer
*/
void remove(Timer::sptr timer)
{
timer_container.remove(timer);
}
/**
* Provides the number of pending timers. The exact number has to be
* provided by a timer_container.
*
* @return uint64_t the number of pending timers.
*/
uint64_t size() const
{
return timer_container.size();
}
/**
* Runs a separate thread which responsibility is to run the process method
* at the appropriate time (every delta_time from the beginning of
* processing.
*/
void run()
{
is_running.store(true);
run_thread = std::thread([this]() {
while (is_running.load()) {
std::this_thread::sleep_for(delta_time_type(delta_time));
@ -121,11 +165,17 @@ public:
});
}
/**
* Stops the whole processing.
*/
void stop()
{
is_running.store(false);
}
/**
* Joins the processing thread.
*/
~TimerScheduler()
{
run_thread.join();

View File

@ -1,45 +1,37 @@
cmake_minimum_required(VERSION 3.1)
find_package(Threads REQUIRED)
project(memgraph_poc)
# get all cpp abs file names recursively starting from current directory
file(GLOB poc_cpps *.cpp)
message(STATUS "Available poc cpp files are: ${poc_cpps}")
include_directories(${CMAKE_SOURCE_DIR}/poc)
include_directories(${CMAKE_SOURCE_DIR}/queries)
add_executable(poc_astar astar.cpp)
target_link_libraries(poc_astar memgraph)
target_link_libraries(poc_astar Threads::Threads)
target_link_libraries(poc_astar ${fmt_static_lib})
target_link_libraries(poc_astar ${yaml_static_lib})
# for each cpp file build binary
foreach(poc_cpp ${poc_cpps})
add_executable(powerlinx_profile profile.cpp)
target_link_libraries(powerlinx_profile memgraph)
target_link_libraries(powerlinx_profile Threads::Threads)
target_link_libraries(powerlinx_profile ${fmt_static_lib})
target_link_libraries(powerlinx_profile ${yaml_static_lib})
# get exec name (remove extension from the abs path)
get_filename_component(exec_name ${poc_cpp} NAME_WE)
add_executable(csv_import csv_import.cpp)
target_link_libraries(csv_import memgraph)
target_link_libraries(csv_import Threads::Threads)
target_link_libraries(csv_import ${fmt_static_lib})
target_link_libraries(csv_import ${yaml_static_lib})
# set target name in format {project_name}_{test_type}_{exec_name}
set(target_name ${project_name}_poc_${exec_name})
add_executable(add_double add_double.cpp)
target_link_libraries(add_double memgraph)
target_link_libraries(add_double Threads::Threads)
target_link_libraries(add_double ${fmt_static_lib})
target_link_libraries(add_double ${yaml_static_lib})
# build exec file
add_executable(${target_name} ${poc_cpp} isolation/header.cpp)
set_property(TARGET ${target_name} PROPERTY CXX_STANDARD ${cxx_standard})
add_executable(astar_query astar_query.cpp)
target_link_libraries(astar_query memgraph)
target_link_libraries(astar_query Threads::Threads)
target_link_libraries(astar_query ${fmt_static_lib})
target_link_libraries(astar_query ${yaml_static_lib})
# OUTPUT_NAME sets the real name of a target when it is built and can be
# used to help create two targets of the same name even though CMake
# requires unique logical target names
set_target_properties(${target_name} PROPERTIES OUTPUT_NAME ${exec_name})
add_executable(size_aligment size_aligment.cpp)
target_link_libraries(size_aligment memgraph)
target_link_libraries(size_aligment Threads::Threads)
target_link_libraries(size_aligment ${fmt_static_lib})
target_link_libraries(size_aligment ${yaml_static_lib})
# link libraries
# threads (cross-platform)
target_link_libraries(${target_name} Threads::Threads)
# memgraph lib
target_link_libraries(${target_name} memgraph_lib)
# fmt format lib
target_link_libraries(${target_name} ${fmt_static_lib})
# yaml parser lib
target_link_libraries(${target_name} ${yaml_static_lib})
add_executable(isolation isolation.cpp isolation/header.cpp)
target_link_libraries(isolation ${fmt_static_lib})
endforeach()

View File

@ -14,9 +14,13 @@ using std::endl;
// Dressipi astar query of 4 clicks.
// TODO: push down appropriate
using Stream = std::ostream;
// TODO: figure out from the pattern in a query
constexpr size_t max_depth = 3;
// TODO: from query LIMIT 10
constexpr size_t limit = 10;
class Node
@ -79,10 +83,12 @@ void astar(DbAccessor &t, plan_args_t &args, Stream &stream)
auto cmp = [](Node *left, Node *right) { return left->cost > right->cost; };
std::priority_queue<Node *, std::vector<Node *>, decltype(cmp)> queue(cmp);
// TODO: internal id independent
auto start_vr = t.vertex_find(Id(args[0].as<Int64>().value()));
if (!start_vr.is_present())
{
// stream.write_failure({{}});
// TODO: stream failure
return;
}
@ -96,15 +102,19 @@ void astar(DbAccessor &t, plan_args_t &args, Stream &stream)
auto now = queue.top();
queue.pop();
if (max_depth <= now->depth)
if (now->depth >= max_depth)
{
// stream.write_success_empty();
// best.push_back(now);
// TODO: stream the result
count++;
if (count >= limit)
{
// the limit was reached -> STOP the execution
break;
}
// if the limit wasn't reached -> POP the next vertex
continue;
}
@ -130,6 +140,8 @@ public:
{
DbAccessor t(db);
// TODO: find node
astar(t, args, stream);
return t.commit();

View File

@ -2,6 +2,8 @@
# Initial version of script that is going to be used for release build.
# NOTE: do not run this script as a super user
# TODO: enable options related to lib
echo "Memgraph Release Building..."
@ -33,12 +35,14 @@ mkdir -p ../release/${exe_name}
# copy all relevant files
cp ${exe_name} ../release/${exe_name}/memgraph
cp libmemgraph_pic.a ../release/${exe_name}/libmemgraph_pic.a
rm -rf ../release/${exe_name}/include
cp -r include ../release/${exe_name}/include
cp -r template ../release/${exe_name}/template
cp -r ../config ../release/${exe_name}/config
# create compiled folder and copy hard coded queries
mkdir -p ../release/${exe_name}/compiled/cpu/hardcode
rm -rf ../release/${exe_name}/compiled/cpu/hardcode/*
cp ../tests/integration/hardcoded_query/*.cpp ../release/${exe_name}/compiled/cpu/hardcode
cp ../tests/integration/hardcoded_query/*.hpp ../release/${exe_name}/compiled/cpu/hardcode

View File

@ -1 +0,0 @@
// TODO

View File

@ -1 +0,0 @@
// TODO

View File

@ -23,7 +23,8 @@ void clean_version_lists(A &&acc, Id oldest_active)
{
// TODO: Optimization, iterator with remove method.
bool succ = acc.remove(vlist.first);
assert(succ); // There is other cleaner here
// There is other cleaner here
runtime_assert(succ, "Remove has failed");
}
}
}
@ -56,7 +57,7 @@ void DbTransaction::clean_vertex_section()
bool DbTransaction::update_indexes()
{
logger.debug("index_updates: {}, instance: {}, transaction: {}",
logger.trace("index_updates: {}, instance: {}, transaction: {}",
index_updates.size(), static_cast<void *>(this), trans.id);
while (!index_updates.empty())
@ -107,7 +108,7 @@ void DbTransaction::to_update_index(typename TG::vlist_t *vlist,
typename TG::record_t *record)
{
index_updates.emplace_back(make_index_update(vlist, record));
logger.debug("update_index, updates_no: {}, instance: {}, transaction: {}",
logger.trace("update_index, updates_no: {}, instance: {}, transaction: {}",
index_updates.size(), static_cast<void *>(this), trans.id);
}

View File

@ -9,32 +9,21 @@
#include "logging/default.hpp"
#include "logging/streams/stdout.hpp"
#include "utils/signals/handler.hpp"
#include "utils/terminate_handler.hpp"
#include "utils/stacktrace/log.hpp"
static bolt::Server<bolt::Worker>* serverptr;
static bolt::Server<bolt::Worker> *serverptr;
Logger logger;
void sigint_handler(int s)
{
auto signal = s == SIGINT ? "SIGINT" : "SIGABRT";
logger.info("Recieved signal {}", signal);
logger.info("Shutting down...");
std::exit(EXIT_SUCCESS);
}
static constexpr const char* interface = "0.0.0.0";
static constexpr const char* port = "7687";
// TODO: load from configuration
static constexpr const char *interface = "0.0.0.0";
static constexpr const char *port = "7687";
int main(void)
{
// TODO figure out what is the relationship between this and signals
// that are configured below
std::set_terminate(&terminate_handler);
// logger init
// logging init
#ifdef SYNC_LOGGER
logging::init_sync();
#else
@ -42,41 +31,53 @@ int main(void)
#endif
logging::log->pipe(std::make_unique<Stdout>());
// get Main logger
// logger init
logger = logging::log->logger("Main");
logger.info("{}", logging::log->type());
signal(SIGINT, sigint_handler);
signal(SIGABRT, sigint_handler);
// unhandled exception handler
std::set_terminate(&terminate_handler);
// signal handling
SignalHandler::register_handler(Signal::SegmentationFault, []() {
log_stacktrace("SegmentationFault signal raised");
std::exit(EXIT_FAILURE);
});
SignalHandler::register_handler(Signal::Terminate, []() {
log_stacktrace("Terminate signal raised");
std::exit(EXIT_FAILURE);
});
SignalHandler::register_handler(Signal::Abort, []() {
log_stacktrace("Abort signal raised");
std::exit(EXIT_FAILURE);
});
// initialize socket
io::Socket socket;
try
{
socket = io::Socket::bind(interface, port);
}
catch(io::NetworkError e)
catch (io::NetworkError e)
{
logger.error("Cannot bind to socket on {} at {}", interface, port);
logger.error("{}", e.what());
std::exit(EXIT_FAILURE);
}
socket.set_non_blocking();
socket.listen(1024);
logger.info("Listening on {} at {}", interface, port);
// initialize server
bolt::Server<bolt::Worker> server(std::move(socket));
serverptr = &server;
// server start with N threads
// TODO: N should be configurable
auto N = std::thread::hardware_concurrency();
logger.info("Starting {} workers", N);
server.start(N);
logger.info("Shutting down...");
return EXIT_SUCCESS;
}

View File

@ -2,6 +2,7 @@
#include <cassert>
#include "utils/assert.hpp"
#include "storage/vertex_record.hpp"
#include "storage/edge_type/edge_type.hpp"
@ -10,10 +11,12 @@ void EdgeAccessor::remove() const
RecordAccessor::remove();
auto from_va = from();
assert(from_va.fill());
auto from_va_is_full = from_va.fill();
runtime_assert(from_va_is_full, "From Vertex Accessor is empty");
auto to_va = to();
assert(to_va.fill());
auto to_va_is_full = to_va.fill();
permanent_assert(to_va_is_full, "To Vertex Accessor is empty");
from_va.update().record->data.out.remove(vlist);
to_va.update().record->data.in.remove(vlist);

View File

@ -1,128 +1,51 @@
cmake_minimum_required(VERSION 3.1)
project(memgraph_tests)
project(${project_name}_tests)
set(src_dir ${CMAKE_SOURCE_DIR}/src)
enable_testing()
include_directories(${catch_source_dir}/include)
# TODO: modular approach (REFACTOR)
file(MAKE_DIRECTORY ${CMAKE_BINARY_DIR}/test_results/unit)
file(MAKE_DIRECTORY ${CMAKE_BINARY_DIR}/test_results/benchmark)
## UNIT TESTS
# find unit tests
file(GLOB_RECURSE unit_test_files ${CMAKE_HOME_DIRECTORY}/tests/unit/*.cpp)
get_file_names("${unit_test_files}" file_names)
set(unit_test_names "${file_names}")
message(STATUS "Available unit tests are: ${unit_test_names}")
# copy unit test data
# copy test data
file(COPY ${CMAKE_SOURCE_DIR}/tests/data
DESTINATION ${CMAKE_BINARY_DIR}/tests)
# build unit tests
foreach(test ${unit_test_names})
set(test_name unit_${test})
add_executable(${test_name} unit/${test}.cpp ${src_dir}/template_engine/engine.cpp)
target_link_libraries(${test_name} memgraph)
# TODO: separate dependencies
target_link_libraries(${test_name} stdc++fs)
target_link_libraries(${test_name} cypher_lib)
target_link_libraries(${test_name} Threads::Threads)
target_link_libraries(${test_name} ${fmt_static_lib})
target_link_libraries(${test_name} ${yaml_static_lib})
add_test(NAME ${test_name} COMMAND ${test_name})
set_property(TARGET ${test_name} PROPERTY CXX_STANDARD 14)
endforeach()
# move test data data to the build directory
if (UNIX)
set(test_data "tests/data")
set(test_data_src "${CMAKE_SOURCE_DIR}/${test_data}")
set(test_data_dst "${CMAKE_BINARY_DIR}/${test_data}")
add_custom_target (test_data
COMMAND rm -rf ${test_data_dst}
COMMAND cp -r ${test_data_src} ${test_data_dst}
)
endif (UNIX)
# -----------------------------------------------------------------------------
## CONCURRENCY TESTS
# benchmark test binaries
if (ALL_TESTS OR BENCHMARK_TESTS)
add_subdirectory(${PROJECT_SOURCE_DIR}/benchmark)
endif()
# find concurrency tests
file(GLOB_RECURSE concurrency_test_files
${CMAKE_HOME_DIRECTORY}/tests/concurrent/*.cpp)
get_file_names("${concurrency_test_files}" file_names)
set(concurrency_test_names "${file_names}")
message(STATUS "Available concurrency tests are: ${concurrency_test_names}")
# concurrent test binaries
if (ALL_TESTS OR CONCURRENT_TESTS)
add_subdirectory(${PROJECT_SOURCE_DIR}/concurrent)
endif()
# build concurrency tests
foreach(test ${concurrency_test_names})
set(test_name concurrent_${test})
add_executable(${test_name} concurrent/${test}.cpp)
target_link_libraries(${test_name} memgraph)
target_link_libraries(${test_name} Threads::Threads)
target_link_libraries(${test_name} ${fmt_static_lib})
target_link_libraries(${test_name} ${yaml_static_lib})
add_test(NAME ${test_name} COMMAND ${test_name})
set_property(TARGET ${test_name} PROPERTY CXX_STANDARD 14)
endforeach()
# integration test binaries
if (ALL_TESTS OR INTEGRATION_TESTS)
add_subdirectory(${PROJECT_SOURCE_DIR}/integration)
endif()
## INTEGRATION TESTS
# manual test binaries
if (ALL_TESTS OR MANUAL_TESTS)
add_subdirectory(${PROJECT_SOURCE_DIR}/manual)
endif()
# test hard coded queries
add_executable(integration_queries integration/queries.cpp)
target_link_libraries(integration_queries stdc++fs)
target_link_libraries(integration_queries memgraph)
target_link_libraries(integration_queries Threads::Threads)
target_link_libraries(integration_queries ${fmt_static_lib})
target_link_libraries(integration_queries ${yaml_static_lib})
add_test(NAME integration_queries COMMAND integration_queries)
set_property(TARGET integration_queries PROPERTY CXX_STANDARD 14)
# test cleaning methods
add_executable(cleaning integration/cleaning.cpp)
target_link_libraries(cleaning memgraph)
target_link_libraries(cleaning Threads::Threads)
target_link_libraries(cleaning ${fmt_static_lib})
target_link_libraries(cleaning ${yaml_static_lib})
add_test(NAME cleaning COMMAND cleaning)
set_property(TARGET cleaning PROPERTY CXX_STANDARD 14)
# test snapshot validity
add_executable(snapshot integration/snapshot.cpp)
target_link_libraries(snapshot memgraph)
target_link_libraries(snapshot Threads::Threads)
target_link_libraries(snapshot ${fmt_static_lib})
target_link_libraries(snapshot ${yaml_static_lib})
add_test(NAME snapshot COMMAND snapshot)
set_property(TARGET snapshot PROPERTY CXX_STANDARD 14)
# test index validity
add_executable(index integration/index.cpp)
target_link_libraries(index memgraph)
target_link_libraries(index Threads::Threads)
target_link_libraries(index ${fmt_static_lib})
target_link_libraries(index ${yaml_static_lib})
add_test(NAME index COMMAND index)
set_property(TARGET index PROPERTY CXX_STANDARD 14)
## MANUAL TESTS
# cypher_ast
add_executable(manual_cypher_ast manual/cypher_ast.cpp)
target_link_libraries(manual_cypher_ast stdc++fs)
target_link_libraries(manual_cypher_ast memgraph)
target_link_libraries(manual_cypher_ast Threads::Threads)
target_link_libraries(manual_cypher_ast ${fmt_static_lib})
target_link_libraries(manual_cypher_ast ${yaml_static_lib})
target_link_libraries(manual_cypher_ast cypher_lib)
set_property(TARGET manual_cypher_ast PROPERTY CXX_STANDARD 14)
# query_engine
add_executable(manual_query_engine manual/query_engine.cpp)
target_link_libraries(manual_query_engine stdc++fs)
target_link_libraries(manual_query_engine memgraph)
target_link_libraries(manual_query_engine ${fmt_static_lib})
target_link_libraries(manual_query_engine ${yaml_static_lib})
target_link_libraries(manual_query_engine dl)
target_link_libraries(manual_query_engine cypher_lib)
target_link_libraries(manual_query_engine Threads::Threads)
set_property(TARGET manual_query_engine PROPERTY CXX_STANDARD 14)
# query_hasher
add_executable(manual_query_hasher manual/query_hasher.cpp)
target_link_libraries(manual_query_hasher stdc++fs)
target_link_libraries(manual_query_hasher memgraph)
target_link_libraries(manual_query_hasher ${fmt_static_lib})
target_link_libraries(manual_query_hasher ${yaml_static_lib})
target_link_libraries(manual_query_hasher Threads::Threads)
set_property(TARGET manual_query_hasher PROPERTY CXX_STANDARD 14)
# unit test binaries
if (ALL_TESTS OR UNIT_TESTS)
add_subdirectory(${PROJECT_SOURCE_DIR}/unit)
endif()

View File

@ -1,21 +1,46 @@
find_package(Threads REQUIRED)
file(GLOB_RECURSE ALL_BENCH_CPP *.cpp)
# set current directory name as a test type
get_filename_component(test_type ${CMAKE_CURRENT_SOURCE_DIR} NAME)
foreach(ONE_BENCH_CPP ${ALL_BENCH_CPP})
# get all cpp abs file names recursively starting from current directory
file(GLOB_RECURSE test_type_cpps *.cpp)
message(STATUS "Available ${test_type} cpp files are: ${test_type_cpps}")
get_filename_component(ONE_BENCH_EXEC ${ONE_BENCH_CPP} NAME_WE)
# for each cpp file build binary and register test
foreach(test_cpp ${test_type_cpps})
# Avoid name collision
set(TARGET_NAME Bench_${ONE_BENCH_EXEC})
# get exec name (remove extension from the abs path)
get_filename_component(exec_name ${test_cpp} NAME_WE)
add_executable(${TARGET_NAME} ${ONE_BENCH_CPP})
set_target_properties(${TARGET_NAME} PROPERTIES OUTPUT_NAME ${ONE_BENCH_EXEC})
target_link_libraries(${TARGET_NAME} benchmark ${CMAKE_THREAD_LIBS_INIT})
target_link_libraries(${TARGET_NAME} memgraph)
target_link_libraries(${TARGET_NAME} ${fmt_static_lib})
target_link_libraries(${TARGET_NAME} Threads::Threads)
target_link_libraries(${TARGET_NAME} ${yaml_static_lib})
add_test(${TARGET_NAME} ${ONE_BENCH_EXEC})
# set target name in format {project_name}_{test_type}_{exec_name}
set(target_name ${project_name}_${test_type}_${exec_name})
# build exec file
add_executable(${target_name} ${test_cpp})
set_property(TARGET ${target_name} PROPERTY CXX_STANDARD ${cxx_standard})
# OUTPUT_NAME sets the real name of a target when it is built and can be
# used to help create two targets of the same name even though CMake
# requires unique logical target names
set_target_properties(${target_name} PROPERTIES OUTPUT_NAME ${exec_name})
# link libraries
# threads (cross-platform)
target_link_libraries(${target_name} Threads::Threads)
# google-benchmark
target_link_libraries(${target_name} benchmark ${CMAKE_THREAD_LIBS_INIT})
# memgraph lib
target_link_libraries(${target_name} memgraph_lib)
# fmt format lib
target_link_libraries(${target_name} ${fmt_static_lib})
# yaml parser lib
target_link_libraries(${target_name} ${yaml_static_lib})
# register test
set(output_path
${CMAKE_BINARY_DIR}/test_results/benchmark/${target_name}.json)
add_test(${target_name} ${exec_name}
--benchmark_out_format=json --benchmark_out=${output_path})
endforeach()

View File

@ -0,0 +1,58 @@
#include <random>
#include <thread>
#include "benchmark/benchmark_api.h"
#include "data_structures/bloom/bloom_filter.hpp"
#include "logging/default.hpp"
#include "logging/streams/stdout.hpp"
#include "utils/command_line/arguments.hpp"
#include "utils/hashing/fnv64.hpp"
#include "utils/random/generator.h"
using utils::random::StringGenerator;
using StringHashFunction = std::function<uint64_t(const std::string &)>;
// Drives the bloom-filter benchmark: for the first state.range(0) elements,
// alternates lookups (odd indices) with insertions (even indices), then
// reports N so the framework can fit the declared complexity curve.
template <class Type, int Size>
static void TestBloom(benchmark::State &state, BloomFilter<Type, Size> *bloom,
                      const std::vector<Type> &elements)
{
    while (state.KeepRunning())
    {
        const int count = state.range(0);
        for (int i = 0; i < count; ++i)
        {
            if (i % 2 != 0)
                bloom->contains(elements[i]);
            else
                bloom->insert(elements[i]);
        }
    }
    state.SetComplexityN(state.range(0));
}
// Adapter lambda: RegisterBenchmark forwards its trailing captured arguments
// to the callable, so this bridges to the templated TestBloom above.
auto BM_Bloom = [](benchmark::State &state, auto *bloom, const auto &elements) {
    TestBloom(state, bloom, elements);
};
// Registers and runs the bloom-filter benchmark over input sizes 1..2^16.
int main(int argc, char **argv)
{
    // Async logger piped to stdout so log lines interleave with benchmark output.
    logging::init_async();
    logging::log->pipe(std::make_unique<Stdout>());
    // 2^16 random strings of length 4 shared across all benchmark ranges.
    StringGenerator generator(4);
    auto elements = utils::random::generate_vector(generator, 1 << 16);
    // The filter hashes each key with both FNV variants.
    StringHashFunction hash1 = fnv64<std::string>;
    StringHashFunction hash2 = fnv1a64<std::string>;
    std::vector<StringHashFunction> funcs = {hash1, hash2};
    BloomFilter<std::string, 128> bloom(funcs);
    benchmark::RegisterBenchmark("SimpleBloomFilter Benchmark Test", BM_Bloom,
                                 &bloom, elements)
        ->RangeMultiplier(2)
        ->Range(1, 1 << 16)
        ->Complexity(benchmark::oN);
    benchmark::Initialize(&argc, argv);
    benchmark::RunSpecifiedBenchmarks();
}

View File

@ -0,0 +1,193 @@
#include <random>
#include <thread>
#include "data_structures/bloom/bloom_filter.hpp"
#include "data_structures/concurrent/concurrent_bloom_map.hpp"
#include "logging/default.hpp"
#include "logging/streams/stdout.hpp"
#include "utils/command_line/arguments.hpp"
#include "utils/hashing/fnv64.hpp"
#include "utils/random/generator.h"
#include "benchmark/benchmark_api.h"
/*
ConcurrentMap Benchmark Test:
- tests time of Insertion, Contain and Delete operations
- benchmarking time per operation
- test run ConcurrentMap with the following keys and values:
- <int,int>
- <int, string>
- <string, int>
- <string, string>
*/
using utils::random::NumberGenerator;
using utils::random::PairGenerator;
using utils::random::StringGenerator;
using StringHashFunction = std::function<uint64_t(const std::string &)>;
using IntegerGenerator = NumberGenerator<std::uniform_int_distribution<int>,
std::default_random_engine, int>;
// Global arguments
int MAX_ELEMENTS = 1 << 18, MULTIPLIER = 2;
int THREADS, RANGE_START, RANGE_END, STRING_LENGTH;
/*
ConcurrentMap Insertion Benchmark Test
*/
// Benchmarks ConcurrentBloomMap insertion: each benchmark iteration inserts
// the first state.range(0) pre-generated key/value pairs into the map.
template <class K, class V, class F>
static void InsertValue(benchmark::State &state,
                        ConcurrentBloomMap<K, V, F> *map,
                        const std::vector<std::pair<K, V>> &elements)
{
    while (state.KeepRunning())
    {
        for (int start = 0; start < state.range(0); start++)
        {
            map->insert(elements[start].first, elements[start].second);
        }
    }
    // Report input size so the framework can fit the declared complexity.
    state.SetComplexityN(state.range(0));
}
/*
ConcurrentMap Contains Benchmark Test
*/
// Benchmarks ConcurrentBloomMap lookups: each benchmark iteration probes the
// map with the keys of the first state.range(0) pre-generated pairs.
//
// Fix: take `elements` by const reference. The original signature took the
// vector by value, so every call copied up to MAX_ELEMENTS pairs — pure
// overhead that also pollutes the measured setup cost. Callers (the
// BM_ContainsValue adapter) bind unchanged.
template <class K, class V, class F>
static void ContainsValue(benchmark::State &state,
                          ConcurrentBloomMap<K, V, F> *map,
                          const std::vector<std::pair<K, V>> &elements)
{
    while (state.KeepRunning())
    {
        for (int start = 0; start < state.range(0); start++)
        {
            map->contains(elements[start].first);
        }
    }
    // Report input size for complexity fitting.
    state.SetComplexityN(state.range(0));
}
// Adapter lambdas: RegisterBenchmark forwards its trailing captured arguments
// to the callable, bridging to the templated helpers above.
auto BM_InsertValue = [](benchmark::State &state, auto *map, auto &elements) {
    InsertValue(state, map, elements);
};
// NOTE(review): `elements` is taken by value here (copied on each call),
// unlike the insert adapter above — presumably unintentional; confirm.
auto BM_ContainsValue = [](benchmark::State &state, auto *map, auto elements) {
    ContainsValue(state, map, elements);
};
/*
Commandline Argument Parsing
Arguments:
* Integer Range Minimum
-start number
* Integer Range Maximum
- end number
* Number of threads
- threads number
  * Random string length
-string-length number
*/
// Parses the command-line arguments listed in the comment block above into
// the file-level globals (RANGE_START, RANGE_END, THREADS, STRING_LENGTH).
void parse_arguments(int argc, char **argv)
{
    REGISTER_ARGS(argc, argv);
    RANGE_START = GET_ARG("-start", "0").get_int();
    RANGE_END = GET_ARG("-end", "1000000000").get_int();
    // Cap the requested thread count at the machine's hardware concurrency.
    THREADS = std::min(GET_ARG("-threads", "1").get_int(),
                       (int)std::thread::hardware_concurrency());
    // Use GET_ARG for consistency with every other argument above (the
    // original spelled out ProgramArguments::instance().get_arg here).
    STRING_LENGTH = GET_ARG("-string-length", "128").get_int();
}
// Builds the ConcurrentBloomMap benchmark suite (string-keyed insert and
// contains variants) and runs it. Registration order determines output order.
int main(int argc, char **argv)
{
    // Logging must be up before any benchmark output is produced.
    logging::init_async();
    logging::log->pipe(std::make_unique<Stdout>());
    parse_arguments(argc, argv);
    StringGenerator sg(STRING_LENGTH);
    IntegerGenerator ig(RANGE_START, RANGE_END);
    /*
      Creates RandomGenerators, ConcurrentMaps and Random Element Vectors for
      the following use cases:
        Map elements contain keys and value for:
          <int, int>,
          <int, string>
          <string, int>
          <string, string>
    */
    // random pair generators for each key/value type combination
    PairGenerator<IntegerGenerator, IntegerGenerator> piig(&ig, &ig);
    PairGenerator<StringGenerator, StringGenerator> pssg(&sg, &sg);
    PairGenerator<StringGenerator, IntegerGenerator> psig(&sg, &ig);
    PairGenerator<IntegerGenerator, StringGenerator> pisg(&ig, &sg);
    // the shared bloom filter is backed by two FNV hash variants
    StringHashFunction hash1 = fnv64<std::string>;
    StringHashFunction hash2 = fnv1a64<std::string>;
    std::vector<StringHashFunction> funcs = {hash1, hash2};
    BloomFilter<std::string, 128> bloom_filter_(funcs);
    // maps used for testing (int-keyed variants are currently disabled)
    // ConcurrentBloomMap<int, int> ii_map;
    // ConcurrentBloomMap<int, std::string> is_map;
    using Filter = BloomFilter<std::string, 128>;
    ConcurrentBloomMap<std::string, int, Filter> si_map(bloom_filter_);
    ConcurrentBloomMap<std::string, std::string, Filter> ss_map(bloom_filter_);
    // random elements for testing
    // auto ii_elems = utils::random::generate_vector(piig, MAX_ELEMENTS);
    // auto is_elems = utils::random::generate_vector(pisg, MAX_ELEMENTS);
    auto si_elems = utils::random::generate_vector(psig, MAX_ELEMENTS);
    auto ss_elems = utils::random::generate_vector(pssg, MAX_ELEMENTS);
    /* insertion benchmarks */
    benchmark::RegisterBenchmark("InsertValue[String, Int]", BM_InsertValue,
                                 &si_map, si_elems)
        ->RangeMultiplier(MULTIPLIER)
        ->Range(1, MAX_ELEMENTS)
        ->Complexity(benchmark::oN)
        ->Threads(THREADS);
    benchmark::RegisterBenchmark("InsertValue[String, String]", BM_InsertValue,
                                 &ss_map, ss_elems)
        ->RangeMultiplier(MULTIPLIER)
        ->Range(1, MAX_ELEMENTS)
        ->Complexity(benchmark::oN)
        ->Threads(THREADS);
    // contains benchmarks
    benchmark::RegisterBenchmark("ContainsValue[String, Int]", BM_ContainsValue,
                                 &si_map, si_elems)
        ->RangeMultiplier(MULTIPLIER)
        ->Range(1, MAX_ELEMENTS)
        ->Complexity(benchmark::oN)
        ->Threads(THREADS);
    benchmark::RegisterBenchmark("ContainsValue[String, String]",
                                 BM_ContainsValue, &ss_map, ss_elems)
        ->RangeMultiplier(MULTIPLIER)
        ->Range(1, MAX_ELEMENTS)
        ->Complexity(benchmark::oN)
        ->Threads(THREADS);
    benchmark::Initialize(&argc, argv);
    benchmark::RunSpecifiedBenchmarks();
    return 0;
}

View File

@ -1,254 +0,0 @@
#include <random>
#include <thread>
#include "data_structures/concurrent/concurrent_map.hpp"
#include "logging/default.hpp"
#include "logging/streams/stdout.hpp"
#include "utils/command_line/arguments.hpp"
#include "utils/random/generator.h"
#include "benchmark/benchmark_api.h"
/*
ConcurrentMap Benchmark Test:
- tests time of Insertion, Contain and Delete operations
- benchmarking time per operation
- test run ConcurrentMap with the following keys and values:
- <int,int>
- <int, string>
- <string, int>
- <string, string>
*/
using utils::random::NumberGenerator;
using utils::random::PairGenerator;
using utils::random::StringGenerator;
using IntegerGenerator = NumberGenerator<std::uniform_int_distribution<int>,
std::default_random_engine, int>;
// Global arguments
int MAX_ELEMENTS = 1 << 18, MULTIPLIER = 2;
int THREADS, RANGE_START, RANGE_END, STRING_LENGTH;
/*
ConcurrentMap Insertion Benchmark Test
*/
template <class K, class V>
static void InsertValue(benchmark::State& state, ConcurrentMap<K, V>* map,
const std::vector<std::pair<K, V>>& elements) {
while (state.KeepRunning()) {
auto accessor = map->access();
for (int start = 0; start < state.range(0); start++) {
accessor.insert(elements[start].first, elements[start].second);
}
}
state.SetComplexityN(state.range(0));
}
/*
ConcurrentMap Deletion Benchmark Test
*/
template <class K, class V>
static void DeleteValue(benchmark::State& state, ConcurrentMap<K, V>* map,
const std::vector<std::pair<K, V>> elements) {
while (state.KeepRunning()) {
auto accessor = map->access();
for (int start = 0; start < state.range(0); start++) {
accessor.remove(elements[start].first);
}
}
state.SetComplexityN(state.range(0));
}
/*
ConcurrentMap Contains Benchmark Test
*/
template <class K, class V>
static void ContainsValue(benchmark::State& state, ConcurrentMap<K, V>* map,
const std::vector<std::pair<K, V>> elements) {
while (state.KeepRunning()) {
auto accessor = map->access();
for (int start = 0; start < state.range(0); start++) {
accessor.contains(elements[start].first);
}
}
state.SetComplexityN(state.range(0));
}
auto BM_InsertValue = [](benchmark::State& state, auto* map, auto& elements) {
InsertValue(state, map, elements);
};
auto BM_DeleteValue = [](benchmark::State& state, auto* map, auto elements) {
DeleteValue(state, map, elements);
};
auto BM_ContainsValue = [](benchmark::State& state, auto* map, auto elements) {
ContainsValue(state, map, elements);
};
/*
Commandline Argument Parsing
Arguments:
* Integer Range Minimum
-start number
* Integer Range Maximum
- end number
* Number of threads
- threads number
  * Random string length
-string-length number
*/
void parse_arguments(int argc, char** argv) {
REGISTER_ARGS(argc, argv);
RANGE_START = GET_ARG("-start", "0").get_int();
RANGE_END = GET_ARG("-end", "1000000000").get_int();
THREADS = std::min(GET_ARG("-threads", "1").get_int(),
(int)std::thread::hardware_concurrency());
STRING_LENGTH =
ProgramArguments::instance().get_arg("-string-length", "128").get_int();
}
int main(int argc, char** argv) {
logging::init_async();
logging::log->pipe(std::make_unique<Stdout>());
parse_arguments(argc, argv);
StringGenerator sg(STRING_LENGTH);
IntegerGenerator ig(RANGE_START, RANGE_END);
/*
    Creates RandomGenerators, ConcurrentMaps and Random Element Vectors for the
following use cases:
Map elements contain keys and value for:
<int, int>,
<int, string>
<string, int>
<string, string>
*/
// random generators for tests
PairGenerator<IntegerGenerator, IntegerGenerator> piig(&ig, &ig);
PairGenerator<StringGenerator, StringGenerator> pssg(&sg, &sg);
PairGenerator<StringGenerator, IntegerGenerator> psig(&sg, &ig);
PairGenerator<IntegerGenerator, StringGenerator> pisg(&ig, &sg);
// maps used for testing
ConcurrentMap<int, int> ii_map;
ConcurrentMap<int, std::string> is_map;
ConcurrentMap<std::string, int> si_map;
ConcurrentMap<std::string, std::string> ss_map;
// random elements for testing
auto ii_elems = utils::random::generate_vector(piig, MAX_ELEMENTS);
auto is_elems = utils::random::generate_vector(pisg, MAX_ELEMENTS);
auto si_elems = utils::random::generate_vector(psig, MAX_ELEMENTS);
auto ss_elems = utils::random::generate_vector(pssg, MAX_ELEMENTS);
/* insertion Tests */
benchmark::RegisterBenchmark("InsertValue[Int, Int]", BM_InsertValue, &ii_map,
ii_elems)
->RangeMultiplier(MULTIPLIER)
->Range(1, MAX_ELEMENTS)
->Complexity(benchmark::oN)
->Threads(THREADS);
benchmark::RegisterBenchmark("InsertValue[Int, String]", BM_InsertValue,
&is_map, is_elems)
->RangeMultiplier(MULTIPLIER)
->Range(1, MAX_ELEMENTS)
->Complexity(benchmark::oN)
->Threads(THREADS);
benchmark::RegisterBenchmark("InsertValue[String, Int]", BM_InsertValue,
&si_map, si_elems)
->RangeMultiplier(MULTIPLIER)
->Range(1, MAX_ELEMENTS)
->Complexity(benchmark::oN)
->Threads(THREADS);
benchmark::RegisterBenchmark("InsertValue[String, String]", BM_InsertValue,
&ss_map, ss_elems)
->RangeMultiplier(MULTIPLIER)
->Range(1, MAX_ELEMENTS)
->Complexity(benchmark::oN)
->Threads(THREADS);
// Contains Benchmark Tests
benchmark::RegisterBenchmark("ContainsValue[Int, Int]", BM_ContainsValue,
&ii_map, ii_elems)
->RangeMultiplier(MULTIPLIER)
->Range(1, MAX_ELEMENTS)
->Complexity(benchmark::oN)
->Threads(THREADS);
benchmark::RegisterBenchmark("ContainsValue[Int, String]", BM_ContainsValue,
&is_map, is_elems)
->RangeMultiplier(MULTIPLIER)
->Range(1, MAX_ELEMENTS)
->Complexity(benchmark::oN)
->Threads(THREADS);
benchmark::RegisterBenchmark("ContainsValue[String, Int]", BM_ContainsValue,
&si_map, si_elems)
->RangeMultiplier(MULTIPLIER)
->Range(1, MAX_ELEMENTS)
->Complexity(benchmark::oN)
->Threads(THREADS);
benchmark::RegisterBenchmark("ContainsValue[String, String]",
BM_ContainsValue, &ss_map, ss_elems)
->RangeMultiplier(MULTIPLIER)
->Range(1, MAX_ELEMENTS)
->Complexity(benchmark::oN)
->Threads(THREADS);
  // Deletion Benchmark Tests
benchmark::RegisterBenchmark("DeleteValue[Int, Int]", BM_DeleteValue, &ii_map,
ii_elems)
->RangeMultiplier(MULTIPLIER)
->Range(1, MAX_ELEMENTS)
->Complexity(benchmark::oN)
->Threads(THREADS);
benchmark::RegisterBenchmark("DeleteValue[Int, String]", BM_DeleteValue,
&is_map, is_elems)
->RangeMultiplier(MULTIPLIER)
->Range(1, MAX_ELEMENTS)
->Complexity(benchmark::oN)
->Threads(THREADS);
benchmark::RegisterBenchmark("DeleteValue[String, Int]", BM_DeleteValue,
&si_map, si_elems)
->RangeMultiplier(MULTIPLIER)
->Range(1, MAX_ELEMENTS)
->Complexity(benchmark::oN)
->Threads(THREADS);
benchmark::RegisterBenchmark("DeleteValue[String, String]", BM_DeleteValue,
&ss_map, ss_elems)
->RangeMultiplier(MULTIPLIER)
->Range(1, MAX_ELEMENTS)
->Complexity(benchmark::oN)
->Threads(THREADS);
benchmark::Initialize(&argc, argv);
benchmark::RunSpecifiedBenchmarks();
return 0;
}

View File

@ -1,123 +0,0 @@
#include <random>
#include <thread>
#include "data_structures/concurrent/concurrent_map.hpp"
#include "logging/default.hpp"
#include "logging/streams/stdout.hpp"
#include "utils/command_line/arguments.hpp"
#include "utils/random/generator.h"
#include "benchmark/benchmark_api.h"
/*
ConcurrentMap Benchmark Test:
- tests time of Insertion, Deletion and Find
- benchmarks time for total execution with operation percentages
*/
using utils::random::NumberGenerator;
using utils::random::PairGenerator;
using IntegerGenerator = NumberGenerator<std::uniform_int_distribution<int>,
std::default_random_engine, int>;
// Global Arguments
int MAX_ELEMENTS = 1 << 20, MULTIPLIER = 2;
int THREADS, INSERT_PERC, DELETE_PERC, CONTAINS_PERC, RANGE_START, RANGE_END;
// ConcurrentMap Benchmark Test using percentages for Insert, Delete, Find
template <class K, class V>
static void Rape(benchmark::State& state, ConcurrentMap<int, int>* map,
const std::vector<std::pair<K, V>>& elements) {
int number_of_elements = state.range(0);
while (state.KeepRunning()) {
auto accessor = map->access();
for (int start = 0; start < state.range(0); start++) {
float current_percentage = (float)start / (float)number_of_elements * 100;
if (current_percentage < (float)INSERT_PERC) {
accessor.insert(elements[start].first, elements[start].second);
} else if (current_percentage < (float)CONTAINS_PERC + INSERT_PERC) {
accessor.contains(elements[start].first);
} else {
accessor.remove(elements[start].first);
}
}
}
state.SetComplexityN(state.range(0));
}
auto BM_Rape = [](benchmark::State& state, auto* map, auto& elements) {
Rape(state, map, elements);
};
/*
Commandline Arguments Parsing
Arguments:
* Insertion percentage (0-100)
-insert number(int)
* Deletion percentage (0-100)
-delete number(int)
* Find percentage (0-100)
-find number(int)
* Integer Range Minimum
-start number
* Integer Range Maximum
- end number
* Number of threads
-threads number
*/
void parse_arguments(int argc, char** argv) {
REGISTER_ARGS(argc, argv);
INSERT_PERC = GET_ARG("-insert", "50").get_int();
DELETE_PERC = GET_ARG("-delete", "20").get_int();
CONTAINS_PERC = GET_ARG("-find", "30").get_int();
if (INSERT_PERC + DELETE_PERC + CONTAINS_PERC != 100) {
std::cout << "Invalid percentage" << std::endl;
std::cout << "Percentage must sum to 100" << std::endl;
exit(-1);
}
RANGE_START = GET_ARG("-start", "0").get_int();
RANGE_END = GET_ARG("-end", "1000000000").get_int();
THREADS = std::min(GET_ARG("-threads", "1").get_int(),
(int)std::thread::hardware_concurrency());
}
int main(int argc, char** argv) {
logging::init_async();
logging::log->pipe(std::make_unique<Stdout>());
parse_arguments(argc, argv);
IntegerGenerator int_gen(RANGE_START, RANGE_END);
PairGenerator<IntegerGenerator, IntegerGenerator> pair_gen(&int_gen,
&int_gen);
ConcurrentMap<int, int> map;
auto elements = utils::random::generate_vector(pair_gen, MAX_ELEMENTS);
benchmark::RegisterBenchmark("Rape", BM_Rape, &map, elements)
->RangeMultiplier(MULTIPLIER)
->Range(1, MAX_ELEMENTS)
->Complexity(benchmark::oN)
->Threads(THREADS);
benchmark::Initialize(&argc, argv);
benchmark::RunSpecifiedBenchmarks();
return 0;
}

View File

@ -0,0 +1,265 @@
#include <random>
#include <thread>
#include "data_structures/concurrent/concurrent_map.hpp"
#include "logging/default.hpp"
#include "logging/streams/stdout.hpp"
#include "utils/command_line/arguments.hpp"
#include "utils/random/generator.h"
#include "benchmark/benchmark_api.h"
/*
ConcurrentMap Benchmark Test:
- tests time of Insertion, Contain and Delete operations
- benchmarking time per operation
- test run ConcurrentMap with the following keys and values:
- <int,int>
- <int, string>
- <string, int>
- <string, string>
*/
using utils::random::NumberGenerator;
using utils::random::PairGenerator;
using utils::random::StringGenerator;
using IntegerGenerator = NumberGenerator<std::uniform_int_distribution<int>,
std::default_random_engine, int>;
// Global arguments
int MAX_ELEMENTS = 1 << 18, MULTIPLIER = 2;
int THREADS, RANGE_START, RANGE_END, STRING_LENGTH;
/*
ConcurrentMap Insertion Benchmark Test
*/
// Benchmarks ConcurrentMap insertion: each benchmark iteration opens an
// accessor and inserts the first state.range(0) pre-generated pairs.
template <class K, class V>
static void InsertValue(benchmark::State &state, ConcurrentMap<K, V> *map,
                        const std::vector<std::pair<K, V>> &elements)
{
    while (state.KeepRunning())
    {
        auto accessor = map->access();
        for (int start = 0; start < state.range(0); start++)
        {
            accessor.insert(elements[start].first, elements[start].second);
        }
    }
    // Report input size so the framework can fit the declared complexity.
    state.SetComplexityN(state.range(0));
}
/*
ConcurrentMap Deletion Benchmark Test
*/
// Benchmarks ConcurrentMap removal: each benchmark iteration opens an
// accessor and removes the keys of the first state.range(0) pairs.
//
// Fix: take `elements` by const reference. The original signature took the
// vector by value, copying up to MAX_ELEMENTS pairs on every call; the
// sibling InsertValue already takes it by reference. Callers bind unchanged.
template <class K, class V>
static void DeleteValue(benchmark::State &state, ConcurrentMap<K, V> *map,
                        const std::vector<std::pair<K, V>> &elements)
{
    while (state.KeepRunning())
    {
        auto accessor = map->access();
        for (int start = 0; start < state.range(0); start++)
        {
            accessor.remove(elements[start].first);
        }
    }
    // Report input size for complexity fitting.
    state.SetComplexityN(state.range(0));
}
/*
ConcurrentMap Contains Benchmark Test
*/
// Benchmarks ConcurrentMap lookups: each benchmark iteration opens an
// accessor and probes the keys of the first state.range(0) pairs.
//
// Fix: take `elements` by const reference. The original signature took the
// vector by value, copying up to MAX_ELEMENTS pairs on every call; the
// sibling InsertValue already takes it by reference. Callers bind unchanged.
template <class K, class V>
static void ContainsValue(benchmark::State &state, ConcurrentMap<K, V> *map,
                          const std::vector<std::pair<K, V>> &elements)
{
    while (state.KeepRunning())
    {
        auto accessor = map->access();
        for (int start = 0; start < state.range(0); start++)
        {
            accessor.contains(elements[start].first);
        }
    }
    // Report input size for complexity fitting.
    state.SetComplexityN(state.range(0));
}
// Adapter lambdas: RegisterBenchmark forwards its trailing captured arguments
// to the callable, bridging to the templated helpers above.
auto BM_InsertValue = [](benchmark::State &state, auto *map, auto &elements) {
    InsertValue(state, map, elements);
};
// NOTE(review): the two lambdas below take `elements` by value (copied per
// call), unlike the insert adapter — presumably unintentional; confirm.
auto BM_DeleteValue = [](benchmark::State &state, auto *map, auto elements) {
    DeleteValue(state, map, elements);
};
auto BM_ContainsValue = [](benchmark::State &state, auto *map, auto elements) {
    ContainsValue(state, map, elements);
};
/*
Commandline Argument Parsing
Arguments:
* Integer Range Minimum
-start number
* Integer Range Maximum
- end number
* Number of threads
- threads number
  * Random string length
-string-length number
*/
// Parses the command-line arguments listed in the comment block above into
// the file-level globals (RANGE_START, RANGE_END, THREADS, STRING_LENGTH).
void parse_arguments(int argc, char **argv)
{
    REGISTER_ARGS(argc, argv);
    RANGE_START = GET_ARG("-start", "0").get_int();
    RANGE_END = GET_ARG("-end", "1000000000").get_int();
    // Cap the requested thread count at the machine's hardware concurrency.
    THREADS = std::min(GET_ARG("-threads", "1").get_int(),
                       (int)std::thread::hardware_concurrency());
    // Use GET_ARG for consistency with every other argument above (the
    // original spelled out ProgramArguments::instance().get_arg here).
    STRING_LENGTH = GET_ARG("-string-length", "128").get_int();
}
// Builds the full ConcurrentMap benchmark suite — insert, contains, and delete
// over all four key/value type combinations — and runs it. Registration order
// determines output order.
int main(int argc, char **argv)
{
    // Logging must be up before any benchmark output is produced.
    logging::init_async();
    logging::log->pipe(std::make_unique<Stdout>());
    parse_arguments(argc, argv);
    StringGenerator sg(STRING_LENGTH);
    IntegerGenerator ig(RANGE_START, RANGE_END);
    /*
      Creates RandomGenerators, ConcurrentMaps and Random Element Vectors for
      the following use cases:
        Map elements contain keys and value for:
          <int, int>,
          <int, string>
          <string, int>
          <string, string>
    */
    // random pair generators for each key/value type combination
    PairGenerator<IntegerGenerator, IntegerGenerator> piig(&ig, &ig);
    PairGenerator<StringGenerator, StringGenerator> pssg(&sg, &sg);
    PairGenerator<StringGenerator, IntegerGenerator> psig(&sg, &ig);
    PairGenerator<IntegerGenerator, StringGenerator> pisg(&ig, &sg);
    // maps used for testing
    ConcurrentMap<int, int> ii_map;
    ConcurrentMap<int, std::string> is_map;
    ConcurrentMap<std::string, int> si_map;
    ConcurrentMap<std::string, std::string> ss_map;
    // random elements for testing
    auto ii_elems = utils::random::generate_vector(piig, MAX_ELEMENTS);
    auto is_elems = utils::random::generate_vector(pisg, MAX_ELEMENTS);
    auto si_elems = utils::random::generate_vector(psig, MAX_ELEMENTS);
    auto ss_elems = utils::random::generate_vector(pssg, MAX_ELEMENTS);
    /* insertion benchmarks */
    benchmark::RegisterBenchmark("InsertValue[Int, Int]", BM_InsertValue,
                                 &ii_map, ii_elems)
        ->RangeMultiplier(MULTIPLIER)
        ->Range(1, MAX_ELEMENTS)
        ->Complexity(benchmark::oN)
        ->Threads(THREADS);
    benchmark::RegisterBenchmark("InsertValue[Int, String]", BM_InsertValue,
                                 &is_map, is_elems)
        ->RangeMultiplier(MULTIPLIER)
        ->Range(1, MAX_ELEMENTS)
        ->Complexity(benchmark::oN)
        ->Threads(THREADS);
    benchmark::RegisterBenchmark("InsertValue[String, Int]", BM_InsertValue,
                                 &si_map, si_elems)
        ->RangeMultiplier(MULTIPLIER)
        ->Range(1, MAX_ELEMENTS)
        ->Complexity(benchmark::oN)
        ->Threads(THREADS);
    benchmark::RegisterBenchmark("InsertValue[String, String]", BM_InsertValue,
                                 &ss_map, ss_elems)
        ->RangeMultiplier(MULTIPLIER)
        ->Range(1, MAX_ELEMENTS)
        ->Complexity(benchmark::oN)
        ->Threads(THREADS);
    // contains benchmarks
    benchmark::RegisterBenchmark("ContainsValue[Int, Int]", BM_ContainsValue,
                                 &ii_map, ii_elems)
        ->RangeMultiplier(MULTIPLIER)
        ->Range(1, MAX_ELEMENTS)
        ->Complexity(benchmark::oN)
        ->Threads(THREADS);
    benchmark::RegisterBenchmark("ContainsValue[Int, String]", BM_ContainsValue,
                                 &is_map, is_elems)
        ->RangeMultiplier(MULTIPLIER)
        ->Range(1, MAX_ELEMENTS)
        ->Complexity(benchmark::oN)
        ->Threads(THREADS);
    benchmark::RegisterBenchmark("ContainsValue[String, Int]", BM_ContainsValue,
                                 &si_map, si_elems)
        ->RangeMultiplier(MULTIPLIER)
        ->Range(1, MAX_ELEMENTS)
        ->Complexity(benchmark::oN)
        ->Threads(THREADS);
    benchmark::RegisterBenchmark("ContainsValue[String, String]",
                                 BM_ContainsValue, &ss_map, ss_elems)
        ->RangeMultiplier(MULTIPLIER)
        ->Range(1, MAX_ELEMENTS)
        ->Complexity(benchmark::oN)
        ->Threads(THREADS);
    // deletion benchmarks
    benchmark::RegisterBenchmark("DeleteValue[Int, Int]", BM_DeleteValue,
                                 &ii_map, ii_elems)
        ->RangeMultiplier(MULTIPLIER)
        ->Range(1, MAX_ELEMENTS)
        ->Complexity(benchmark::oN)
        ->Threads(THREADS);
    benchmark::RegisterBenchmark("DeleteValue[Int, String]", BM_DeleteValue,
                                 &is_map, is_elems)
        ->RangeMultiplier(MULTIPLIER)
        ->Range(1, MAX_ELEMENTS)
        ->Complexity(benchmark::oN)
        ->Threads(THREADS);
    benchmark::RegisterBenchmark("DeleteValue[String, Int]", BM_DeleteValue,
                                 &si_map, si_elems)
        ->RangeMultiplier(MULTIPLIER)
        ->Range(1, MAX_ELEMENTS)
        ->Complexity(benchmark::oN)
        ->Threads(THREADS);
    benchmark::RegisterBenchmark("DeleteValue[String, String]", BM_DeleteValue,
                                 &ss_map, ss_elems)
        ->RangeMultiplier(MULTIPLIER)
        ->Range(1, MAX_ELEMENTS)
        ->Complexity(benchmark::oN)
        ->Threads(THREADS);
    benchmark::Initialize(&argc, argv);
    benchmark::RunSpecifiedBenchmarks();
    return 0;
}

View File

@ -0,0 +1,135 @@
#include <random>
#include <thread>
#include "data_structures/concurrent/concurrent_map.hpp"
#include "logging/default.hpp"
#include "logging/streams/stdout.hpp"
#include "utils/command_line/arguments.hpp"
#include "utils/random/generator.h"
#include "benchmark/benchmark_api.h"
/*
ConcurrentMap Benchmark Test:
- tests time of Insertion, Deletion and Find
- benchmarks time for total execution with operation percentages
*/
using utils::random::NumberGenerator;
using utils::random::PairGenerator;
using IntegerGenerator = NumberGenerator<std::uniform_int_distribution<int>,
std::default_random_engine, int>;
// Global Arguments
int MAX_ELEMENTS = 1 << 20, MULTIPLIER = 2;
int THREADS, INSERT_PERC, DELETE_PERC, CONTAINS_PERC, RANGE_START, RANGE_END;
// ConcurrentMap Becnhmark Test using percentages for Insert, Delete, Find
template <class K, class V>
static void Rape(benchmark::State &state, ConcurrentMap<int, int> *map,
const std::vector<std::pair<K, V>> &elements)
{
int number_of_elements = state.range(0);
while (state.KeepRunning())
{
auto accessor = map->access();
for (int start = 0; start < state.range(0); start++)
{
float current_percentage =
(float)start / (float)number_of_elements * 100;
if (current_percentage < (float)INSERT_PERC)
{
accessor.insert(elements[start].first, elements[start].second);
}
else if (current_percentage < (float)CONTAINS_PERC + INSERT_PERC)
{
accessor.contains(elements[start].first);
}
else
{
accessor.remove(elements[start].first);
}
}
}
state.SetComplexityN(state.range(0));
}
// Adapter lambda: benchmark::RegisterBenchmark needs a callable object, not a
// function template, so this forwards to the templated workload above.
auto BM_Rape = [](benchmark::State &state, auto *map, auto &elements) {
    Rape(state, map, elements);
};
/*
Commandline Arguments Parsing
Arguments:
* Insertion percentage (0-100)
-insert number(int)
* Deletion percentage (0-100)
-delete number(int)
* Find percentage (0-100)
-find number(int)
* Integer Range Minimum
-start number
* Integer Range Maximum
- end number
* Number of threads
-threads number
*/
// Parses the command line arguments described in the comment block above into
// the global workload-configuration variables. Exits the whole process with
// -1 when the insert/delete/find percentages do not sum to 100.
void parse_arguments(int argc, char **argv)
{
    REGISTER_ARGS(argc, argv);
    // operation mix; defaults: 50% insert, 20% delete, 30% find
    INSERT_PERC = GET_ARG("-insert", "50").get_int();
    DELETE_PERC = GET_ARG("-delete", "20").get_int();
    CONTAINS_PERC = GET_ARG("-find", "30").get_int();
    if (INSERT_PERC + DELETE_PERC + CONTAINS_PERC != 100)
    {
        std::cout << "Invalid percentage" << std::endl;
        std::cout << "Percentage must sum to 100" << std::endl;
        exit(-1);
    }
    // bounds for the generated integer keys (fed to IntegerGenerator in main)
    RANGE_START = GET_ARG("-start", "0").get_int();
    RANGE_END = GET_ARG("-end", "1000000000").get_int();
    // cap the requested thread count at the machine's hardware concurrency
    THREADS = std::min(GET_ARG("-threads", "1").get_int(),
                       (int)std::thread::hardware_concurrency());
}
// Benchmark entry point: sets up async logging to stdout, parses the workload
// arguments, pre-generates MAX_ELEMENTS random (int, int) pairs and registers
// the mixed insert/find/remove benchmark over one shared ConcurrentMap.
int main(int argc, char **argv)
{
    logging::init_async();
    logging::log->pipe(std::make_unique<Stdout>());
    parse_arguments(argc, argv);
    // uniform random integers drawn from [RANGE_START, RANGE_END]
    IntegerGenerator int_gen(RANGE_START, RANGE_END);
    PairGenerator<IntegerGenerator, IntegerGenerator> pair_gen(&int_gen,
                                                               &int_gen);
    ConcurrentMap<int, int> map;
    auto elements = utils::random::generate_vector(pair_gen, MAX_ELEMENTS);
    // workload sizes grow geometrically: 1, MULTIPLIER, ..., MAX_ELEMENTS
    benchmark::RegisterBenchmark("Rape", BM_Rape, &map, elements)
        ->RangeMultiplier(MULTIPLIER)
        ->Range(1, MAX_ELEMENTS)
        ->Complexity(benchmark::oN)
        ->Threads(THREADS);
    benchmark::Initialize(&argc, argv);
    benchmark::RunSpecifiedBenchmarks();
    return 0;
}

View File

@ -1,34 +0,0 @@
#include "benchmark/benchmark_api.h"
#include <set>
#include <vector>
// Benchmark: construct an empty std::vector<int> and append range_x()
// sequential integers to it on every benchmark iteration.
static void BM_VectorInsert(benchmark::State &state)
{
    while (state.KeepRunning()) {
        std::vector<int> values;
        const int count = state.range_x();
        for (int value = 0; value < count; ++value) {
            values.push_back(value);
        }
    }
}
// Register the function as a benchmark
BENCHMARK(BM_VectorInsert)->Range(8, 8 << 10);
//~~~~~~~~~~~~~~~~
// Define another benchmark
// Benchmark: construct an empty std::set<int> and insert range_x()
// sequential integers into it on every benchmark iteration.
static void BM_SetInsert(benchmark::State &state)
{
    while (state.KeepRunning()) {
        std::set<int> values;
        const int count = state.range_x();
        for (int value = 0; value < count; ++value) {
            values.insert(value);
        }
    }
}
BENCHMARK(BM_SetInsert)->Range(8, 8 << 10);
BENCHMARK_MAIN();

View File

@ -1,34 +0,0 @@
#include "benchmark/benchmark_api.h"
#include <set>
#include <vector>
// Benchmark: appends range_x() sequential integers to a freshly constructed
// std::vector<int> on every iteration (push_back plus reallocation cost).
static void BM_VectorInsert(benchmark::State &state)
{
    while (state.KeepRunning()) {
        std::vector<int> insertion_test;
        for (int i = 0, i_end = state.range_x(); i < i_end; i++) {
            insertion_test.push_back(i);
        }
    }
}
// Register the function as a benchmark
BENCHMARK(BM_VectorInsert)->Range(8, 8 << 10);
//~~~~~~~~~~~~~~~~
// Define another benchmark
// Benchmark: inserts range_x() sequential integers into a freshly constructed
// std::set<int> on every iteration (ordered-tree insertion cost).
static void BM_SetInsert(benchmark::State &state)
{
    while (state.KeepRunning()) {
        std::set<int> insertion_test;
        for (int i = 0, i_end = state.range_x(); i < i_end; i++) {
            insertion_test.insert(i);
        }
    }
}
BENCHMARK(BM_SetInsert)->Range(8, 8 << 10);
BENCHMARK_MAIN();

View File

@ -1,44 +1,47 @@
#include "logging/default.hpp"
#include "logging/streams/stdout.hpp"
#include "utils/time/timer.hpp"
#include "query/preprocesor.hpp"
#include "utils/time/timer.hpp"
#include "benchmark/benchmark_api.h"
#include "yaml-cpp/yaml.h"
auto BM_Strip = [](benchmark::State& state, auto& function, std::string query) {
while (state.KeepRunning()) {
for (int start = 0; start < state.range(0); start++) {
function(query);
auto BM_Strip = [](benchmark::State &state, auto &function, std::string query) {
while (state.KeepRunning())
{
for (int start = 0; start < state.range(0); start++)
{
function(query);
}
}
}
state.SetComplexityN(state.range(0));
state.SetComplexityN(state.range(0));
};
int main(int argc, char** argv) {
logging::init_async();
logging::log->pipe(std::make_unique<Stdout>());
int main(int argc, char **argv)
{
logging::init_async();
logging::log->pipe(std::make_unique<Stdout>());
YAML::Node dataset = YAML::LoadFile(
"../../tests/data/cypher_queries/stripper/query_dict.yaml");
YAML::Node dataset = YAML::LoadFile(
"../../tests/data/cypher_queries/stripper/query_dict.yaml");
QueryPreprocessor processor;
using std::placeholders::_1;
std::function<QueryStripped(const std::string& query)> preprocess =
std::bind(&QueryPreprocessor::preprocess, &processor, _1);
QueryPreprocessor processor;
using std::placeholders::_1;
std::function<QueryStripped(const std::string &query)> preprocess =
std::bind(&QueryPreprocessor::preprocess, &processor, _1);
auto tests = dataset["benchmark_queries"].as<std::vector<std::string>>();
for (auto& test : tests) {
auto* benchmark =
benchmark::RegisterBenchmark(test.c_str(), BM_Strip, preprocess, test)
->RangeMultiplier(2)
->Range(1, 8 << 10)
->Complexity(benchmark::oN);
;
}
auto tests = dataset["benchmark_queries"].as<std::vector<std::string>>();
for (auto &test : tests)
{
auto *benchmark = benchmark::RegisterBenchmark(test.c_str(), BM_Strip,
preprocess, test)
->RangeMultiplier(2)
->Range(1, 8 << 10)
->Complexity(benchmark::oN);
}
benchmark::Initialize(&argc, argv);
benchmark::RunSpecifiedBenchmarks();
benchmark::Initialize(&argc, argv);
benchmark::RunSpecifiedBenchmarks();
return 0;
return 0;
}

View File

@ -0,0 +1,43 @@
find_package(Threads REQUIRED)

# set current directory name as a test type
get_filename_component(test_type ${CMAKE_CURRENT_SOURCE_DIR} NAME)

# get all cpp abs file names recursively starting from current directory
# NOTE(review): file(GLOB_RECURSE) will not pick up newly added test files
# until the next reconfigure; an explicit source list (or CONFIGURE_DEPENDS
# on CMake >= 3.12) would be more robust long term.
file(GLOB_RECURSE test_type_cpps *.cpp)
message(STATUS "Available ${test_type} cpp files are: ${test_type_cpps}")

# for each cpp file build binary and register test
foreach(test_cpp ${test_type_cpps})
    # get exec name (remove extension from the abs path)
    get_filename_component(exec_name ${test_cpp} NAME_WE)
    # set target name in format {project_name}_{test_type}_{exec_name}
    set(target_name ${project_name}_${test_type}_${exec_name})
    # build exec file
    add_executable(${target_name} ${test_cpp})
    set_property(TARGET ${target_name} PROPERTY CXX_STANDARD ${cxx_standard})
    # OUTPUT_NAME sets the real name of a target when it is built and can be
    # used to help create two targets of the same name even though CMake
    # requires unique logical target names
    set_target_properties(${target_name} PROPERTIES OUTPUT_NAME ${exec_name})
    # link libraries
    # gtest
    target_link_libraries(${target_name} gtest gtest_main)
    # threads (cross-platform)
    target_link_libraries(${target_name} Threads::Threads)
    # memgraph lib
    target_link_libraries(${target_name} memgraph_lib)
    # fmt format lib
    target_link_libraries(${target_name} ${fmt_static_lib})
    # yaml parser lib
    target_link_libraries(${target_name} ${yaml_static_lib})
    # register test
    # FIX(review): use the NAME/COMMAND signature and pass the *target* name
    # so CMake substitutes the full path of the built binary; the previous
    # add_test(${target_name} ${exec_name}) only worked when ctest happened
    # to run from the directory containing the executable
    add_test(NAME ${target_name} COMMAND ${target_name})
endforeach()

View File

@ -3,9 +3,6 @@
#include <iostream>
#include <random>
#include <thread>
#include "stdio.h"
#include "stdlib.h"
#include "string.h"
#include "data_structures/bitset/dynamic_bitset.hpp"
#include "data_structures/concurrent/concurrent_list.hpp"
@ -28,8 +25,8 @@ constexpr int max_no_threads = 8;
using std::cout;
using std::endl;
using map_t = ConcurrentMap<int, int>;
using set_t = ConcurrentSet<int>;
using map_t = ConcurrentMap<int, int>;
using set_t = ConcurrentSet<int>;
using multiset_t = ConcurrentMultiSet<int>;
using multimap_t = ConcurrentMultiMap<int, int>;
@ -55,7 +52,8 @@ template <typename S>
void check_present_same(typename S::Accessor &acc, size_t data,
std::vector<size_t> &owned)
{
for (auto num : owned) {
for (auto num : owned)
{
permanent_assert(acc.find(num)->second == data,
"My data is present and my");
}
@ -83,7 +81,8 @@ void check_size_list(S &acc, long long size)
size_t iterator_counter = 0;
for (auto elem : acc) {
for (auto elem : acc)
{
++iterator_counter;
}
permanent_assert(iterator_counter == size, "Iterator count should be "
@ -103,7 +102,8 @@ void check_size(typename S::Accessor &acc, long long size)
size_t iterator_counter = 0;
for (auto elem : acc) {
for (auto elem : acc)
{
++iterator_counter;
}
permanent_assert(iterator_counter == size, "Iterator count should be "
@ -115,9 +115,11 @@ void check_size(typename S::Accessor &acc, long long size)
template <typename S>
void check_order(typename S::Accessor &acc)
{
if (acc.begin() != acc.end()) {
if (acc.begin() != acc.end())
{
auto last = acc.begin()->first;
for (auto elem : acc) {
for (auto elem : acc)
{
if (!(last <= elem))
std::cout << "Order isn't maintained. Before was: " << last
<< " next is " << elem.first << "\n";
@ -128,7 +130,8 @@ void check_order(typename S::Accessor &acc)
void check_zero(size_t key_range, long array[], const char *str)
{
for (int i = 0; i < key_range; i++) {
for (int i = 0; i < key_range; i++)
{
permanent_assert(array[i] == 0,
str << " doesn't hold it's guarantees. It has "
<< array[i] << " extra elements.");
@ -137,7 +140,8 @@ void check_zero(size_t key_range, long array[], const char *str)
void check_set(DynamicBitset<> &db, std::vector<bool> &set)
{
for (int i = 0; i < set.size(); i++) {
for (int i = 0; i < set.size(); i++)
{
permanent_assert(!(set[i] ^ db.at(i)),
"Set constraints aren't fullfilled.");
}
@ -147,8 +151,9 @@ void check_set(DynamicBitset<> &db, std::vector<bool> &set)
void check_multi_iterator(multimap_t::Accessor &accessor, size_t key_range,
long set[])
{
for (int i = 0; i < key_range; i++) {
auto it = accessor.find(i);
for (int i = 0; i < key_range; i++)
{
auto it = accessor.find(i);
auto it_m = accessor.find_multi(i);
permanent_assert(
!(it_m != accessor.end(i) && it == accessor.end()),
@ -161,8 +166,10 @@ void check_multi_iterator(multimap_t::Accessor &accessor, size_t key_range,
"MultiIterator didn't found the same "
"first element. Set: "
<< set[i]);
if (set[i] > 0) {
for (int j = 0; j < set[i]; j++) {
if (set[i] > 0)
{
for (int j = 0; j < set[i]; j++)
{
permanent_assert(
it->second == it_m->second,
"MultiIterator and iterator aren't on the same "
@ -189,7 +196,8 @@ run(size_t threads_no, S &skiplist,
{
std::vector<std::future<std::pair<size_t, R>>> futures;
for (size_t thread_i = 0; thread_i < threads_no; ++thread_i) {
for (size_t thread_i = 0; thread_i < threads_no; ++thread_i)
{
std::packaged_task<std::pair<size_t, R>()> task(
[&skiplist, f, thread_i]() {
return std::pair<size_t, R>(thread_i,
@ -210,7 +218,8 @@ std::vector<std::future<std::pair<size_t, R>>> run(size_t threads_no,
{
std::vector<std::future<std::pair<size_t, R>>> futures;
for (size_t thread_i = 0; thread_i < threads_no; ++thread_i) {
for (size_t thread_i = 0; thread_i < threads_no; ++thread_i)
{
std::packaged_task<std::pair<size_t, R>()> task([f, thread_i]() {
return std::pair<size_t, R>(thread_i, f(thread_i));
}); // wrap the function
@ -225,7 +234,8 @@ template <class R>
auto collect(std::vector<std::future<R>> &collect)
{
std::vector<R> collection;
for (auto &fut : collect) {
for (auto &fut : collect)
{
collection.push_back(fut.get());
}
return collection;
@ -235,9 +245,11 @@ std::vector<bool> collect_set(
std::vector<std::future<std::pair<size_t, std::vector<bool>>>> &&futures)
{
std::vector<bool> set;
for (auto &data : collect(futures)) {
for (auto &data : collect(futures))
{
set.resize(data.second.size());
for (int i = 0; i < data.second.size(); i++) {
for (int i = 0; i < data.second.size(); i++)
{
set[i] = set[i] | data.second[i];
}
}
@ -251,58 +263,46 @@ auto insert_try(typename S::Accessor &acc, long long &downcount,
std::vector<K> &owned)
{
return [&](K key, D data) mutable {
if (acc.insert(key, data).second) {
if (acc.insert(key, data).second)
{
downcount--;
owned.push_back(key);
}
};
}
// Helper function.
int parseLine(char *line)
{
// This assumes that a digit will be found and the line ends in " Kb".
int i = strlen(line);
const char *p = line;
while (*p < '0' || *p > '9')
p++;
line[i - 3] = '\0';
i = atoi(p);
return i;
}
// Returns currentlz used memory in kB.
int currently_used_memory()
{ // Note: this value is in KB!
FILE *file = fopen("/proc/self/status", "r");
int result = -1;
char line[128];
while (fgets(line, 128, file) != NULL) {
if (strncmp(line, "VmSize:", 7) == 0) {
result = parseLine(line);
break;
}
}
fclose(file);
return result;
}
// Performs memory check to determine if memory usage before calling given
// function
// is aproximately equal to memory usage after function. Memory usage is thread
// senstive so no_threads spawned in function is necessary.
void memory_check(size_t no_threads, std::function<void()> f)
{
long long start = currently_used_memory();
logging::info("Number of threads: {}", no_threads);
// TODO: replace vm_size with something more appropriate
// the past implementation was teribble wrong
// to that ASAP
// OR
// use custom allocation wrapper
// OR
// user Boost.Test
auto start = vm_size();
logging::info("Memory check (used memory at the beginning): {}", start);
f();
long long leaked =
currently_used_memory() - start -
no_threads * 73732; // OS sensitive, 73732 size allocated for thread
std::cout << "leaked: " << leaked << "\n";
permanent_assert(leaked <= 0, "Memory leak check");
auto end = vm_size();
logging::info("Memory check (used memory at the end): {}", end);
long long delta = end - start;
logging::info("Delta: {}", delta);
// TODO: do memory check somehow
// the past implementation was wrong
permanent_assert(true, "Memory leak");
}
// TODO: move this inside logging/default
// Initializes loging faccilityes
void init_log()
{

View File

@ -1,14 +1,14 @@
#include "common.h"
constexpr size_t THREADS_NO = std::min(max_no_threads, 8);
constexpr size_t key_range = 1e2;
constexpr size_t op_per_thread = 1e5;
constexpr size_t THREADS_NO = std::min(max_no_threads, 8);
constexpr size_t key_range = 1e2;
constexpr size_t op_per_thread = 1e4;
// Depending on value there is a possiblity of numerical overflow
constexpr size_t max_number = 10;
constexpr size_t no_find_per_change = 2;
constexpr size_t max_number = 10;
constexpr size_t no_find_per_change = 2;
constexpr size_t no_insert_for_one_delete = 1;
// This test simulates behavior of transactions.
// This test simulates behavior of a transactions.
// Each thread makes a series of finds interleaved with method which change.
// Exact ratio of finds per change and insert per delete can be regulated with
// no_find_per_change and no_insert_for_one_delete.
@ -17,38 +17,50 @@ int main()
init_log();
memory_check(THREADS_NO, [] {
ConcurrentList<std::pair<int, int>> list;
permanent_assert(list.size() == 0, "The list isn't empty");
auto futures = run<std::pair<long long, long long>>(
THREADS_NO, [&](auto index) mutable {
auto rand = rand_gen(key_range);
auto rand = rand_gen(key_range);
auto rand_change = rand_gen_bool(no_find_per_change);
auto rand_delete = rand_gen_bool(no_insert_for_one_delete);
long long sum = 0;
long long count = 0;
long long sum = 0;
long long count = 0;
for (int i = 0; i < op_per_thread; i++) {
auto num = rand();
for (int i = 0; i < op_per_thread; i++)
{
auto num = rand();
auto data = num % max_number;
if (rand_change()) {
if (rand_delete()) {
for (auto it = list.begin(); it != list.end();
it++) {
if (it->first == num) {
if (it.remove()) {
if (rand_change())
{
if (rand_delete())
{
for (auto it = list.begin(); it != list.end(); it++)
{
if (it->first == num)
{
if (it.remove())
{
sum -= data;
count--;
}
break;
}
}
} else {
}
else
{
list.begin().push(std::make_pair(num, data));
sum += data;
count++;
}
} else {
for (auto &v : list) {
if (v.first == num) {
}
else
{
for (auto &v : list)
{
if (v.first == num)
{
permanent_assert(v.second == data,
"Data is invalid");
break;
@ -60,18 +72,23 @@ int main()
return std::pair<long long, long long>(sum, count);
});
auto it = list.begin();
long long sums = 0;
auto it = list.begin();
long long sums = 0;
long long counters = 0;
for (auto &data : collect(futures)) {
for (auto &data : collect(futures))
{
sums += data.second.first;
counters += data.second.second;
}
for (auto &e : list) {
for (auto &e : list)
{
sums -= e.second;
}
permanent_assert(sums == 0, "Same values aren't present");
check_size_list<ConcurrentList<std::pair<int, int>>>(list, counters);
std::this_thread::sleep_for(1s);
});
}

View File

@ -1,33 +1,41 @@
#include "common.h"
constexpr size_t THREADS_NO = std::min(max_no_threads, 8);
constexpr size_t op_per_thread = 1e5;
constexpr size_t bit_part_len = 2;
constexpr size_t no_slots = 1e4;
constexpr size_t key_range = no_slots * THREADS_NO * bit_part_len;
constexpr size_t THREADS_NO = std::min(max_no_threads, 8);
constexpr size_t op_per_thread = 1e5;
constexpr size_t bit_part_len = 2;
constexpr size_t no_slots = 1e4;
constexpr size_t key_range = no_slots * THREADS_NO * bit_part_len;
constexpr size_t no_sets_per_clear = 2;
// TODO: document the test
int main()
{
DynamicBitset<> db;
auto seted =
collect_set(run<std::vector<bool>>(THREADS_NO, [&](auto index) {
auto rand = rand_gen(no_slots);
auto rand = rand_gen(no_slots);
auto clear_op = rand_gen_bool(no_sets_per_clear);
std::vector<bool> set(key_range);
for (size_t i = 0; i < op_per_thread; i++) {
for (size_t i = 0; i < op_per_thread; i++)
{
size_t num =
rand() * THREADS_NO * bit_part_len + index * bit_part_len;
if (clear_op()) {
if (clear_op())
{
db.clear(num, bit_part_len);
for (int j = 0; j < bit_part_len; j++) {
for (int j = 0; j < bit_part_len; j++)
{
set[num + j] = false;
}
} else {
}
else
{
db.set(num, bit_part_len);
for (int j = 0; j < bit_part_len; j++)
for (int j = 0; j < bit_part_len; j++)
set[num + j] = true;
}
}

View File

@ -1,25 +1,29 @@
#include "common.h"
constexpr size_t THREADS_NO = std::min(max_no_threads, 4);
constexpr size_t op_per_thread = 1e5;
constexpr size_t THREADS_NO = std::min(max_no_threads, 4);
constexpr size_t op_per_thread = 1e5;
constexpr size_t up_border_bit_set_pow2 = 3;
constexpr size_t key_range =
op_per_thread * THREADS_NO * (1 << up_border_bit_set_pow2) * 2;
// TODO: document the test
int main()
{
DynamicBitset<> db;
auto seted =
collect_set(run<std::vector<bool>>(THREADS_NO, [&](auto index) {
auto rand = rand_gen(key_range);
auto rand = rand_gen(key_range);
auto rand_len = rand_gen(up_border_bit_set_pow2);
std::vector<bool> set(key_range + (1 << up_border_bit_set_pow2));
for (size_t i = 0; i < op_per_thread; i++) {
auto len = 1 << rand_len();
for (size_t i = 0; i < op_per_thread; i++)
{
auto len = 1 << rand_len();
size_t num = (rand() / len) * len;
db.set(num, len);
for (int j = 0; j < len; j++)
for (int j = 0; j < len; j++)
set[num + j] = true;
}
@ -28,14 +32,16 @@ int main()
auto cleared =
collect_set(run<std::vector<bool>>(THREADS_NO, [&](auto index) {
auto rand = rand_gen(key_range);
auto rand = rand_gen(key_range);
auto rand_len = rand_gen(up_border_bit_set_pow2);
std::vector<bool> set(key_range + (1 << up_border_bit_set_pow2));
for (size_t i = 0; i < op_per_thread; i++) {
auto len = 1 << rand_len();
for (size_t i = 0; i < op_per_thread; i++)
{
auto len = 1 << rand_len();
size_t num = (rand() / len) * len;
for (int j = 0; j < len; j++) {
for (int j = 0; j < len; j++)
{
set[num + j] = set[num + j] | db.at(num + j);
}
db.clear(num, len);
@ -44,7 +50,8 @@ int main()
return set;
}));
for (size_t i = 0; i < seted.size(); i++) {
for (size_t i = 0; i < seted.size(); i++)
{
seted[i] = seted[i] & (!cleared[i]);
}

View File

@ -1,17 +1,21 @@
#include "common.h"
constexpr size_t THREADS_NO = std::min(max_no_threads, 8);
constexpr size_t THREADS_NO = std::min(max_no_threads, 8);
constexpr size_t op_per_thread = 1e5;
constexpr size_t key_range = op_per_thread * THREADS_NO * 3;
constexpr size_t key_range = op_per_thread * THREADS_NO * 3;
// TODO: document the test
int main()
{
DynamicBitset<> db;
auto set = collect_set(run<std::vector<bool>>(THREADS_NO, [&](auto index) {
auto rand = rand_gen(key_range);
std::vector<bool> set(key_range);
for (size_t i = 0; i < op_per_thread; i++) {
for (size_t i = 0; i < op_per_thread; i++)
{
size_t num = rand();
db.set(num);
set[num] = true;

View File

@ -1,24 +1,28 @@
#include "common.h"
constexpr size_t THREADS_NO = std::min(max_no_threads, 4);
constexpr size_t op_per_thread = 1e5;
constexpr size_t THREADS_NO = std::min(max_no_threads, 4);
constexpr size_t op_per_thread = 1e5;
constexpr size_t up_border_bit_set_pow2 = 3;
constexpr size_t key_range =
op_per_thread * THREADS_NO * (1 << up_border_bit_set_pow2) * 2;
// TODO: document the test
int main()
{
DynamicBitset<> db;
auto set = collect_set(run<std::vector<bool>>(THREADS_NO, [&](auto index) {
auto rand = rand_gen(key_range);
auto rand = rand_gen(key_range);
auto rand_len = rand_gen(up_border_bit_set_pow2);
std::vector<bool> set(key_range + (1 << up_border_bit_set_pow2));
for (size_t i = 0; i < op_per_thread; i++) {
auto len = 1 << rand_len();
for (size_t i = 0; i < op_per_thread; i++)
{
auto len = 1 << rand_len();
size_t num = (rand() / len) * len;
db.set(num, len);
for (int j = 0; j < len; j++)
for (int j = 0; j < len; j++)
set[num + j] = true;
}

View File

@ -1,62 +0,0 @@
#include <cassert>
#include <iostream>
#include <thread>
#include "common.h"
#include "data_structures/linked_list.hpp"
using std::cout;
using std::endl;
// Hammers the given list from two threads: each thread repeatedly pushes one
// element to the front and pops it again, and after every round the caller's
// expected steady-state `size` is asserted.
// NOTE(review): the size assertion only holds for a thread-safe list — the
// std::list variant of this call is commented out in main for that reason.
// Also note assert() compiles away under NDEBUG, silencing the check.
template <typename list_type>
void test_concurrent_list_access(list_type &list, std::size_t size)
{
    // test concurrent access
    for (int i = 0; i < 1000000; ++i) {
        std::thread t1([&list] {
            list.push_front(1);
            list.pop_front();
        });
        std::thread t2([&list] {
            list.push_front(2);
            list.pop_front();
        });
        t1.join();
        t2.join();
        assert(list.size() == size);
    }
}
// Exercises LinkedList<int>: basic push_front/front/pop_front semantics with
// two elements, then a two-thread stress test via test_concurrent_list_access.
// NOTE(review): all correctness checks use assert(), so this test verifies
// nothing when built with NDEBUG.
int main()
{
    init_log();
    LinkedList<int> list;
    // push & pop operations
    list.push_front(10);
    list.push_front(20);
    auto a = list.front();
    assert(a == 20);
    list.pop_front();
    a = list.front();
    assert(a == 10);
    list.pop_front();
    assert(list.size() == 0);
    // concurrent test
    LinkedList<int> concurrent_list;
    concurrent_list.push_front(1);
    concurrent_list.push_front(1);
    std::list<int> no_concurrent_list;
    no_concurrent_list.push_front(1);
    no_concurrent_list.push_front(1);
    test_concurrent_list_access(concurrent_list, 2);
    // test_concurrent_list_access(no_concurrent_list, 2);
    return 0;
}

View File

@ -3,25 +3,29 @@
constexpr size_t THREADS_NO = std::min(max_no_threads, 8);
constexpr size_t elems_per_thread = 100000;
constexpr size_t key_range = elems_per_thread * THREADS_NO * 2;
constexpr size_t key_range = elems_per_thread * THREADS_NO * 2;
// TODO: document the test
// This test checks insert_unique method under pressure.
// Test checks for missing data and changed/overwriten data.
int main()
{
init_log();
memory_check(THREADS_NO, [] {
map_t skiplist;
auto futures = run<std::vector<size_t>>(
THREADS_NO, skiplist, [](auto acc, auto index) {
auto rand = rand_gen(key_range);
auto rand = rand_gen(key_range);
long long downcount = elems_per_thread;
std::vector<size_t> owned;
auto inserter =
insert_try<size_t, size_t, map_t>(acc, downcount, owned);
do {
do
{
inserter(rand(), index);
} while (downcount > 0);
@ -30,7 +34,8 @@ int main()
});
auto accessor = skiplist.access();
for (auto &owned : collect(futures)) {
for (auto &owned : collect(futures))
{
check_present_same<map_t>(accessor, owned);
}

View File

@ -1,8 +1,10 @@
#include "common.h"
constexpr size_t THREADS_NO = std::min(max_no_threads, 8);
constexpr size_t THREADS_NO = std::min(max_no_threads, 8);
constexpr size_t elems_per_thread = 100000;
constexpr size_t key_range = elems_per_thread * THREADS_NO * 2;
constexpr size_t key_range = elems_per_thread * THREADS_NO * 2;
// TODO: document the test
// This test checks insert_unique method under pressure.
// Threads will try to insert keys in the same order.
@ -11,18 +13,20 @@ constexpr size_t key_range = elems_per_thread * THREADS_NO * 2;
int main()
{
init_log();
memory_check(THREADS_NO, [] {
map_t skiplist;
auto futures = run<std::vector<size_t>>(
THREADS_NO, skiplist, [](auto acc, auto index) {
auto rand = rand_gen(key_range);
auto rand = rand_gen(key_range);
long long downcount = elems_per_thread;
std::vector<size_t> owned;
auto inserter =
insert_try<size_t, size_t, map_t>(acc, downcount, owned);
for (int i = 0; downcount > 0; i++) {
for (int i = 0; downcount > 0; i++)
{
inserter(i, index);
}
@ -31,7 +35,8 @@ int main()
});
auto accessor = skiplist.access();
for (auto &owned : collect(futures)) {
for (auto &owned : collect(futures))
{
check_present_same<map_t>(accessor, owned);
}

View File

@ -1,21 +1,26 @@
#include "common.h"
constexpr size_t THREADS_NO = std::min(max_no_threads, 8);
constexpr size_t THREADS_NO = std::min(max_no_threads, 8);
constexpr size_t elems_per_thread = 1e5;
// TODO: document the test
int main()
{
init_log();
memory_check(THREADS_NO, [&] {
ds::static_array<std::thread, THREADS_NO> threads;
map_t skiplist;
// put THREADS_NO * elems_per_thread items to the skiplist
for (size_t thread_i = 0; thread_i < THREADS_NO; ++thread_i) {
for (size_t thread_i = 0; thread_i < THREADS_NO; ++thread_i)
{
threads[thread_i] = std::thread(
[&skiplist](size_t start, size_t end) {
auto accessor = skiplist.access();
for (size_t elem_i = start; elem_i < end; ++elem_i) {
for (size_t elem_i = start; elem_i < end; ++elem_i)
{
accessor.insert(elem_i, elem_i);
}
},
@ -23,7 +28,8 @@ int main()
thread_i * elems_per_thread + elems_per_thread);
}
// wait all threads
for (auto &thread : threads) {
for (auto &thread : threads)
{
thread.join();
}
@ -34,11 +40,13 @@ int main()
"all elements in skiplist");
}
for (size_t thread_i = 0; thread_i < THREADS_NO; ++thread_i) {
for (size_t thread_i = 0; thread_i < THREADS_NO; ++thread_i)
{
threads[thread_i] = std::thread(
[&skiplist](size_t start, size_t end) {
auto accessor = skiplist.access();
for (size_t elem_i = start; elem_i < end; ++elem_i) {
for (size_t elem_i = start; elem_i < end; ++elem_i)
{
permanent_assert(accessor.remove(elem_i) == true, "");
}
},
@ -46,7 +54,8 @@ int main()
thread_i * elems_per_thread + elems_per_thread);
}
// // wait all threads
for (auto &thread : threads) {
for (auto &thread : threads)
{
thread.join();
}
@ -61,8 +70,9 @@ int main()
// check count
{
size_t iterator_counter = 0;
auto accessor = skiplist.access();
for (auto elem : accessor) {
auto accessor = skiplist.access();
for (auto elem : accessor)
{
++iterator_counter;
cout << elem.first << " ";
}

View File

@ -1,13 +1,16 @@
#include "common.h"
constexpr size_t THREADS_NO = std::min(max_no_threads, 8);
constexpr size_t elements = 2e6;
// Test for simple memory leaks
/**
* Put elements number of elements in the skiplist per each thread and see
* is there any memory leak
*/
int main()
{
init_log();
memory_check(THREADS_NO, [] {
map_t skiplist;

View File

@ -1,22 +1,30 @@
#include "common.h"
constexpr size_t THREADS_NO = std::min(max_no_threads, 1);
constexpr size_t THREADS_NO = std::min(max_no_threads, 1);
constexpr size_t elems_per_thread = 16e5;
// Known memory leak at 1,600,000 elements.
// TODO: Memory leak at 1,600,000 elements (Kruno wrote this here but
// the memory_check method had invalid implementation)
// 1. implement valid memory_check
// 2. analyse this code
// 3. fix the memory leak
// 4. write proper test
int main()
{
init_log();
memory_check(THREADS_NO, [&] {
ds::static_array<std::thread, THREADS_NO> threads;
map_t skiplist;
// put THREADS_NO * elems_per_thread items to the skiplist
for (size_t thread_i = 0; thread_i < THREADS_NO; ++thread_i) {
for (size_t thread_i = 0; thread_i < THREADS_NO; ++thread_i)
{
threads[thread_i] = std::thread(
[&skiplist](size_t start, size_t end) {
auto accessor = skiplist.access();
for (size_t elem_i = start; elem_i < end; ++elem_i) {
for (size_t elem_i = start; elem_i < end; ++elem_i)
{
accessor.insert(elem_i, elem_i);
}
},
@ -24,7 +32,8 @@ int main()
thread_i * elems_per_thread + elems_per_thread);
}
// wait all threads
for (auto &thread : threads) {
for (auto &thread : threads)
{
thread.join();
}
@ -35,11 +44,13 @@ int main()
"all elements in skiplist");
}
for (size_t thread_i = 0; thread_i < THREADS_NO; ++thread_i) {
for (size_t thread_i = 0; thread_i < THREADS_NO; ++thread_i)
{
threads[thread_i] = std::thread(
[&skiplist](size_t start, size_t end) {
auto accessor = skiplist.access();
for (size_t elem_i = start; elem_i < end; ++elem_i) {
for (size_t elem_i = start; elem_i < end; ++elem_i)
{
permanent_assert(accessor.remove(elem_i) == true, "");
}
},
@ -47,7 +58,8 @@ int main()
thread_i * elems_per_thread + elems_per_thread);
}
// // wait all threads
for (auto &thread : threads) {
for (auto &thread : threads)
{
thread.join();
}
@ -62,8 +74,9 @@ int main()
// check count
{
size_t iterator_counter = 0;
auto accessor = skiplist.access();
for (auto elem : accessor) {
auto accessor = skiplist.access();
for (auto elem : accessor)
{
++iterator_counter;
cout << elem.first << " ";
}

View File

@ -7,13 +7,16 @@ constexpr size_t op_per_thread = 1e5;
constexpr size_t max_number = 10;
constexpr size_t no_insert_for_one_delete = 1;
// This test checks MultiIterator from multimap.
// Each thread removes random data. So removes are joint.
// Calls of remove method are interleaved with insert calls which always
// succeed.
/**
* This test checks MultiIterator from multimap.
* Each thread removes random data. So removes are joint.
* Calls of remove method are interleaved with insert calls which always
* succeed.
*/
int main()
{
init_log();
memory_check(THREADS_NO, [] {
multimap_t skiplist;

View File

@ -1,48 +1,57 @@
#include "common.h"
constexpr size_t THREADS_NO = std::min(max_no_threads, 8);
constexpr size_t key_range = 1e4;
constexpr size_t THREADS_NO = std::min(max_no_threads, 8);
constexpr size_t key_range = 1e4;
constexpr size_t op_per_thread = 1e5;
// Depending on value there is a possiblity of numerical overflow
constexpr size_t max_number = 10;
constexpr size_t max_number = 10;
constexpr size_t no_insert_for_one_delete = 1;
// This test checks MultiIterator remove method.
// Each thread removes random data. So removes are joint and scattered on same
// key values.
// Calls of remove method are interleaved with insert calls which always
// succeed.
/**
* This test checks MultiIterator remove method.
* Each thread removes random data. So removes are joint and scattered on same
* key values. Calls of remove method are interleaved with insert calls which
* always succeed.
*/
int main()
{
init_log();
memory_check(THREADS_NO, [] {
multimap_t skiplist;
auto futures = run<std::pair<long long, std::vector<long long>>>(
THREADS_NO, skiplist, [](auto acc, auto index) {
auto rand = rand_gen(key_range);
auto rand_op = rand_gen_bool(no_insert_for_one_delete);
auto rand = rand_gen(key_range);
auto rand_op = rand_gen_bool(no_insert_for_one_delete);
long long downcount = op_per_thread;
std::vector<long long> set(key_range, 0);
long long sum = 0;
do {
do
{
size_t num = rand();
auto data = rand() % max_number;
if (rand_op()) {
auto data = rand() % max_number;
if (rand_op())
{
int len = 0;
for (auto it = acc.find_multi(num); it.has_value();
it++) {
it++)
{
len++;
}
if (len > 0) {
if (len > 0)
{
int pos = rand() % len;
for (auto it = acc.find_multi(num); it.has_value();
it++) {
if (pos == 0) {
it++)
{
if (pos == 0)
{
auto data_r = it->second;
if (it.remove()) {
if (it.remove())
{
downcount--;
set[num]--;
sum -= data_r;
@ -55,7 +64,9 @@ int main()
pos--;
}
}
} else {
}
else
{
acc.insert(num, data);
downcount--;
set[num]++;
@ -67,10 +78,12 @@ int main()
});
long set[key_range] = {0};
long long sums = 0;
for (auto &data : collect(futures)) {
long long sums = 0;
for (auto &data : collect(futures))
{
sums += data.second.first;
for (int i = 0; i < key_range; i++) {
for (int i = 0; i < key_range; i++)
{
set[i] += data.second.second[i];
}
}
@ -78,7 +91,8 @@ int main()
auto accessor = skiplist.access();
check_multi_iterator(accessor, key_range, set);
for (auto &e : accessor) {
for (auto &e : accessor)
{
set[e.first]--;
sums -= e.second;
}

View File

@ -1,42 +1,48 @@
#include "common.h"
constexpr size_t THREADS_NO = std::min(max_no_threads, 4);
constexpr size_t key_range = 1e4;
constexpr size_t THREADS_NO = std::min(max_no_threads, 4);
constexpr size_t key_range = 1e4;
constexpr size_t op_per_thread = 1e5;
// Depending on value there is a possiblity of numerical overflow
constexpr size_t max_number = 10;
constexpr size_t max_number = 10;
constexpr size_t no_insert_for_one_delete = 2;
// This test checks MultiIterator remove method ].
// Each thread removes all duplicate data on random key. So removes are joint
// and scattered on same
// key values.
// Calls of remove method are interleaved with insert calls which always
// succeed.
/**
* This test checks MultiIterator remove method. Each thread removes all
* duplicate data for a random key. So removes are joined and scattered on the
* same key values. Calls of remove method are interleaved with insert calls
* which always succeed.
*/
int main()
{
init_log();
memory_check(THREADS_NO, [] {
multimap_t skiplist;
auto futures = run<std::pair<long long, std::vector<long long>>>(
THREADS_NO, skiplist, [](auto acc, auto index) {
auto rand = rand_gen(key_range);
auto rand_op = rand_gen_bool(no_insert_for_one_delete);
auto rand = rand_gen(key_range);
auto rand_op = rand_gen_bool(no_insert_for_one_delete);
long long downcount = op_per_thread;
std::vector<long long> set(key_range, 0);
long long sum = 0;
do {
do
{
size_t num = rand();
auto data = rand() % max_number;
if (rand_op()) {
auto data = rand() % max_number;
if (rand_op())
{
auto it = acc.find_multi(num);
if (it.has_value()) {
if (it.has_value())
{
it++;
while (it.has_value()) {
while (it.has_value())
{
auto data_r = it->second;
if (it.remove()) {
if (it.remove())
{
downcount--;
set[num]--;
sum -= data_r;
@ -47,7 +53,9 @@ int main()
it++;
}
}
} else {
}
else
{
acc.insert(num, data);
downcount--;
set[num]++;
@ -59,10 +67,12 @@ int main()
});
long set[key_range] = {0};
long long sums = 0;
for (auto &data : collect(futures)) {
long long sums = 0;
for (auto &data : collect(futures))
{
sums += data.second.first;
for (int i = 0; i < key_range; i++) {
for (int i = 0; i < key_range; i++)
{
set[i] += data.second.second[i];
}
}
@ -70,7 +80,8 @@ int main()
auto accessor = skiplist.access();
check_multi_iterator(accessor, key_range, set);
for (auto &e : accessor) {
for (auto &e : accessor)
{
set[e.first]--;
sums -= e.second;
}

View File

@ -1,12 +1,14 @@
#include "common.h"
constexpr size_t THREADS_NO = std::min(max_no_threads, 8);
constexpr size_t key_range = 1e4;
constexpr size_t THREADS_NO = std::min(max_no_threads, 8);
constexpr size_t key_range = 1e4;
constexpr size_t op_per_thread = 1e5;
// Depending on value there is a possiblity of numerical overflow
constexpr size_t max_number = 10;
constexpr size_t max_number = 10;
constexpr size_t no_insert_for_one_delete = 1;
// TODO: document the test
// This test checks multimap.
// Each thread removes random data. So removes are joint.
// Calls of remove method are interleaved with insert calls which always
@ -14,29 +16,35 @@ constexpr size_t no_insert_for_one_delete = 1;
int main()
{
init_log();
memory_check(THREADS_NO, [] {
multimap_t skiplist;
std::atomic<long long> size(0);
auto futures = run<std::pair<long long, std::vector<long long>>>(
THREADS_NO, skiplist, [&size](auto acc, auto index) {
auto rand = rand_gen(key_range);
auto rand_op = rand_gen_bool(no_insert_for_one_delete);
auto rand = rand_gen(key_range);
auto rand_op = rand_gen_bool(no_insert_for_one_delete);
long long downcount = op_per_thread;
std::vector<long long> set(key_range, 0);
long long sum = 0;
do {
do
{
size_t num = rand();
auto data = num % max_number;
if (rand_op()) {
if (acc.remove(num)) {
auto data = num % max_number;
if (rand_op())
{
if (acc.remove(num))
{
downcount--;
set[num]--;
sum -= data;
size--;
}
} else {
}
else
{
acc.insert(num, data);
downcount--;
set[num]++;
@ -49,11 +57,13 @@ int main()
});
long set[key_range] = {0};
long long sums = 0;
long long sums = 0;
long long size_calc = 0;
for (auto &data : collect(futures)) {
for (auto &data : collect(futures))
{
sums += data.second.first;
for (int i = 0; i < key_range; i++) {
for (int i = 0; i < key_range; i++)
{
set[i] += data.second.second[i];
size_calc += data.second.second[i];
}
@ -64,15 +74,18 @@ int main()
check_order<multimap_t>(accessor);
auto bef_it = accessor.end();
for (int i = 0; i < key_range; i++) {
for (int i = 0; i < key_range; i++)
{
auto it = accessor.find(i);
if (set[i] > 0) {
if (set[i] > 0)
{
permanent_assert(it != accessor.end(),
"Multimap doesn't contain necessary element "
<< i);
if (bef_it == accessor.end()) bef_it = accessor.find(i);
for (int j = 0; j < set[i]; j++) {
for (int j = 0; j < set[i]; j++)
{
permanent_assert(
bef_it != accessor.end(),
"Previous iterator doesn't iterate through same "
@ -89,7 +102,8 @@ int main()
bef_it++;
}
for (int j = 0; j < set[i]; j++) {
for (int j = 0; j < set[i]; j++)
{
permanent_assert(it != accessor.end(),
"Iterator doesn't iterate through same "
"key entrys. Expected "
@ -110,7 +124,8 @@ int main()
}
}
for (auto &e : accessor) {
for (auto &e : accessor)
{
set[e.first]--;
sums -= e.second;
}

View File

@ -5,6 +5,8 @@ constexpr size_t key_range = 1e4;
constexpr size_t op_per_thread = 1e5;
constexpr size_t no_insert_for_one_delete = 1;
// TODO: document the test
// This test checks multiset.
// Each thread removes random data. So removes are joint.
// Calls of remove method are interleaved with insert calls which always
@ -12,6 +14,7 @@ constexpr size_t no_insert_for_one_delete = 1;
int main()
{
init_log();
memory_check(THREADS_NO, [] {
multiset_t skiplist;

View File

@ -6,6 +6,8 @@ constexpr size_t op_per_thread = 1e5;
constexpr size_t max_number = 10;
constexpr size_t no_insert_for_one_delete = 2;
// TODO: document the test
// This test checks remove method under pressure.
// Threads will try to insert and remove keys aproximetly in the same order.
// This will force threads to compete intensly with each other.
@ -13,6 +15,7 @@ constexpr size_t no_insert_for_one_delete = 2;
int main()
{
init_log();
memory_check(THREADS_NO, [] {
map_t skiplist;

View File

@ -5,12 +5,15 @@ constexpr size_t key_range = 1e5;
constexpr size_t op_per_thread = 1e6;
constexpr size_t no_insert_for_one_delete = 1;
// TODO: document the test
// This test checks remove method under pressure.
// Each thread removes it's own data. So removes are disjoint.
// Calls of remove method are interleaved with insert calls.
int main()
{
init_log();
memory_check(THREADS_NO, [] {
map_t skiplist;

View File

@ -1,12 +1,14 @@
#include "common.h"
constexpr size_t THREADS_NO = std::min(max_no_threads, 8);
constexpr size_t key_range = 1e4;
constexpr size_t THREADS_NO = std::min(max_no_threads, 8);
constexpr size_t key_range = 1e4;
constexpr size_t op_per_thread = 1e5;
// Depending on value there is a possiblity of numerical overflow
constexpr size_t max_number = 10;
constexpr size_t max_number = 10;
constexpr size_t no_insert_for_one_delete = 2;
// TODO: document the test
// This test checks remove method under pressure.
// Each thread removes random data. So removes are joint.
// Calls of remove method are interleaved with insert calls.
@ -18,23 +20,29 @@ int main()
auto futures = run<std::pair<long long, long long>>(
THREADS_NO, skiplist, [](auto acc, auto index) {
auto rand = rand_gen(key_range);
auto rand_op = rand_gen_bool(no_insert_for_one_delete);
auto rand = rand_gen(key_range);
auto rand_op = rand_gen_bool(no_insert_for_one_delete);
long long downcount = op_per_thread;
long long sum = 0;
long long count = 0;
long long sum = 0;
long long count = 0;
do {
auto num = rand();
do
{
auto num = rand();
auto data = num % max_number;
if (rand_op()) {
if (acc.remove(num)) {
if (rand_op())
{
if (acc.remove(num))
{
sum -= data;
downcount--;
count--;
}
} else {
if (acc.insert(num, data).second) {
}
else
{
if (acc.insert(num, data).second)
{
sum += data;
downcount--;
count++;
@ -45,15 +53,17 @@ int main()
return std::pair<long long, long long>(sum, count);
});
auto accessor = skiplist.access();
long long sums = 0;
auto accessor = skiplist.access();
long long sums = 0;
long long counters = 0;
for (auto &data : collect(futures)) {
for (auto &data : collect(futures))
{
sums += data.second.first;
counters += data.second.second;
}
for (auto &e : accessor) {
for (auto &e : accessor)
{
sums -= e.second;
}
permanent_assert(sums == 0, "Aproximetly Same values are present");

View File

@ -5,12 +5,15 @@ constexpr size_t key_range = 1e4;
constexpr size_t op_per_thread = 1e5;
constexpr size_t no_insert_for_one_delete = 2;
// TODO: document the test
// This test checks set.
// Each thread removes random data. So removes are joint.
// Calls of remove method are interleaved with insert calls.
int main()
{
init_log();
memory_check(THREADS_NO, [] {
set_t skiplist;

View File

@ -8,6 +8,8 @@ constexpr size_t max_number = 10;
constexpr size_t no_find_per_change = 5;
constexpr size_t no_insert_for_one_delete = 1;
// TODO: document the test
// This test simulates behavior of transactions.
// Each thread makes a series of finds interleaved with method which change.
// Exact ratio of finds per change and insert per delete can be regulated with
@ -15,6 +17,7 @@ constexpr size_t no_insert_for_one_delete = 1;
int main()
{
init_log();
memory_check(THREADS_NO, [] {
map_t skiplist;

View File

@ -1,11 +1,21 @@
#include <iostream>
#include <chrono>
#include "gtest/gtest.h"
#include "logging/default.cpp"
#include "utils/timer/timer.hpp"
#include "utils/assert.hpp"
using namespace std::chrono_literals;
/**
* Creates a test timer which will log timeout message at the timeout event.
*
* @param counter how many time units the timer has to wait
*
* @return shared pointer to a timer
*/
Timer::sptr create_test_timer(int64_t counter)
{
return std::make_shared<Timer>(
@ -13,16 +23,38 @@ Timer::sptr create_test_timer(int64_t counter)
);
}
int main(void)
TEST(TimerSchedulerTest, TimerSchedulerExecution)
{
// initialize the timer
TimerScheduler<TimerSet, std::chrono::seconds> timer_scheduler;
// run the timer
timer_scheduler.run();
// add a couple of test timers
for (int64_t i = 1; i <= 3; ++i) {
timer_scheduler.add(create_test_timer(i));
}
// wait for that timers
std::this_thread::sleep_for(4s);
ASSERT_EQ(timer_scheduler.size(), 0);
// add another test timer
timer_scheduler.add(create_test_timer(1));
// wait for another timer
std::this_thread::sleep_for(2s);
// the test is done
timer_scheduler.stop();
return 0;
ASSERT_EQ(timer_scheduler.size(), 0);
}
int main(int argc, char **argv)
{
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}

View File

@ -0,0 +1,70 @@
CREATE (g:garment {garment_id: 1234, garment_category_id: 1, conceals: 30}) RETURN g
MATCH(g:garment {garment_id: 1234}) SET g:AA RETURN g
MATCH(g:garment {garment_id: 1234}) SET g:BB RETURN g
MATCH(g:garment {garment_id: 1234}) SET g:EE RETURN g
CREATE (g:garment {garment_id: 2345, garment_category_id: 6, reveals: 10}) RETURN g
MATCH(g:garment {garment_id: 2345}) SET g:CC RETURN g
MATCH(g:garment {garment_id: 2345}) SET g:DD RETURN g
CREATE (g:garment {garment_id: 3456, garment_category_id: 8}) RETURN g
MATCH(g:garment {garment_id: 3456}) SET g:CC RETURN g
MATCH(g:garment {garment_id: 3456}) SET g:DD RETURN g
CREATE (g:garment {garment_id: 4567, garment_category_id: 15}) RETURN g
MATCH(g:garment {garment_id: 4567}) SET g:AA RETURN g
MATCH(g:garment {garment_id: 4567}) SET g:BB RETURN g
MATCH(g:garment {garment_id: 4567}) SET g:DD RETURN g
CREATE (g:garment {garment_id: 5678, garment_category_id: 19}) RETURN g
MATCH(g:garment {garment_id: 5678}) SET g:BB RETURN g
MATCH(g:garment {garment_id: 5678}) SET g:CC RETURN g
MATCH(g:garment {garment_id: 5678}) SET g:EE RETURN g
CREATE (g:garment {garment_id: 6789, garment_category_id: 3}) RETURN g
MATCH(g:garment {garment_id: 6789}) SET g:AA RETURN g
MATCH(g:garment {garment_id: 6789}) SET g:DD RETURN g
MATCH(g:garment {garment_id: 6789}) SET g:EE RETURN g
CREATE (g:garment {garment_id: 7890, garment_category_id: 25}) RETURN g
MATCH(g:garment {garment_id: 7890}) SET g:AA RETURN g
MATCH(g:garment {garment_id: 7890}) SET g:BB RETURN g
MATCH(g:garment {garment_id: 7890}) SET g:CC RETURN g
MATCH(g:garment {garment_id: 7890}) SET g:EE RETURN g
MATCH (g1:garment {garment_id: 1234}), (g2:garment {garment_id: 4567}) CREATE (g1)-[r:default_outfit]->(g2) RETURN r
MATCH (g1:garment {garment_id: 1234}), (g2:garment {garment_id: 5678}) CREATE (g1)-[r:default_outfit]->(g2) RETURN r
MATCH (g1:garment {garment_id: 1234}), (g2:garment {garment_id: 6789}) CREATE (g1)-[r:default_outfit]->(g2) RETURN r
MATCH (g1:garment {garment_id: 1234}), (g2:garment {garment_id: 7890}) CREATE (g1)-[r:default_outfit]->(g2) RETURN r
MATCH (g1:garment {garment_id: 4567}), (g2:garment {garment_id: 6789}) CREATE (g1)-[r:default_outfit]->(g2) RETURN r
MATCH (g1:garment {garment_id: 4567}), (g2:garment {garment_id: 7890}) CREATE (g1)-[r:default_outfit]->(g2) RETURN r
MATCH (g1:garment {garment_id: 4567}), (g2:garment {garment_id: 5678}) CREATE (g1)-[r:default_outfit]->(g2) RETURN r
MATCH (g1:garment {garment_id: 6789}), (g2:garment {garment_id: 7890}) CREATE (g1)-[r:default_outfit]->(g2) RETURN r
MATCH (g1:garment {garment_id: 5678}), (g2:garment {garment_id: 7890}) CREATE (g1)-[r:default_outfit]->(g2) RETURN r
MATCH (g1:garment {garment_id: 2345}), (g2:garment {garment_id: 3456}) CREATE (g1)-[r:default_outfit]->(g2) RETURN r
MATCH (g1:garment {garment_id: 2345}), (g2:garment {garment_id: 5678}) CREATE (g1)-[r:default_outfit]->(g2) RETURN r
MATCH (g1:garment {garment_id: 2345}), (g2:garment {garment_id: 6789}) CREATE (g1)-[r:default_outfit]->(g2) RETURN r
MATCH (g1:garment {garment_id: 2345}), (g2:garment {garment_id: 7890}) CREATE (g1)-[r:default_outfit]->(g2) RETURN r
MATCH (g1:garment {garment_id: 2345}), (g2:garment {garment_id: 4567}) CREATE (g1)-[r:default_outfit]->(g2) RETURN r
MATCH (g1:garment {garment_id: 3456}), (g2:garment {garment_id: 5678}) CREATE (g1)-[r:default_outfit]->(g2) RETURN r
MATCH (g1:garment {garment_id: 3456}), (g2:garment {garment_id: 6789}) CREATE (g1)-[r:default_outfit]->(g2) RETURN r
MATCH (g1:garment {garment_id: 3456}), (g2:garment {garment_id: 7890}) CREATE (g1)-[r:default_outfit]->(g2) RETURN r
MATCH (g1:garment {garment_id: 3456}), (g2:garment {garment_id: 4567}) CREATE (g1)-[r:default_outfit]->(g2) RETURN r
CREATE (p:profile {profile_id: 111, partner_id: 55, reveals: 30}) RETURN p
CREATE (p:profile {profile_id: 112, partner_id: 55}) RETURN p
CREATE (p:profile {profile_id: 112, partner_id: 77, conceals: 10}) RETURN p
MATCH (p:profile {profile_id: 111, partner_id: 55}), (g:garment {garment_id: 1234}) CREATE (p)-[s:score]->(g) SET s.score=1500 RETURN s
MATCH (p:profile {profile_id: 111, partner_id: 55}), (g:garment {garment_id: 2345}) CREATE (p)-[s:score]->(g) SET s.score=1200 RETURN s
MATCH (p:profile {profile_id: 111, partner_id: 55}), (g:garment {garment_id: 3456}) CREATE (p)-[s:score]->(g) SET s.score=1000 RETURN s
MATCH (p:profile {profile_id: 111, partner_id: 55}), (g:garment {garment_id: 4567}) CREATE (p)-[s:score]->(g) SET s.score=1000 RETURN s
MATCH (p:profile {profile_id: 111, partner_id: 55}), (g:garment {garment_id: 6789}) CREATE (p)-[s:score]->(g) SET s.score=1500 RETURN s
MATCH (p:profile {profile_id: 111, partner_id: 55}), (g:garment {garment_id: 7890}) CREATE (p)-[s:score]->(g) SET s.score=1800 RETURN s
MATCH (p:profile {profile_id: 112, partner_id: 55}), (g:garment {garment_id: 1234}) CREATE (p)-[s:score]->(g) SET s.score=2000 RETURN s
MATCH (p:profile {profile_id: 112, partner_id: 55}), (g:garment {garment_id: 4567}) CREATE (p)-[s:score]->(g) SET s.score=1500 RETURN s
MATCH (p:profile {profile_id: 112, partner_id: 55}), (g:garment {garment_id: 5678}) CREATE (p)-[s:score]->(g) SET s.score=1000 RETURN s
MATCH (p:profile {profile_id: 112, partner_id: 55}), (g:garment {garment_id: 6789}) CREATE (p)-[s:score]->(g) SET s.score=1600 RETURN s
MATCH (p:profile {profile_id: 112, partner_id: 55}), (g:garment {garment_id: 7890}) CREATE (p)-[s:score]->(g) SET s.score=1900 RETURN s
MATCH (p:profile {profile_id: 112, partner_id: 77}), (g:garment {garment_id: 1234}) CREATE (p)-[s:score]->(g) SET s.score=1500 RETURN s
MATCH (p:profile {profile_id: 112, partner_id: 77}), (g:garment {garment_id: 2345}) CREATE (p)-[s:score]->(g) SET s.score=1300 RETURN s
MATCH (p:profile {profile_id: 112, partner_id: 77}), (g:garment {garment_id: 3456}) CREATE (p)-[s:score]->(g) SET s.score=1300 RETURN s
MATCH (p:profile {profile_id: 112, partner_id: 77}), (g:garment {garment_id: 5678}) CREATE (p)-[s:score]->(g) SET s.score=1200 RETURN s
MATCH (p:profile {profile_id: 112, partner_id: 77}), (g:garment {garment_id: 6789}) CREATE (p)-[s:score]->(g) SET s.score=1700 RETURN s
MATCH (p:profile {profile_id: 112, partner_id: 77}), (g:garment {garment_id: 7890}) CREATE (p)-[s:score]->(g) SET s.score=1900 RETURN s
MATCH (a:garment)-[:default_outfit]-(b:garment)-[:default_outfit]-(c:garment)-[:default_outfit]-(d:garment)-[:default_outfit]-(a:garment)-[:default_outfit]-(c:garment), (b:garment)-[:default_outfit]-(d:garment) WHERE a.garment_id = 1234 RETURN a.garment_id, b.garment_id, c.garment_id, d.garment_id ORDER BY (a.score + b.score + c.score + d.score) DESC LIMIT 10

View File

@ -2,26 +2,29 @@ CREATE (n:LABEL {name: "TEST01"}) RETURN n
CREATE (n:LABEL {name: "TEST02"}) RETURN n
CREATE (n:LABEL {name: "TEST2"}) RETURN n
CREATE (n:LABEL {name: "TEST3"}) RETURN n
CREATE (n:OTHER {name: "TEST4"}) RETURN n
CREATE (n:ACCOUNT {id: 2322, name: "TEST", country: "Croatia", "created_at": 2352352}) RETURN n
MATCH (n {id: 0}) RETURN n", "MATCH (n {id: 1}) RETURN n
MATCH (n {id: 2}) RETURN n", "MATCH (n {id: 3}) RETURN n
MATCH (n {id: 0}) RETURN n
MATCH (n {id: 1}) RETURN n
MATCH (n {id: 2}) RETURN n
MATCH (n {id: 3}) RETURN n
MATCH (a {id:0}), (p {id: 1}) CREATE (a)-[r:IS]->(p) RETURN r
MATCH (a {id:1}), (p {id: 2}) CREATE (a)-[r:IS]->(p) RETURN r
MATCH ()-[r]-() WHERE ID(r)=0 RETURN r
MATCH ()-[r]-() WHERE ID(r)=1 RETURN r
MATCH (n: {id: 0}) SET n.name = "TEST100" RETURN n
MATCH (n: {id: 1}) SET n.name = "TEST101" RETURN n
MATCH (n: {id: 0}) SET n.name = "TEST102" RETURN n
MATCH (n:LABEL) RETURN n"
MATCH (n {id: 0}) SET n.name = "TEST100" RETURN n
MATCH (n {id: 1}) SET n.name = "TEST101" RETURN n
MATCH (n {id: 0}) SET n.name = "TEST102" RETURN n
MATCH (n:LABEL) RETURN n
MATCH (n1), (n2) WHERE ID(n1)=0 AND ID(n2)=1 CREATE (n1)<-[r:IS {age: 25,weight: 70}]-(n2) RETURN r
MATCH (n) RETURN n", "MATCH (n:LABEL) RETURN n", "MATCH (n) DELETE n
MATCH (n:LABEL) DELETE n", "MATCH (n) WHERE ID(n) = 0 DELETE n
MATCH ()-[r]-() WHERE ID(r) = 0 DELETE r", "MATCH ()-[r]-() DELETE r
MATCH (n) RETURN n
MATCH (n:LABEL) RETURN n
MATCH (n) DELETE n
MATCH (n:LABEL) DELETE n
MATCH (n) WHERE ID(n) = 0 DELETE n
MATCH ()-[r]-() WHERE ID(r) = 0 DELETE r
MATCH ()-[r]-() DELETE r
MATCH ()-[r:TYPE]-() DELETE r
MATCH (n)-[:TYPE]->(m) WHERE ID(n) = 0 RETURN m
MATCH (n)-[:TYPE]->(m) WHERE n.name = "kruno" RETURN m
MATCH (n)-[:TYPE]->(m) WHERE n.name = "kruno" RETURN n,m
MATCH (n:LABEL)-[:TYPE]->(m) RETURN n"
CREATE (n:LABEL1:LABEL2 {name: "TEST01", age: 20}) RETURN n
MATCH (n:LABEL1:LABEL2 {name: "TEST01", age: 20}) RETURN n
MATCH (n:LABEL)-[:TYPE]->(m) RETURN n

View File

@ -1,2 +0,0 @@
MERGE (g1:garment {garment_id: 1234})-[r:default_outfit]-(g2:garment {garment_id: 2345}) RETURN r
MATCH (p:profile {profile_id: 111, partner_id: 55})-[s:score]-(g.garment {garment_id: 1234}) DELETE s

View File

@ -0,0 +1,43 @@
find_package(Threads REQUIRED)
# set current directory name as a test type
get_filename_component(test_type ${CMAKE_CURRENT_SOURCE_DIR} NAME)
# get all cpp abs file names recursively starting from current directory
file(GLOB test_type_cpps *.cpp)
message(STATUS "Available ${test_type} cpp files are: ${test_type_cpps}")
# for each cpp file build binary and register test
foreach(test_cpp ${test_type_cpps})
# get exec name (remove extension from the abs path)
get_filename_component(exec_name ${test_cpp} NAME_WE)
# set target name in format {project_name}_{test_type}_{exec_name}
set(target_name ${project_name}_${test_type}_${exec_name})
# build exec file
add_executable(${target_name} ${test_cpp})
set_property(TARGET ${target_name} PROPERTY CXX_STANDARD ${cxx_standard})
# OUTPUT_NAME sets the real name of a target when it is built and can be
# used to help create two targets of the same name even though CMake
# requires unique logical target names
set_target_properties(${target_name} PROPERTIES OUTPUT_NAME ${exec_name})
# link libraries
# filesystem
target_link_libraries(${target_name} stdc++fs)
# threads (cross-platform)
target_link_libraries(${target_name} Threads::Threads)
# memgraph lib
target_link_libraries(${target_name} memgraph_lib)
# fmt format lib
target_link_libraries(${target_name} ${fmt_static_lib})
# yaml parser lib
target_link_libraries(${target_name} ${yaml_static_lib})
# register test
add_test(${target_name} ${exec_name})
endforeach()

View File

@ -17,7 +17,7 @@ auto load_basic_functions(Db &db)
vertex_accessor.set(property_key, std::move(args[0]));
return t.commit();
};
functions[11597417457737499503u] = create_node;
functions[3191791685918807343u] = create_node;
// CREATE (n:LABEL {name: "TEST"}) RETURN n;
auto create_labeled_and_named_node = [&db](properties_t &&args) {
@ -29,6 +29,19 @@ auto load_basic_functions(Db &db)
vertex_accessor.add_label(label);
return t.commit();
};
functions[8273374963505210457u] = create_labeled_and_named_node;
// CREATE (n:OTHER {name: "cleaner_test"}) RETURN n
auto create_node_with_other_label = [&db](properties_t &&args) {
DbAccessor t(db);
auto property_key = t.vertex_property_key("name", args[0].key.flags());
auto &label = t.label_find_or_create("OTHER");
auto vertex_accessor = t.vertex_insert();
vertex_accessor.set(property_key, std::move(args[0]));
vertex_accessor.add_label(label);
return t.commit();
};
functions[6237439055665132277u] = create_node_with_other_label;
// CREATE (n:OTHER {name: "TEST"}) RETURN n;
auto create_labeled_and_named_node_v2 = [&db](properties_t &&args) {
@ -40,7 +53,9 @@ auto load_basic_functions(Db &db)
vertex_accessor.add_label(label);
return t.commit();
};
functions[832997784138269151u] = create_labeled_and_named_node_v2;
// CREATE (n:ACCOUNT {id: 2322, name: "TEST", country: "Croatia", "created_at": 2352352}) RETURN n
auto create_account = [&db](properties_t &&args) {
DbAccessor t(db);
auto prop_id = t.vertex_property_key("id", args[0].key.flags());
@ -58,7 +73,12 @@ auto load_basic_functions(Db &db)
vertex_accessor.add_label(label);
return t.commit();
};
functions[16701745788564313211u] = create_account;
// TODO: inconsistency but it doesn't affect the integration tests
// this is not a unique case
// MATCH (n) WHERE ID(n) = 1 RETURN n
// MATCH (n {id: 0}) RETURN n
auto find_node_by_internal_id = [&db](properties_t &&args) {
DbAccessor t(db);
auto maybe_va = t.vertex_find(Id(args[0].as<Int64>().value()));
@ -75,7 +95,10 @@ auto load_basic_functions(Db &db)
}
return t.commit();
};
functions[1444315501940151196u] = find_node_by_internal_id;
functions[11624983287202420303u] = find_node_by_internal_id;
// MATCH (a {id:0}), (p {id: 1}) CREATE (a)-[r:IS]->(p) RETURN r
auto create_edge = [&db](properties_t &&args) {
DbAccessor t(db);
auto &edge_type = t.type_find_or_create("IS");
@ -98,7 +121,9 @@ auto load_basic_functions(Db &db)
return ret;
};
functions[6972641167053231355u] = create_edge;
// MATCH ()-[r]-() WHERE ID(r)=0 RETURN r
auto find_edge_by_internal_id = [&db](properties_t &&args) {
DbAccessor t(db);
auto maybe_ea = t.edge_find(args[0].as<Int64>().value());
@ -122,7 +147,9 @@ auto load_basic_functions(Db &db)
return t.commit();
};
functions[15080095524051312786u] = find_edge_by_internal_id;
// MATCH (n {id: 0}) SET n.name = "TEST102" RETURN n
auto update_node = [&db](properties_t &&args) {
DbAccessor t(db);
auto prop_name = t.vertex_property_key("name", args[1].key.flags());
@ -136,6 +163,7 @@ auto load_basic_functions(Db &db)
return t.commit();
};
functions[2835161674800069655u] = update_node;
// MATCH (n1), (n2) WHERE ID(n1)=0 AND ID(n2)=1 CREATE (n1)<-[r:IS {age: 25,
// weight: 70}]-(n2) RETURN r
@ -157,6 +185,7 @@ auto load_basic_functions(Db &db)
return t.commit();
};
functions[10360716473890539004u] = create_edge_v2;
// MATCH (n) RETURN n
auto match_all_nodes = [&db](properties_t &&args) {
@ -167,6 +196,7 @@ auto load_basic_functions(Db &db)
return t.commit();
};
functions[5949923385370229113u] = match_all_nodes;
// MATCH (n:LABEL) RETURN n
auto match_by_label = [&db](properties_t &&args) {
@ -181,6 +211,7 @@ auto load_basic_functions(Db &db)
return t.commit();
};
functions[16533049303627288013u] = match_by_label;
// MATCH (n) DELETE n
auto match_all_delete = [&db](properties_t &&args) {
@ -196,6 +227,7 @@ auto load_basic_functions(Db &db)
return t.commit();
};
functions[16628411757092333638u] = match_all_delete;
// MATCH (n:LABEL) DELETE n
auto match_label_delete = [&db](properties_t &&args) {
@ -208,6 +240,7 @@ auto load_basic_functions(Db &db)
return t.commit();
};
functions[10022871879682099034u] = match_label_delete;
// MATCH (n) WHERE ID(n) = id DELETE n
auto match_id_delete = [&db](properties_t &&args) {
@ -221,6 +254,7 @@ auto load_basic_functions(Db &db)
return t.commit();
};
functions[5375628876334795080u] = match_id_delete;
// MATCH ()-[r]-() WHERE ID(r) = id DELETE r
auto match_edge_id_delete = [&db](properties_t &&args) {
@ -234,15 +268,17 @@ auto load_basic_functions(Db &db)
return t.commit();
};
functions[11747491556476630933u] = match_edge_id_delete;
// MATCH ()-[r]-() DELETE r
auto match_edge_all_delete = [&db](properties_t &&args) {
auto match_edge_all_delete = [&db](properties_t &&) {
DbAccessor t(db);
t.edge_access().fill().for_all([&](auto a) { a.remove(); });
return t.commit();
};
functions[10064744449500095415u] = match_edge_all_delete;
// MATCH ()-[r:TYPE]-() DELETE r
auto match_edge_type_delete = [&db](properties_t &&args) {
@ -254,6 +290,7 @@ auto load_basic_functions(Db &db)
return t.commit();
};
functions[6084209470626828855u] = match_edge_type_delete;
// MATCH (n)-[:TYPE]->(m) WHERE ID(n) = id RETURN m
auto match_id_type_return = [&db](properties_t &&args) {
@ -275,6 +312,7 @@ auto load_basic_functions(Db &db)
return t.commit();
};
functions[2605621337795673948u] = match_id_type_return;
// MATCH (n)-[:TYPE]->(m) WHERE n.name = "kruno" RETURN m
auto match_name_type_return = [&db](properties_t &&args) {
@ -313,6 +351,7 @@ auto load_basic_functions(Db &db)
return t.commit();
};
functions[17303982256920342123u] = match_name_type_return;
// MATCH (n)-[:TYPE]->(m) WHERE n.name = "kruno" RETURN n,m
auto match_name_type_return_cross = [&db](properties_t &&args) {
@ -393,6 +432,7 @@ auto load_basic_functions(Db &db)
return t.commit();
};
functions[17456874322957005665u] = match_name_type_return_cross;
// MATCH (n:LABEL)-[:TYPE]->(m) RETURN n
auto match_label_type_return = [&db](properties_t &&args) {
@ -433,8 +473,8 @@ auto load_basic_functions(Db &db)
t.abort();
return false;
}
};
functions[4866842751631597263u] = match_label_type_return;
// MATCH (n:LABEL {name: "TEST01"}) RETURN n;
auto match_label_property = [&db](properties_t &&args) {
@ -454,33 +494,7 @@ auto load_basic_functions(Db &db)
return false;
}
};
functions[17721584194272598838u] = match_label_property;
functions[15284086425088081497u] = match_all_nodes;
functions[4857652843629217005u] = match_by_label;
functions[15648836733456301916u] = create_edge_v2;
functions[10597108978382323595u] = create_account;
functions[5397556489557792025u] = create_labeled_and_named_node;
// TODO: query hasher reports two hash values
functions[998725786176032607u] = create_labeled_and_named_node_v2;
functions[16090682663946456821u] = create_labeled_and_named_node_v2;
functions[7939106225150551899u] = create_edge;
functions[6579425155585886196u] = create_edge;
functions[11198568396549106428u] = find_node_by_internal_id;
functions[8320600413058284114u] = find_edge_by_internal_id;
functions[6813335159006269041u] = update_node;
functions[10506105811763742758u] = match_all_delete;
functions[13742779491897528506u] = match_label_delete;
functions[11349462498691305864u] = match_id_delete;
functions[6963549500479100885u] = match_edge_id_delete;
functions[14897166600223619735u] = match_edge_all_delete;
functions[16888549834923624215u] = match_edge_type_delete;
functions[11675960684124428508u] = match_id_type_return;
functions[15698881472054193835u] = match_name_type_return;
functions[12595102442911913761u] = match_name_type_return_cross;
functions[8918221081398321263u] = match_label_type_return;
functions[7710665404758409302u] = match_label_property;
return functions;
}

View File

@ -633,6 +633,10 @@ auto load_dressipi_functions(Db &db)
return t.commit();
};
// Query: MATCH (a:garment)-[:default_outfit]-(b:garment)-[:default_outfit]-(c:garment)-[:default_outfit]-(d:garment)-[:default_outfit]-(a:garment)-[:default_outfit]-(c:garment), (b:garment)-[:default_outfit]-(d:garment) WHERE a.garment_id = 1234 RETURN a.garment_id, b.garment_id, c.garment_id, d.garment_id ORDER BY (a.score + b.score + c.score + d.score) DESC LIMIT 10
// Hash: 11856262817829095719
// TODO: automate
return functions;
}
}

View File

@ -1,19 +1,24 @@
#include "_hardcoded_query/basic.hpp"
#include "logging/default.hpp"
#include "logging/streams/stdout.hpp"
#include "query/preprocesor.hpp"
#include "query/strip/stripper.hpp"
#include "utils/assert.hpp"
#include "utils/sysinfo/memory.hpp"
template <class S, class Q>
void run(size_t n, std::string &query, S &stripper, Q &qf)
QueryPreprocessor preprocessor;
template <class Q>
void run(size_t n, std::string &query, Q &qf)
{
auto stripped = stripper.strip(query);
std::cout << "Running query [" << stripped.hash << "] for " << n << " time."
<< std::endl;
auto stripped = preprocessor.preprocess(query);
logging::info("Running query [{}] x {}.", stripped.hash, n);
for (int i = 0; i < n; i++)
{
properties_t vec = stripped.arguments;
assert(qf[stripped.hash](std::move(vec)));
permanent_assert(qf[stripped.hash](std::move(vec)), "Query failed!");
}
}
@ -29,13 +34,10 @@ int main(void)
logging::init_async();
logging::log->pipe(std::make_unique<Stdout>());
size_t entities_number = 1000;
Db db("cleaning");
auto query_functions = hardcode::load_basic_functions(db);
auto stripper = make_query_stripper(TK_LONG, TK_FLOAT, TK_STR, TK_BOOL);
size_t entities_number = 1000;
auto query_functions = hardcode::load_basic_functions(db);
std::string create_vertex_label =
"CREATE (n:LABEL {name: \"cleaner_test\"}) RETURN n";
@ -49,17 +51,21 @@ int main(void)
// clean vertices
// delete vertices a
// clean vertices
run(entities_number, create_vertex_label, stripper, query_functions);
assert(db.graph.vertices.access().size() == entities_number);
run(entities_number, create_vertex_label, query_functions);
permanent_assert(db.graph.vertices.access().size() == entities_number,
"Entities number doesn't match");
clean_vertex(db);
assert(db.graph.vertices.access().size() == entities_number);
permanent_assert(db.graph.vertices.access().size() == entities_number,
"Entities number doesn't match (after cleaning)");
run(1, delete_label_vertices, stripper, query_functions);
assert(db.graph.vertices.access().size() == entities_number);
run(1, delete_label_vertices, query_functions);
permanent_assert(db.graph.vertices.access().size() == entities_number,
"Entities number doesn't match (delete label vertices)");
clean_vertex(db);
assert(db.graph.vertices.access().size() == 0);
permanent_assert(db.graph.vertices.access().size() == 0,
"Db should be empty");
// ******************************* TEST 2 ********************************//
// add vertices a
@ -68,26 +74,33 @@ int main(void)
// delete vertices a
// clean vertices
// delete vertices all
run(entities_number, create_vertex_label, stripper, query_functions);
assert(db.graph.vertices.access().size() == entities_number);
run(entities_number, create_vertex_label, query_functions);
permanent_assert(db.graph.vertices.access().size() == entities_number,
"Entities number doesn't match");
run(entities_number, create_vertex_other, stripper, query_functions);
assert(db.graph.vertices.access().size() == entities_number * 2);
run(entities_number, create_vertex_other, query_functions);
permanent_assert(db.graph.vertices.access().size() == entities_number * 2,
"Entities number doesn't match");
clean_vertex(db);
assert(db.graph.vertices.access().size() == entities_number * 2);
permanent_assert(db.graph.vertices.access().size() == entities_number * 2,
"Entities number doesn't match");
run(1, delete_label_vertices, stripper, query_functions);
assert(db.graph.vertices.access().size() == entities_number * 2);
run(1, delete_label_vertices, query_functions);
permanent_assert(db.graph.vertices.access().size() == entities_number * 2,
"Entities number doesn't match");
clean_vertex(db);
assert(db.graph.vertices.access().size() == entities_number);
permanent_assert(db.graph.vertices.access().size() == entities_number,
"Entities number doesn't match");
run(1, delete_all_vertices, stripper, query_functions);
assert(db.graph.vertices.access().size() == entities_number);
run(1, delete_all_vertices, query_functions);
permanent_assert(db.graph.vertices.access().size() == entities_number,
"Entities number doesn't match");
clean_vertex(db);
assert(db.graph.vertices.access().size() == 0);
permanent_assert(db.graph.vertices.access().size() == 0,
"Db should be empty");
// TODO: more tests

View File

@ -0,0 +1,185 @@
#include <iostream>
#include <queue>
#include <string>
#include <vector>
#include "query/i_plan_cpu.hpp"
#include "query/util.hpp"
#include "storage/edge_x_vertex.hpp"
#include "storage/model/properties/all.hpp"
#include "storage/vertex_accessor.hpp"
#include "using.hpp"
#include "utils/memory/stack_allocator.hpp"
using std::cout;
using std::endl;
// Dressipi astar query of 4 clicks.
// TODO: figure out from the pattern in a query
constexpr size_t max_depth = 3;
// TODO: from query LIMIT 10
constexpr size_t limit = 10;
class Node
{
public:
Node *parent = {nullptr};
VertexPropertyType<Float> tkey;
double cost;
int depth = {0};
double sum = {0.0};
VertexAccessor vacc;
Node(VertexAccessor vacc, double cost,
VertexPropertyType<Float> const &tkey)
: cost(cost), vacc(vacc), tkey(tkey)
{
}
Node(VertexAccessor vacc, double cost, Node *parent,
VertexPropertyType<Float> const &tkey)
: cost(cost), vacc(vacc), parent(parent), depth(parent->depth + 1),
tkey(tkey)
{
}
double sum_vertex_score()
{
auto now = this;
double sum = 0;
do
{
sum += (now->vacc.at(tkey).get())->value();
now = now->parent;
} while (now != nullptr);
this->sum = sum;
return sum;
}
};
bool vertex_filter_contained(DbAccessor &t, VertexAccessor &v, Node *before)
{
if (v.fill())
{
bool found;
do
{
found = false;
before = before->parent;
if (before == nullptr)
{
return true;
}
} while (v.in_contains(before->vacc));
}
return false;
}
template <typename Stream>
auto astar(VertexAccessor &va, DbAccessor &t, plan_args_t &, Stream &)
{
StackAllocator stack;
std::vector<Node *> results;
// TODO: variable part (extract)
VertexPropertyType<Float> tkey = t.vertex_property_key<Float>("score");
auto cmp = [](Node *left, Node *right) { return left->cost > right->cost; };
std::priority_queue<Node *, std::vector<Node *>, decltype(cmp)> queue(cmp);
Node *start = new (stack.allocate<Node>()) Node(va, 0, tkey);
queue.push(start);
size_t count = 0;
do
{
auto now = queue.top();
queue.pop();
if (now->depth >= max_depth)
{
now->sum_vertex_score();
results.emplace_back(now);
count++;
if (count >= limit)
{
// the limit was reached -> STOP the execution
break;
}
// if the limit wasn't reached -> POP the next vertex
continue;
}
iter::for_all(now->vacc.out(), [&](auto edge) {
VertexAccessor va = edge.to();
if (vertex_filter_contained(t, va, now))
{
auto cost = 1 - va.at(tkey).get()->value();
Node *n = new (stack.allocate<Node>())
Node(va, now->cost + cost, now, tkey);
queue.push(n);
}
});
} while (!queue.empty());
stack.free();
return results;
}
void reverse_stream_ids(Node *node, Stream& stream, VertexPropertyKey key)
{
if (node == nullptr)
return;
reverse_stream_ids(node->parent, stream, key);
stream.write(node->vacc.at(key).template as<Int64>());
}
class PlanCPU : public IPlanCPU<Stream>
{
public:
bool run(Db &db, plan_args_t &args, Stream &stream) override
{
DbAccessor t(db);
indices_t indices = {{"garment_id", 0}};
auto properties = query_properties(indices, args);
auto &label = t.label_find_or_create("garment");
auto garment_id_prop_key =
t.vertex_property_key("garment_id", args[0].key.flags());
stream.write_fields(
{{"a.garment_id", "b.garment_id", "c.garment_id", "d.garment_id"}});
label.index()
.for_range(t)
.properties_filter(t, properties)
.for_all([&](auto va) {
auto results = astar(va, t, args, stream);
std::sort(results.begin(), results.end(),
[](Node *a, Node *b) { return a->sum > b->sum; });
for (auto node : results)
{
stream.write_record();
stream.write_list_header(max_depth + 1);
reverse_stream_ids(node, stream, garment_id_prop_key);
}
});
stream.write_empty_fields();
stream.write_meta("r");
return t.commit();
}
~PlanCPU() {}
};
extern "C" IPlanCPU<Stream> *produce() { return new PlanCPU(); }
extern "C" void destruct(IPlanCPU<Stream> *p) { delete p; }

View File

@ -3,10 +3,16 @@
#include "_hardcoded_query/basic.hpp"
#include "logging/default.hpp"
#include "logging/streams/stdout.hpp"
#include "query/preprocesor.hpp"
#include "query/strip/stripper.hpp"
#include "storage/indexes/indexes.hpp"
#include "utils/assert.hpp"
#include "utils/signals/handler.hpp"
#include "utils/stacktrace/log.hpp"
#include "utils/sysinfo/memory.hpp"
QueryPreprocessor preprocessor;
// Returns uniform random size_t generator from range [0,n>
auto rand_gen(size_t n)
{
@ -17,44 +23,43 @@ auto rand_gen(size_t n)
void run(size_t n, std::string &query, Db &db)
{
auto stripper = make_query_stripper(TK_LONG, TK_FLOAT, TK_STR, TK_BOOL);
auto qf = hardcode::load_basic_functions(db);
auto stripped = preprocessor.preprocess(query);
auto qf = hardcode::load_basic_functions(db);
logging::info("Running query [{}] x {}.", stripped.hash, n);
auto stripped = stripper.strip(query);
std::cout << "Running query [" << stripped.hash << "] for " << n << " time."
<< std::endl;
for (int i = 0; i < n; i++) {
for (int i = 0; i < n; i++)
{
properties_t vec = stripped.arguments;
assert(qf[stripped.hash](std::move(vec)));
auto commited = qf[stripped.hash](std::move(vec));
permanent_assert(commited, "Query execution failed");
}
}
void add_edge(size_t n, Db &db)
{
auto stripper = make_query_stripper(TK_LONG, TK_FLOAT, TK_STR, TK_BOOL);
auto qf = hardcode::load_basic_functions(db);
std::string query = "MATCH (n1), (n2) WHERE ID(n1)=0 AND "
"ID(n2)=1 CREATE (n1)<-[r:IS {age: "
"25,weight: 70}]-(n2) RETURN r";
auto stripped = preprocessor.preprocess(query);
auto stripped = stripper.strip(query);
std::cout << "Running query [" << stripped.hash << "] for " << n
<< " time to add edge." << std::endl;
logging::info("Running query [{}] (add edge) x {}", stripped.hash, n);
std::vector<int64_t> vertices;
for (auto &v : db.graph.vertices.access()) {
for (auto &v : db.graph.vertices.access())
{
vertices.push_back(v.second.id);
}
permanent_assert(vertices.size() > 0, "Vertices size is zero");
auto rand = rand_gen(vertices.size());
for (int i = 0; i < n; i++) {
for (int i = 0; i < n; i++)
{
properties_t vec = stripped.arguments;
vec[0] = Property(Int64(vertices[rand()]), Flags::Int64);
vec[1] = Property(Int64(vertices[rand()]), Flags::Int64);
assert(qf[stripped.hash](std::move(vec)));
vec[0] = Property(Int64(vertices[rand()]), Flags::Int64);
vec[1] = Property(Int64(vertices[rand()]), Flags::Int64);
permanent_assert(qf[stripped.hash](std::move(vec)), "Add edge failed");
}
}
@ -64,7 +69,7 @@ void add_property(Db &db, StoredProperty<TypeGroupVertex> &prop)
t.vertex_access().fill().update().for_all([&](auto va) { va.set(prop); });
assert(t.commit());
permanent_assert(t.commit(), "Add property failed");
}
void add_vertex_property_serial_int(Db &db, PropertyFamily<TypeGroupVertex> &f)
@ -79,7 +84,7 @@ void add_vertex_property_serial_int(Db &db, PropertyFamily<TypeGroupVertex> &f)
i++;
});
assert(t.commit());
permanent_assert(t.commit(), "Add vertex property serial int failed");
}
void add_edge_property_serial_int(Db &db, PropertyFamily<TypeGroupEdge> &f)
@ -94,7 +99,7 @@ void add_edge_property_serial_int(Db &db, PropertyFamily<TypeGroupEdge> &f)
i++;
});
assert(t.commit());
permanent_assert(t.commit(), "Add Edge property serial int failed");
}
template <class TG>
@ -103,8 +108,9 @@ size_t size(Db &db, IndexHolder<TG, std::nullptr_t> &h)
DbAccessor t(db);
size_t count = 0;
auto oin = h.get_read();
if (oin.is_present()) {
auto oin = h.get_read();
if (oin.is_present())
{
oin.get()->for_range(t).for_all([&](auto va) mutable { count++; });
}
@ -115,8 +121,10 @@ size_t size(Db &db, IndexHolder<TG, std::nullptr_t> &h)
void assert_empty(Db &db)
{
assert(db.graph.vertices.access().size() == 0);
assert(db.graph.edges.access().size() == 0);
permanent_assert(db.graph.vertices.access().size() == 0,
"DB isn't empty (vertices)");
permanent_assert(db.graph.edges.access().size() == 0,
"DB isn't empty (edges)");
}
void clean_vertex(Db &db)
@ -136,7 +144,7 @@ void clean_edge(Db &db)
void clear_database(Db &db)
{
std::string delete_all_vertices = "MATCH (n) DELETE n";
std::string delete_all_edges = "MATCH ()-[r]-() DELETE r";
std::string delete_all_edges = "MATCH ()-[r]-() DELETE r";
run(1, delete_all_edges, db);
run(1, delete_all_vertices, db);
@ -151,14 +159,16 @@ bool equal(Db &a, Db &b)
auto acc_a = a.graph.vertices.access();
auto acc_b = b.graph.vertices.access();
if (acc_a.size() != acc_b.size()) {
if (acc_a.size() != acc_b.size())
{
return false;
}
auto it_a = acc_a.begin();
auto it_b = acc_b.begin();
for (auto i = acc_a.size(); i > 0; i--) {
for (auto i = acc_a.size(); i > 0; i--)
{
// TODO: compare
}
}
@ -167,14 +177,16 @@ bool equal(Db &a, Db &b)
auto acc_a = a.graph.edges.access();
auto acc_b = b.graph.edges.access();
if (acc_a.size() != acc_b.size()) {
if (acc_a.size() != acc_b.size())
{
return false;
}
auto it_a = acc_a.begin();
auto it_b = acc_b.begin();
for (auto i = acc_a.size(); i > 0; i--) {
for (auto i = acc_a.size(); i > 0; i--)
{
// TODO: compare
}
}
@ -187,6 +199,16 @@ int main(void)
logging::init_async();
logging::log->pipe(std::make_unique<Stdout>());
SignalHandler::register_handler(Signal::SegmentationFault, []() {
log_stacktrace("SegmentationFault signal raised");
std::exit(EXIT_FAILURE);
});
SignalHandler::register_handler(Signal::BusError, []() {
log_stacktrace("Bus error signal raised");
std::exit(EXIT_FAILURE);
});
size_t cvl_n = 1;
std::string create_vertex_label =
@ -194,7 +216,7 @@ int main(void)
std::string create_vertex_other =
"CREATE (n:OTHER {name: \"cleaner_test\"}) RETURN n";
std::string delete_label_vertices = "MATCH (n:LABEL) DELETE n";
std::string delete_all_vertices = "MATCH (n) DELETE n";
std::string delete_all_vertices = "MATCH (n) DELETE n";
IndexDefinition vertex_property_nonunique_unordered = {
IndexLocation{VertexSide, Option<std::string>("prop"),
@ -215,15 +237,19 @@ int main(void)
// ******************************* TEST 1 ********************************//
{
std::cout << "TEST1" << std::endl;
logging::info("TEST 1");
// add indexes
// add vertices LABEL
// add edges
// add vertices property
// assert index size.
Db db("index", false);
assert(db.indexes().add_index(vertex_property_nonunique_unordered));
assert(db.indexes().add_index(edge_property_nonunique_unordered));
permanent_assert(
db.indexes().add_index(vertex_property_nonunique_unordered),
"Add vertex index failed");
permanent_assert(
db.indexes().add_index(edge_property_nonunique_unordered),
"Add edge index failed");
run(cvl_n, create_vertex_label, db);
auto sp = StoredProperty<TypeGroupVertex>(
@ -232,18 +258,21 @@ int main(void)
.family_key());
add_property(db, sp);
assert(cvl_n ==
size(db, db.graph.vertices.property_family_find_or_create("prop")
.index));
permanent_assert(
cvl_n == size(db, db.graph.vertices
.property_family_find_or_create("prop")
.index),
"Create vertex property failed");
add_edge(cvl_n, db);
add_edge_property_serial_int(
db, db.graph.edges.property_family_find_or_create("prop"));
assert(
permanent_assert(
cvl_n ==
size(db,
db.graph.edges.property_family_find_or_create("prop").index));
size(db, db.graph.edges.property_family_find_or_create("prop")
.index),
"Create edge property failed");
}
// TODO: more tests

View File

@ -8,6 +8,7 @@
#include "utils/string/file.hpp"
#include "utils/variadic/variadic.hpp"
#include "utils/command_line/arguments.hpp"
#include "stream/print_record_stream.hpp"
Logger logger;
@ -15,10 +16,14 @@ int main(int argc, char *argv[])
{
auto arguments = all_arguments(argc, argv);
PrintRecordStream stream(std::cout);
// POSSIBILITIES: basic, dressipi
auto suite_name = get_argument(arguments, "-s", "basic");
// POSSIBILITIES: query_execution, hash_generation
auto work_mode = get_argument(arguments, "-w", "query_execution");
// POSSIBILITIES: mg_basic.txt, dressipi_basic.txt, dressipi_graph.txt
auto query_set_filename = get_argument(arguments, "-q", "mg_basic.txt");
// init logging
logging::init_sync();
@ -39,7 +44,7 @@ int main(int argc, char *argv[])
auto stripper = make_query_stripper(TK_LONG, TK_FLOAT, TK_STR, TK_BOOL);
// load quries
std::string file_path = "data/queries/core/" + suite_name + ".txt";
std::string file_path = "data/queries/core/" + query_set_filename;
auto queries = utils::read_lines(file_path.c_str());
// execute all queries

View File

@ -1,12 +1,18 @@
#include <random>
#include "_hardcoded_query/basic.hpp"
#include "logging/default.hpp"
#include "logging/streams/stdout.hpp"
#include "_hardcoded_query/basic.hpp"
#include "query/preprocesor.hpp"
#include "query/strip/stripper.hpp"
#include "storage/indexes/indexes.hpp"
#include "utils/assert.hpp"
#include "utils/signals/handler.hpp"
#include "utils/stacktrace/log.hpp"
#include "utils/sysinfo/memory.hpp"
QueryPreprocessor preprocessor;
// Returns uniform random size_t generator from range [0,n>
auto rand_gen(size_t n)
{
@ -17,32 +23,28 @@ auto rand_gen(size_t n)
void run(size_t n, std::string &query, Db &db)
{
auto stripper = make_query_stripper(TK_LONG, TK_FLOAT, TK_STR, TK_BOOL);
auto qf = hardcode::load_basic_functions(db);
auto stripped = preprocessor.preprocess(query);
auto qf = hardcode::load_basic_functions(db);
logging::info("Running query {} [{}] x {}.", query, stripped.hash, n);
auto stripped = stripper.strip(query);
std::cout << "Running query [" << stripped.hash << "] for " << n << " time."
<< std::endl;
for (int i = 0; i < n; i++)
{
properties_t vec = stripped.arguments;
assert(qf[stripped.hash](std::move(vec)));
permanent_assert(qf[stripped.hash](std::move(vec)), "Query aborted");
}
}
void add_edge(size_t n, Db &db)
{
auto stripper = make_query_stripper(TK_LONG, TK_FLOAT, TK_STR, TK_BOOL);
auto qf = hardcode::load_basic_functions(db);
auto qf = hardcode::load_basic_functions(db);
std::string query = "MATCH (n1), (n2) WHERE ID(n1)=0 AND "
"ID(n2)=1 CREATE (n1)<-[r:IS {age: "
"25,weight: 70}]-(n2) RETURN r";
auto stripped = preprocessor.preprocess(query);
auto stripped = stripper.strip(query);
std::cout << "Running query [" << stripped.hash << "] for " << n
<< " time to add edge." << std::endl;
logging::info("Running query {} [{}] x {}.", query, stripped.hash, n);
std::vector<int64_t> vertices;
for (auto &v : db.graph.vertices.access())
@ -56,7 +58,7 @@ void add_edge(size_t n, Db &db)
properties_t vec = stripped.arguments;
vec[0] = Property(Int64(vertices[rand()]), Flags::Int64);
vec[1] = Property(Int64(vertices[rand()]), Flags::Int64);
assert(qf[stripped.hash](std::move(vec)));
permanent_assert(qf[stripped.hash](std::move(vec)), "Query aborted");
}
}
@ -66,7 +68,8 @@ void add_property(Db &db, StoredProperty<TypeGroupVertex> &prop)
t.vertex_access().fill().for_all([&](auto va) { va.set(prop); });
assert(t.commit());
permanent_assert(t.commit(), "add property query aborted");
;
}
void add_property_different_int(Db &db, PropertyFamily<TypeGroupVertex> &f)
@ -81,7 +84,7 @@ void add_property_different_int(Db &db, PropertyFamily<TypeGroupVertex> &f)
i++;
});
assert(t.commit());
permanent_assert(t.commit(), "add property different int aborted");
}
size_t size(Db &db, IndexHolder<TypeGroupVertex, std::nullptr_t> &h)
@ -102,8 +105,8 @@ size_t size(Db &db, IndexHolder<TypeGroupVertex, std::nullptr_t> &h)
void assert_empty(Db &db)
{
assert(db.graph.vertices.access().size() == 0);
assert(db.graph.edges.access().size() == 0);
permanent_assert(db.graph.vertices.access().size() == 0, "Db isn't empty");
permanent_assert(db.graph.edges.access().size() == 0, "Db isn't empty");
}
void clean_vertex(Db &db)
@ -178,6 +181,11 @@ int main(void)
logging::init_async();
logging::log->pipe(std::make_unique<Stdout>());
SignalHandler::register_handler(Signal::SegmentationFault, []() {
log_stacktrace("SegmentationFault signal raised");
std::exit(EXIT_FAILURE);
});
size_t cvl_n = 1000;
std::string create_vertex_label =
@ -187,9 +195,8 @@ int main(void)
std::string delete_label_vertices = "MATCH (n:LABEL) DELETE n";
std::string delete_all_vertices = "MATCH (n) DELETE n";
// ******************************* TEST 1 ********************************//
{
std::cout << "TEST1" << std::endl;
logging::info("TEST 1");
// make snapshot of empty db
// add vertexs
// add edges
@ -203,11 +210,11 @@ int main(void)
clear_database(db);
db.snap_engine.import();
assert_empty(db);
logging::info("END of TEST 1");
}
// ******************************* TEST 2 ********************************//
{
std::cout << "TEST2" << std::endl;
logging::info("TEST 2");
// add vertexs
// add edges
// make snapshot of db
@ -223,13 +230,12 @@ int main(void)
db.snap_engine.import();
{
Db db2("snapshot");
assert(equal(db, db2));
permanent_assert(equal(db, db2), "Dbs aren't equal");
}
}
// ******************************* TEST 3 ********************************//
{
std::cout << "TEST3" << std::endl;
logging::info("TEST 3");
// add vertexs
// add edges
// make snapshot of db
@ -240,13 +246,12 @@ int main(void)
db.snap_engine.make_snapshot();
{
Db db2("not_snapshot");
assert(!equal(db, db2));
permanent_assert(!equal(db, db2), "Dbs are equal");
}
}
// ******************************* TEST 4 ********************************//
{
std::cout << "TEST4" << std::endl;
logging::info("TEST 4");
// add vertices LABEL
// add properties
// add vertices LABEL
@ -265,14 +270,17 @@ int main(void)
IndexLocation{VertexSide, Option<std::string>("prop"),
Option<std::string>(), Option<std::string>()},
IndexType{false, None}};
assert(db.indexes().add_index(idef));
assert(cvl_n == size(db, family.index));
permanent_assert(db.indexes().add_index(idef), "Index isn't added");
permanent_assert(cvl_n == size(db, family.index),
"Index size isn't valid");
db.snap_engine.make_snapshot();
{
Db db2("snapshot");
assert(cvl_n == size(db, db2.graph.vertices
.property_family_find_or_create("prop")
.index));
permanent_assert(
cvl_n == size(db, db2.graph.vertices
.property_family_find_or_create("prop")
.index),
"Index size isn't valid");
}
}

View File

@ -0,0 +1,140 @@
#pragma once
#include <string>
#include <vector>
#include <map>
#include "utils/exceptions/not_yet_implemented.hpp"
class PrintRecordStream
{
private:
std::ostream& stream;
public:
PrintRecordStream(std::ostream &stream) : stream(stream) {}
void write_success()
{
stream << "SUCCESS\n";
}
void write_success_empty()
{
stream << "SUCCESS EMPTY\n";
}
void write_ignored()
{
stream << "IGNORED\n";
}
void write_empty_fields()
{
stream << "EMPTY FIELDS\n";
}
void write_fields(const std::vector<std::string> &fields)
{
stream << "FIELDS:";
for (auto &field : fields)
{
stream << " " << field;
}
stream << '\n';
}
void write_field(const std::string &field)
{
stream << "Field: " << field << '\n';
}
void write_list_header(size_t size)
{
stream << "List: " << size << '\n';
}
void write_record()
{
stream << "Record\n";
}
void write_meta(const std::string &type)
{
stream << "Meta: " << type;
}
void write_failure(const std::map<std::string, std::string> &data)
{
throw NotYetImplemented();
}
void write_count(const size_t count)
{
throw NotYetImplemented();
}
void write(const VertexAccessor &vertex)
{
throw NotYetImplemented();
}
void write_vertex_record(const VertexAccessor& va)
{
throw NotYetImplemented();
}
void write(const EdgeAccessor &edge)
{
throw NotYetImplemented();
}
void write_edge_record(const EdgeAccessor& ea)
{
throw NotYetImplemented();
}
void write(const StoredProperty<TypeGroupEdge> &prop)
{
// prop.accept(serializer);
throw NotYetImplemented();
}
void write(const StoredProperty<TypeGroupVertex> &prop)
{
// prop.accept(serializer);
throw NotYetImplemented();
}
void write(const Null &prop)
{
throw NotYetImplemented();
}
void write(const Bool &prop)
{
throw NotYetImplemented();
}
void write(const Float &prop) { throw NotYetImplemented(); }
void write(const Int32 &prop) { throw NotYetImplemented(); }
void write(const Int64 &prop) { throw NotYetImplemented(); }
void write(const Double &prop) { throw NotYetImplemented(); }
void write(const String &prop) { throw NotYetImplemented(); }
void write(const ArrayBool &prop) { throw NotYetImplemented(); }
void write(const ArrayInt32 &prop) { throw NotYetImplemented(); }
void write(const ArrayInt64 &prop) { throw NotYetImplemented(); }
void write(const ArrayFloat &prop) { throw NotYetImplemented(); }
void write(const ArrayDouble &prop) { throw NotYetImplemented(); }
void write(const ArrayString &prop) { throw NotYetImplemented(); }
void send()
{
throw NotYetImplemented();
}
void chunk()
{
throw NotYetImplemented();
}
};

View File

@ -0,0 +1,44 @@
find_package(Threads REQUIRED)
# set current directory name as a test type
get_filename_component(test_type ${CMAKE_CURRENT_SOURCE_DIR} NAME)
# get all cpp abs file names recursively starting from current directory
file(GLOB_RECURSE test_type_cpps *.cpp)
message(STATUS "Available ${test_type} cpp files are: ${test_type_cpps}")
# for each cpp file build binary and register test
foreach(test_cpp ${test_type_cpps})
# get exec name (remove extension from the abs path)
get_filename_component(exec_name ${test_cpp} NAME_WE)
# set target name in format {project_name}_{test_type}_{exec_name}
set(target_name ${project_name}_${test_type}_${exec_name})
# build exec file
add_executable(${target_name} ${test_cpp})
set_property(TARGET ${target_name} PROPERTY CXX_STANDARD ${cxx_standard})
# OUTPUT_NAME sets the real name of a target when it is built and can be
# used to help create two targets of the same name even though CMake
# requires unique logical target names
set_target_properties(${target_name} PROPERTIES OUTPUT_NAME ${exec_name})
# link libraries
# filesystem
target_link_libraries(${target_name} stdc++fs)
# threads (cross-platform)
target_link_libraries(${target_name} Threads::Threads)
# memgraph lib
target_link_libraries(${target_name} memgraph_lib)
# fmt format lib
target_link_libraries(${target_name} ${fmt_static_lib})
# yaml parser lib
target_link_libraries(${target_name} ${yaml_static_lib})
# cypher lib
target_link_libraries(${target_name} cypher_lib)
# dynamic lib
target_link_libraries(${target_name} dl)
endforeach()

Some files were not shown because too many files have changed in this diff Show More