Stop building storage v1

Reviewers: teon.banek

Reviewed By: teon.banek

Subscribers: pullbot

Differential Revision: https://phabricator.memgraph.io/D2618
Matej Ferencevic 2020-01-13 13:36:40 +01:00
parent 79947c376b
commit 5906258de0
77 changed files with 4 additions and 9515 deletions

View File

@ -5,7 +5,6 @@
- build_release/memgraph
- build_release/memgraph_ha
- build_release/tools/src/mg_client
- build_release/tools/src/mg_import_csv
- config
filename: binaries.tar.gz

View File

@ -28,87 +28,6 @@ add_custom_target(generate_lcp_common DEPENDS ${generated_lcp_common_files})
# END Common LCP files
# ----------------------------------------------------------------------------
# ----------------------------------------------------------------------------
# Memgraph Single Node
# ----------------------------------------------------------------------------
set(mg_single_node_sources
${lcp_common_cpp_files}
audit/log.cpp
data_structures/concurrent/skiplist_gc.cpp
database/single_node/config.cpp
database/single_node/graph_db.cpp
database/single_node/graph_db_accessor.cpp
durability/single_node/state_delta.cpp
durability/single_node/paths.cpp
durability/single_node/recovery.cpp
durability/single_node/snapshooter.cpp
durability/single_node/wal.cpp
glue/auth.cpp
glue/communication.cpp
query/common.cpp
query/dump.cpp
query/frontend/ast/cypher_main_visitor.cpp
query/frontend/ast/pretty_print.cpp
query/frontend/parsing.cpp
query/frontend/semantic/required_privileges.cpp
query/frontend/semantic/symbol_generator.cpp
query/frontend/stripped.cpp
query/interpret/awesome_memgraph_functions.cpp
query/interpreter.cpp
query/plan/operator.cpp
query/plan/preprocess.cpp
query/plan/pretty_print.cpp
query/plan/profile.cpp
query/plan/rewrite/index_lookup.cpp
query/plan/rule_based_planner.cpp
query/plan/variable_start_planner.cpp
query/procedure/mg_procedure_impl.cpp
query/procedure/module.cpp
query/typed_value.cpp
storage/common/constraints/record.cpp
storage/common/constraints/unique_constraints.cpp
storage/common/locking/record_lock.cpp
storage/common/types/property_value_store.cpp
storage/single_node/edge_accessor.cpp
storage/single_node/record_accessor.cpp
storage/single_node/vertex_accessor.cpp
transactions/single_node/engine.cpp
memgraph_init.cpp
)
define_add_lcp(add_lcp_single_node mg_single_node_sources generated_lcp_single_node_files)
add_lcp_single_node(durability/single_node/state_delta.lcp)
add_custom_target(generate_lcp_single_node DEPENDS generate_lcp_common ${generated_lcp_single_node_files})
set(MG_SINGLE_NODE_LIBS stdc++fs Threads::Threads fmt cppitertools
antlr_opencypher_parser_lib dl glog gflags
mg-utils mg-io mg-requests mg-communication)
# These are enterprise subsystems
set(MG_SINGLE_NODE_LIBS ${MG_SINGLE_NODE_LIBS} mg-auth)
if (USE_LTALLOC)
list(APPEND MG_SINGLE_NODE_LIBS ltalloc)
# TODO(mferencevic): Enable this when clang is updated on apollo.
# set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -flto")
endif()
add_library(mg-single-node STATIC ${mg_single_node_sources})
target_include_directories(mg-single-node PUBLIC ${CMAKE_SOURCE_DIR}/include)
target_link_libraries(mg-single-node ${MG_SINGLE_NODE_LIBS})
add_dependencies(mg-single-node generate_opencypher_parser)
add_dependencies(mg-single-node generate_lcp_single_node)
target_compile_definitions(mg-single-node PUBLIC MG_SINGLE_NODE)
# NOTE: `include/mg_procedure.syms` describes a pattern match for symbols which
# should be dynamically exported, so that `dlopen` can correctly link the
# symbols in custom procedure module libraries.
target_link_libraries(mg-single-node "-Wl,--dynamic-list=${CMAKE_SOURCE_DIR}/include/mg_procedure.syms")
# ----------------------------------------------------------------------------
# END Memgraph Single Node
# ----------------------------------------------------------------------------
# ----------------------------------------------------------------------------
# Memgraph Single Node High Availability
# ----------------------------------------------------------------------------
@ -200,7 +119,7 @@ target_compile_definitions(mg-single-node-ha PUBLIC MG_SINGLE_NODE_HA)
# ----------------------------------------------------------------------------
add_custom_target(generate_lcp)
add_dependencies(generate_lcp generate_lcp_single_node generate_lcp_single_node_ha)
add_dependencies(generate_lcp generate_lcp_single_node_ha)
string(TOLOWER ${CMAKE_BUILD_TYPE} lower_build_type)
@ -217,10 +136,6 @@ set(VERSION_STRING ${memgraph_VERSION})
configure_file(version.hpp.in version.hpp @ONLY)
include_directories(${CMAKE_CURRENT_BINARY_DIR})
# memgraph main executable (old storage)
add_executable(memgraph-v1 memgraph.cpp)
target_link_libraries(memgraph-v1 mg-single-node kvstore_lib telemetry_lib)
# ----------------------------------------------------------------------------
# Memgraph Single Node v2 Executable
# ----------------------------------------------------------------------------

View File

@ -18,18 +18,6 @@ function(add_benchmark test_cpp)
add_dependencies(memgraph__benchmark ${target_name})
endfunction(add_benchmark)
add_benchmark(data_structures/concurrent/map_mix_concurrent.cpp)
target_link_libraries(${test_prefix}map_mix_concurrent mg-single-node kvstore_dummy_lib)
add_benchmark(data_structures/concurrent/skiplist_insert.cpp)
target_link_libraries(${test_prefix}skiplist_insert mg-single-node kvstore_dummy_lib)
add_benchmark(data_structures/concurrent/skiplist_reverse_iteration.cpp)
target_link_libraries(${test_prefix}skiplist_reverse_iteration mg-single-node kvstore_dummy_lib)
add_benchmark(data_structures/concurrent/map_concurrent.cpp)
target_link_libraries(${test_prefix}map_concurrent mg-single-node kvstore_dummy_lib)
add_benchmark(data_structures/ring_buffer.cpp)
target_link_libraries(${test_prefix}ring_buffer mg-utils)
@ -48,12 +36,6 @@ target_link_libraries(${test_prefix}profile mg-query-with-kvstore-dummy)
add_benchmark(query/stripped.cpp)
target_link_libraries(${test_prefix}stripped mg-query-with-kvstore-dummy)
add_benchmark(edge_storage.cpp)
target_link_libraries(${test_prefix}edge_storage mg-single-node kvstore_dummy_lib)
add_benchmark(mvcc.cpp)
target_link_libraries(${test_prefix}mvcc mg-single-node kvstore_dummy_lib)
add_benchmark(rpc.cpp)
target_link_libraries(${test_prefix}rpc mg-comm-rpc)
@ -69,9 +51,6 @@ target_link_libraries(${test_prefix}skip_list_same_item mg-utils)
add_benchmark(skip_list_vs_stl.cpp)
target_link_libraries(${test_prefix}skip_list_vs_stl mg-utils)
add_benchmark(tx_engine.cpp)
target_link_libraries(${test_prefix}tx_engine mg-single-node kvstore_dummy_lib)
add_benchmark(expansion.cpp ${CMAKE_SOURCE_DIR}/src/glue/communication.cpp)
target_link_libraries(${test_prefix}expansion mg-query-with-kvstore-dummy mg-communication)

View File

@ -1,252 +0,0 @@
#include <random>
#include <thread>
#include <benchmark/benchmark_api.h>
#include <gflags/gflags.h>
#include <glog/logging.h>
#include "data_structures/concurrent/concurrent_map.hpp"
#include "utils/random/random_generator.hpp"
/*
ConcurrentMap Benchmark Test:
- tests the time of Insert, Contains and Delete operations
- benchmarks the time per operation
- runs ConcurrentMap with the following key and value types:
- <int,int>
- <int, string>
- <string, int>
- <string, string>
*/
using utils::random::NumberGenerator;
using utils::random::PairGenerator;
using utils::random::StringGenerator;
using IntegerGenerator = NumberGenerator<std::uniform_int_distribution<int>,
std::default_random_engine, int>;
DEFINE_int32(start, 0, "Range start");
DEFINE_int32(end, 1000000000, "Range end");
DEFINE_int32(threads, 1, "Number of threads");
DEFINE_int32(string_length, 128, "String length");
// Global arguments
int MAX_ELEMENTS = 1 << 18, MULTIPLIER = 2;
int THREADS, RANGE_START, RANGE_END, STRING_LENGTH;
/*
ConcurrentMap Insertion Benchmark Test
*/
template <class K, class V>
static void InsertValue(benchmark::State &state, ConcurrentMap<K, V> *map,
const std::vector<std::pair<K, V>> &elements) {
while (state.KeepRunning()) {
auto accessor = map->access();
for (int start = 0; start < state.range(0); start++) {
accessor.insert(elements[start].first, elements[start].second);
}
}
state.SetComplexityN(state.range(0));
}
/*
ConcurrentMap Deletion Benchmark Test
*/
template <class K, class V>
static void DeleteValue(benchmark::State &state, ConcurrentMap<K, V> *map,
const std::vector<std::pair<K, V>> elements) {
while (state.KeepRunning()) {
auto accessor = map->access();
for (int start = 0; start < state.range(0); start++) {
accessor.remove(elements[start].first);
}
}
state.SetComplexityN(state.range(0));
}
/*
ConcurrentMap Contains Benchmark Test
*/
template <class K, class V>
static void ContainsValue(benchmark::State &state, ConcurrentMap<K, V> *map,
const std::vector<std::pair<K, V>> elements) {
while (state.KeepRunning()) {
auto accessor = map->access();
for (int start = 0; start < state.range(0); start++) {
accessor.contains(elements[start].first);
}
}
state.SetComplexityN(state.range(0));
}
auto BM_InsertValue = [](benchmark::State &state, auto *map, auto &elements) {
InsertValue(state, map, elements);
};
auto BM_DeleteValue = [](benchmark::State &state, auto *map, auto elements) {
DeleteValue(state, map, elements);
};
auto BM_ContainsValue = [](benchmark::State &state, auto *map, auto elements) {
ContainsValue(state, map, elements);
};
/*
Commandline Argument Parsing
Arguments:
* Integer Range Minimum
-start number
* Integer Range Maximum
- end number
* Number of threads
- threads number
* Random String length
-string-length number
*/
void parse_arguments() {
RANGE_START = FLAGS_start;
RANGE_END = FLAGS_end;
THREADS = std::min(FLAGS_threads,
static_cast<int>(std::thread::hardware_concurrency()));
STRING_LENGTH = FLAGS_string_length;
}
int main(int argc, char **argv) {
benchmark::Initialize(&argc, argv);
parse_arguments();
google::InitGoogleLogging(argv[0]);
StringGenerator sg(STRING_LENGTH);
IntegerGenerator ig(RANGE_START, RANGE_END);
/*
Creates RandomGenerators, ConcurrentMaps and Random Element Vectors for the
following use cases:
Map elements contain keys and values for:
<int, int>,
<int, string>
<string, int>
<string, string>
*/
// random generators for tests
PairGenerator<IntegerGenerator, IntegerGenerator> piig(&ig, &ig);
PairGenerator<StringGenerator, StringGenerator> pssg(&sg, &sg);
PairGenerator<StringGenerator, IntegerGenerator> psig(&sg, &ig);
PairGenerator<IntegerGenerator, StringGenerator> pisg(&ig, &sg);
// maps used for testing
ConcurrentMap<int, int> ii_map;
ConcurrentMap<int, std::string> is_map;
ConcurrentMap<std::string, int> si_map;
ConcurrentMap<std::string, std::string> ss_map;
// random elements for testing
auto ii_elems = utils::random::generate_vector(piig, MAX_ELEMENTS);
auto is_elems = utils::random::generate_vector(pisg, MAX_ELEMENTS);
auto si_elems = utils::random::generate_vector(psig, MAX_ELEMENTS);
auto ss_elems = utils::random::generate_vector(pssg, MAX_ELEMENTS);
/* insertion Tests */
benchmark::RegisterBenchmark("InsertValue[Int, Int]", BM_InsertValue, &ii_map,
ii_elems)
->RangeMultiplier(MULTIPLIER)
->Range(1, MAX_ELEMENTS)
->Complexity(benchmark::oN)
->Threads(THREADS);
benchmark::RegisterBenchmark("InsertValue[Int, String]", BM_InsertValue,
&is_map, is_elems)
->RangeMultiplier(MULTIPLIER)
->Range(1, MAX_ELEMENTS)
->Complexity(benchmark::oN)
->Threads(THREADS);
benchmark::RegisterBenchmark("InsertValue[String, Int]", BM_InsertValue,
&si_map, si_elems)
->RangeMultiplier(MULTIPLIER)
->Range(1, MAX_ELEMENTS)
->Complexity(benchmark::oN)
->Threads(THREADS);
benchmark::RegisterBenchmark("InsertValue[String, String]", BM_InsertValue,
&ss_map, ss_elems)
->RangeMultiplier(MULTIPLIER)
->Range(1, MAX_ELEMENTS)
->Complexity(benchmark::oN)
->Threads(THREADS);
// Contains Benchmark Tests
benchmark::RegisterBenchmark("ContainsValue[Int, Int]", BM_ContainsValue,
&ii_map, ii_elems)
->RangeMultiplier(MULTIPLIER)
->Range(1, MAX_ELEMENTS)
->Complexity(benchmark::oN)
->Threads(THREADS);
benchmark::RegisterBenchmark("ContainsValue[Int, String]", BM_ContainsValue,
&is_map, is_elems)
->RangeMultiplier(MULTIPLIER)
->Range(1, MAX_ELEMENTS)
->Complexity(benchmark::oN)
->Threads(THREADS);
benchmark::RegisterBenchmark("ContainsValue[String, Int]", BM_ContainsValue,
&si_map, si_elems)
->RangeMultiplier(MULTIPLIER)
->Range(1, MAX_ELEMENTS)
->Complexity(benchmark::oN)
->Threads(THREADS);
benchmark::RegisterBenchmark("ContainsValue[String, String]",
BM_ContainsValue, &ss_map, ss_elems)
->RangeMultiplier(MULTIPLIER)
->Range(1, MAX_ELEMENTS)
->Complexity(benchmark::oN)
->Threads(THREADS);
// Deletion Benchmark Tests
benchmark::RegisterBenchmark("DeleteValue[Int, Int]", BM_DeleteValue, &ii_map,
ii_elems)
->RangeMultiplier(MULTIPLIER)
->Range(1, MAX_ELEMENTS)
->Complexity(benchmark::oN)
->Threads(THREADS);
benchmark::RegisterBenchmark("DeleteValue[Int, String]", BM_DeleteValue,
&is_map, is_elems)
->RangeMultiplier(MULTIPLIER)
->Range(1, MAX_ELEMENTS)
->Complexity(benchmark::oN)
->Threads(THREADS);
benchmark::RegisterBenchmark("DeleteValue[String, Int]", BM_DeleteValue,
&si_map, si_elems)
->RangeMultiplier(MULTIPLIER)
->Range(1, MAX_ELEMENTS)
->Complexity(benchmark::oN)
->Threads(THREADS);
benchmark::RegisterBenchmark("DeleteValue[String, String]", BM_DeleteValue,
&ss_map, ss_elems)
->RangeMultiplier(MULTIPLIER)
->Range(1, MAX_ELEMENTS)
->Complexity(benchmark::oN)
->Threads(THREADS);
benchmark::RunSpecifiedBenchmarks();
return 0;
}

View File

@ -1,125 +0,0 @@
#include <iostream>
#include <random>
#include <thread>
#include <benchmark/benchmark_api.h>
#include <glog/logging.h>
#include "gflags/gflags.h"
#include "data_structures/concurrent/concurrent_map.hpp"
#include "utils/random/random_generator.hpp"
/*
ConcurrentMap Benchmark Test:
- tests time of Insertion, Deletion and Find
- benchmarks time for total execution with operation percentages
*/
using utils::random::NumberGenerator;
using utils::random::PairGenerator;
using IntegerGenerator = NumberGenerator<std::uniform_int_distribution<int>,
std::default_random_engine, int>;
// Global Arguments
int MAX_ELEMENTS = 1 << 20, MULTIPLIER = 2;
int THREADS, INSERT_PERC, DELETE_PERC, CONTAINS_PERC, RANGE_START, RANGE_END;
// ConcurrentMap Benchmark Test using percentages for Insert, Delete, Find
template <class K, class V>
static void Rape(benchmark::State &state, ConcurrentMap<int, int> *map,
const std::vector<std::pair<K, V>> &elements) {
int number_of_elements = state.range(0);
while (state.KeepRunning()) {
auto accessor = map->access();
for (int start = 0; start < state.range(0); start++) {
float current_percentage = (float)start / (float)number_of_elements * 100;
if (current_percentage < (float)INSERT_PERC) {
accessor.insert(elements[start].first, elements[start].second);
} else if (current_percentage < (float)CONTAINS_PERC + INSERT_PERC) {
accessor.contains(elements[start].first);
} else {
accessor.remove(elements[start].first);
}
}
}
state.SetComplexityN(state.range(0));
}
auto BM_Rape = [](benchmark::State &state, auto *map, auto &elements) {
Rape(state, map, elements);
};
DEFINE_int32(insert, 50, "Insertions percentage");
DEFINE_int32(delete, 20, "Deletions percentage");
DEFINE_int32(find, 30, "Find percentage");
DEFINE_int32(start, 0, "Range start");
DEFINE_int32(end, 1000000000, "Range end");
DEFINE_int32(threads, 1, "Number of threads");
/*
Commandline Arguments Parsing
Arguments:
* Insertion percentage (0-100)
-insert number(int)
* Deletion percentage (0-100)
-delete number(int)
* Find percentage (0-100)
-find number(int)
* Integer Range Minimum
-start number
* Integer Range Maximum
- end number
* Number of threads
-threads number
*/
void parse_arguments() {
INSERT_PERC = FLAGS_insert;
DELETE_PERC = FLAGS_delete;
CONTAINS_PERC = FLAGS_find;
if (INSERT_PERC + DELETE_PERC + CONTAINS_PERC != 100) {
std::cout << "Invalid percentage" << std::endl;
std::cout << "Percentage must sum to 100" << std::endl;
exit(-1);
}
RANGE_START = FLAGS_start;
RANGE_END = FLAGS_end;
THREADS = std::min(FLAGS_threads,
static_cast<int>(std::thread::hardware_concurrency()));
}
int main(int argc, char **argv) {
benchmark::Initialize(&argc, argv);
parse_arguments();
google::InitGoogleLogging(argv[0]);
IntegerGenerator int_gen(RANGE_START, RANGE_END);
PairGenerator<IntegerGenerator, IntegerGenerator> pair_gen(&int_gen,
&int_gen);
ConcurrentMap<int, int> map;
auto elements = utils::random::generate_vector(pair_gen, MAX_ELEMENTS);
benchmark::RegisterBenchmark("Rape", BM_Rape, &map, elements)
->RangeMultiplier(MULTIPLIER)
->Range(1, MAX_ELEMENTS)
->Complexity(benchmark::oN)
->Threads(THREADS);
benchmark::RunSpecifiedBenchmarks();
return 0;
}

View File

@ -1,95 +0,0 @@
#pragma once
#include <algorithm>
#include <atomic>
#include <chrono>
#include <thread>
#include <vector>
#include "data_structures/concurrent/skiplist.hpp"
/**
* Helper functions for skiplist. These functions are used to insert
* into the skiplist concurrently.
*/
class SkipListHelper {
public:
/**
* Inserts into a skiplist concurrently. Tries to synchronize all threads to
* start and end at the same time. This function should only be used to
* benchmark the skiplist in a way that doesn't give a thread the chance to
* consume more than (end - start) / num_of_threads elements in the allocated
* time, since that would make the measurement inaccurate. Also shuffles the
* data to avoid inserting consecutive values.
*
* @param skiplist - skiplist instance
* @param start - value_range start
* @param end - value_range end (exclusive)
* @param num_of_threads - number of threads to insert with
* @param duration - duration of thread time in microseconds
* @return number of inserted elements
*/
static int InsertConcurrentSkiplistTimed(
SkipList<int> *skiplist, const int start, const int end,
const int num_of_threads, const std::chrono::microseconds &duration) {
std::vector<int> V(end - start);
for (int i = start; i < end; ++i) V[i] = i;
std::random_shuffle(V.begin(), V.end());
std::vector<std::thread> threads;
std::atomic<bool> stopped{1};
std::atomic<int> count{0};
for (int i = 0; i < num_of_threads; ++i) {
const int part = (end - start) / num_of_threads;
threads.emplace_back(std::thread(
[&V, &stopped, &count](SkipList<int> *skiplist, int start, int end) {
while (stopped)
;
auto accessor = skiplist->access();
for (int i = start; i < end && !stopped; ++i) {
while (accessor.insert(V[i]).second == false)
;
++count;
}
},
skiplist, start + i * part, start + (i + 1) * part));
}
stopped = false;
std::this_thread::sleep_for(duration);
stopped = true;
for (auto &x : threads) x.join();
return count;
}
/**
* Inserts into the skiplist concurrently, using the maximal number of threads
* the hardware allows.
*
* @param skiplist - skiplist instance
* @param start - starting value to insert
* @param end - ending value to insert
*/
static void InsertConcurrentSkiplist(SkipList<int> *skiplist, int start,
int end) {
int number_of_threads = std::thread::hardware_concurrency();
std::vector<std::thread> threads;
for (int i = 0; i < number_of_threads; i++) {
const int part = (end - start) / number_of_threads;
threads.emplace_back(std::thread(
[](SkipList<int> *skiplist, int start, int end) {
auto accessor = skiplist->access();
for (; start < end; start++) {
while (!accessor.insert(std::move(start)).second)
;
}
},
skiplist, start + i * part, start + (i + 1) * part));
}
for (auto &thread : threads) thread.join();
}
private:
};
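A brief usage sketch for the helper above (added for illustration, not part of the original file; it mirrors how the skiplist_insert benchmark below drives the helper, and the thread count and duration here are arbitrary):

#include <chrono>
#include <iostream>

#include "data_structures/concurrent/skiplist.hpp"
#include "skiplist_helper.hpp"

int main() {
  SkipList<int> skiplist;
  // Let 4 threads insert values from [0, 10000000) for roughly 100 ms and
  // report how many elements actually made it into the skiplist.
  const int count = SkipListHelper::InsertConcurrentSkiplistTimed(
      &skiplist, 0, 10000000, 4, std::chrono::microseconds(100000));
  std::cout << "inserted " << count << " elements" << std::endl;
  return 0;
}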

View File

@ -1,60 +0,0 @@
#ifndef NDEBUG
#define NDEBUG
#endif
#include <algorithm>
#include <thread>
#include <vector>
#include <benchmark/benchmark.h>
#include <benchmark/benchmark_api.h>
#include <glog/logging.h>
#include "data_structures/concurrent/skiplist.hpp"
#include "skiplist_helper.hpp"
void Insert(benchmark::State &state) {
SkipList<int> skiplist;
while (state.KeepRunning()) {
const int count = SkipListHelper::InsertConcurrentSkiplistTimed(
&skiplist, 0, 10000000, state.range(1),
std::chrono::microseconds(state.range(0)));
state.SetItemsProcessed(count); // Number of processed items in one
// iteration - useful for items/per s.
state.SetIterationTime(state.range(0) * 1.0 /
1000000); // Time the iteration took - since ideally
// all threads should run and stop at the
// same time we set the time manually.
auto sl_access = skiplist.access();
while (sl_access.size()) sl_access.remove(*sl_access.begin());
}
}
/**
* Invokes the test function with two arguments, time and number of threads.
* Time is specified in microseconds.
*/
static void CustomArguments(benchmark::internal::Benchmark *b) {
for (int i = (1 << 18); i <= (1 << 20); i *= 2)
for (int j = 1; j <= 8; ++j) b->Args({i, j});
}
/**
* This benchmark represents a use case of benchmarking one multi-threaded
* concurrent structure. This test assumes that all threads will start and end
* at the exact same time and will compete with each other.
*/
BENCHMARK(Insert)
->Apply(CustomArguments) // Set custom arguments.
->Unit(benchmark::kMicrosecond)
->UseManualTime() // Don't calculate real-time but depend on function
// providing the execution time.
->Repetitions(3)
->ReportAggregatesOnly(1);
int main(int argc, char **argv) {
google::InitGoogleLogging(argv[0]);
::benchmark::Initialize(&argc, argv);
::benchmark::RunSpecifiedBenchmarks();
return 0;
}

View File

@ -1,85 +0,0 @@
/**
@date: 2017-01-31
@authors: Sandi Fatic
These tests are used to benchmark the ReverseIterator vs the Find function
while iterating the whole skiplist in reverse.
*/
#include <algorithm>
#include <thread>
#include <vector>
#include <glog/logging.h>
#include "benchmark/benchmark_api.h"
#include "data_structures/concurrent/skiplist.hpp"
#include "skiplist_helper.hpp"
#include "utils/random/random_generator.hpp"
using utils::random::NumberGenerator;
using IntegerGenerator = NumberGenerator<std::uniform_int_distribution<int>,
std::default_random_engine, int>;
static void ReverseFromRBegin(benchmark::State &state) {
while (state.KeepRunning()) {
state.PauseTiming();
SkipList<int> skiplist;
SkipListHelper::InsertConcurrentSkiplist(&skiplist, 0, state.range(0));
int counter = 10;
auto accessor = skiplist.access();
auto rbegin = accessor.rbegin();
auto rend = accessor.rend();
state.ResumeTiming();
for (int i = 0; i < counter; i++) {
if (rbegin != rend) {
rbegin++;
}
}
}
}
static void FindFromRBegin(benchmark::State &state) {
while (state.KeepRunning()) {
state.PauseTiming();
SkipList<int> skiplist;
SkipListHelper::InsertConcurrentSkiplist(&skiplist, 0, state.range(0));
int counter = 10;
auto accessor = skiplist.access();
auto rbegin = accessor.rbegin();
state.ResumeTiming();
for (int i = 0; i < counter; i++) {
accessor.find(*rbegin - i);
}
}
}
auto BM_ReverseFromRBegin = [](benchmark::State &state) {
ReverseFromRBegin(state);
};
auto BM_FindFromRBegin = [](benchmark::State &state) { FindFromRBegin(state); };
int main(int argc, char **argv) {
google::InitGoogleLogging(argv[0]);
benchmark::RegisterBenchmark("ReverseFromRBegin", BM_ReverseFromRBegin)
->RangeMultiplier(2)
->Range(1 << 10, 1 << 16);
benchmark::RegisterBenchmark("FindFromRBegin", BM_FindFromRBegin)
->RangeMultiplier(2)
->Range(1 << 10, 1 << 16);
benchmark::RunSpecifiedBenchmarks();
return 0;
}

View File

@ -1,335 +0,0 @@
#include <benchmark/benchmark.h>
#include <benchmark/benchmark_api.h>
#include <glog/logging.h>
#include <stdint.h>
#include <stdlib.h>
#include <algorithm>
#include <limits>
#include <set>
#include <unordered_set>
#include <utility>
#include <vector>
/** This suite of benchmarks tests the data structure used to hold edges in a vertex.
* Various backing data-structures are benchmarked for common operations. */
/** Generates a random pseudo-pointer from a certain range. The range is limited
to ensure we get vertex-pointer collisions (multiple edges for the same vertex)
with some probability.
*/
int64_t random_pointer() { return rand() % 1000; }
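// With the pseudo-pointer range capped at 1000 and the benchmarks below
// inserting up to 4096 elements, each pseudo-vertex carries roughly
// 4096 / 1000 ~ 4 edges on average at the largest size, so the per-vertex
// iteration benchmarks do exercise the multi-edge case.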
// The element of all our collections. In real MG storage these would be Vertex
// and Edge version list pointers. It's a placeholder for 8-byte pointers.
using TElement = std::pair<int64_t, int64_t>;
/**
* Converts a (beginning, end) pair of iterators into an iterable that can be
* passed on to itertools. */
template <typename TIterator>
class Iterable {
public:
Iterable(const TIterator &begin, const TIterator &end)
: begin_(begin), end_(end) {}
Iterable(TIterator &&begin, TIterator &&end)
: begin_(std::forward<TIterator>(begin)),
end_(std::forward<TIterator>(end)) {}
auto begin() { return begin_; };
auto end() { return end_; };
private:
TIterator begin_;
TIterator end_;
};
/** Keeps elements sorted in a vector. We get log2(n) lookups for known
* destination vertex, but everything else suffers. */
class SortedVector {
public:
void insert(int64_t vertex, int64_t edge) {
auto loc = std::lower_bound(storage_.begin(), storage_.end(),
TElement{vertex, edge});
// lower_bound returns an iterator to the last element if there is no element
// greater than the one sought.
if (loc + 1 == storage_.end()) loc = storage_.end();
storage_.insert(loc, {vertex, edge});
}
// remove assumes the element is present, does not have to check
void remove(int64_t vertex, int64_t edge) {
storage_.erase(std::lower_bound(storage_.begin(), storage_.end(),
TElement{vertex, edge}));
}
auto iterable_over(int64_t vertex) {
return Iterable<std::vector<TElement>::iterator>(
std::lower_bound(storage_.begin(), storage_.end(),
TElement{vertex, std::numeric_limits<int64_t>::min()}),
std::upper_bound(
storage_.begin(), storage_.end(),
TElement{vertex, std::numeric_limits<int64_t>::max()}));
}
TElement random_element() { return storage_[rand() % storage_.size()]; }
std::vector<TElement> storage_;
};
/** Keeps elements in a vector with no guaranteed ordering. Generally works OK,
* but lookups for known destination vertices must be done with a linear scan.
*/
class Vector {
/** Custom iterator that takes care of skipping elements when used in a
* known-destination vertex scenario. */
class VertexIterator {
public:
VertexIterator(std::vector<TElement>::const_iterator position)
: position_(position) {}
VertexIterator(std::vector<TElement>::const_iterator position,
std::vector<TElement>::const_iterator end, int64_t vertex)
: position_(position), end_(end), vertex_(vertex) {
update_position();
}
VertexIterator &operator++() {
++position_;
if (vertex_ != 0) update_position();
return *this;
}
const TElement &operator*() const { return *position_; }
bool operator==(const VertexIterator &other) {
return position_ == other.position_;
}
bool operator!=(const VertexIterator &other) { return !(*this == other); }
private:
std::vector<TElement>::const_iterator position_;
// used only for the vertex-matching iterator
std::vector<TElement>::const_iterator end_;
int64_t vertex_{0};
void update_position() {
position_ = std::find_if(position_, end_, [this](const TElement &e) {
return e.first == vertex_;
});
}
};
public:
void insert(int64_t vertex, int64_t edge) {
storage_.emplace_back(vertex, edge);
}
// remove assumes the element is present, does not have to check
void remove(int64_t vertex, int64_t edge) {
auto found =
std::find(storage_.begin(), storage_.end(), TElement{vertex, edge});
*found = std::move(storage_.back());
storage_.pop_back();
}
auto iterable_over(int64_t vertex) {
return Iterable<VertexIterator>(
VertexIterator(storage_.begin(), storage_.end(), vertex),
VertexIterator(storage_.end()));
}
// Override begin() and end() to return our custom iterator. We need this so
// all edge iterators are of the same type, which is necessary for the current
// implementation of query::plan::Expand.
auto begin() { return VertexIterator(storage_.begin()); }
auto end() { return VertexIterator(storage_.end()); }
TElement random_element() { return storage_[rand() % storage_.size()]; }
std::vector<TElement> storage_;
};
template <typename TIterator>
auto make_iterable(TIterator &&begin, TIterator &&end) {
return Iterable<TIterator>(std::forward<TIterator>(begin),
std::forward<TIterator>(end));
}
class Set {
public:
void insert(int64_t vertex, int64_t edge) { storage_.insert({vertex, edge}); }
// remove assumes the element is present, does not have to check
void remove(int64_t vertex, int64_t edge) {
storage_.erase(storage_.find(TElement{vertex, edge}));
}
auto iterable_over(int64_t vertex) {
return Iterable<std::set<TElement>::iterator>(
storage_.lower_bound(
TElement{vertex, std::numeric_limits<int64_t>::min()}),
storage_.upper_bound(
TElement{vertex, std::numeric_limits<int64_t>::max()}));
}
TElement random_element() {
auto it = storage_.begin();
int to_remove =
storage_.size() == 1 ? 0 : rand() % ((int)storage_.size() - 1);
for (int i = 0; i < to_remove; i++) it++;
return *it;
}
std::set<TElement> storage_;
};
// hash function for the element pair
namespace std {
template <>
struct hash<TElement> {
size_t operator()(const TElement &e) const { return e.first ^ e.second; };
};
}
class UnorderedMultiset {
private:
struct Hash {
size_t operator()(const TElement &element) const { return element.first; }
};
struct Equal {
bool operator()(const TElement &a, const TElement &b) const {
return a.first == b.first;
}
};
public:
void insert(int64_t vertex, int64_t edge) { storage_.insert({vertex, edge}); }
// remove assumes the element is present, does not have to check
void remove(int64_t vertex, int64_t edge) {
storage_.erase(storage_.find(TElement{vertex, edge}));
}
auto iterable_over(int64_t vertex) {
auto start_end = storage_.equal_range(TElement{vertex, 0});
return Iterable<std::unordered_multiset<TElement, Hash, Equal>::iterator>(
start_end.first, start_end.second);
}
TElement random_element() {
auto it = storage_.begin();
int to_remove =
storage_.size() == 1 ? 0 : rand() % ((int)storage_.size() - 1);
for (int i = 0; i < to_remove; i++) it++;
return *it;
}
std::unordered_multiset<TElement, Hash, Equal> storage_;
};
template <typename TStorage>
void Insert(benchmark::State &state) {
TStorage storage;
for (int i = 0; i < state.range(0); i++)
storage.insert(random_pointer(), random_pointer());
int64_t vertex = random_pointer();
int64_t edge = random_pointer();
while (state.KeepRunning()) {
storage.insert(vertex, edge);
state.PauseTiming();
storage.remove(vertex, edge);
state.ResumeTiming();
}
}
template <typename TStorage>
static void Remove(benchmark::State &state) {
TStorage storage;
for (int i = 0; i < state.range(0); i++)
storage.insert(random_pointer(), random_pointer());
TElement to_remove;
while (state.KeepRunning()) {
state.PauseTiming();
to_remove = storage.random_element();
state.ResumeTiming();
storage.remove(to_remove.first, to_remove.second);
state.PauseTiming();
storage.insert(random_pointer(), random_pointer());
state.ResumeTiming();
}
}
template <typename TStorage>
static void Iterate(benchmark::State &state) {
TStorage storage;
for (int i = 0; i < state.range(0); i++)
storage.insert(random_pointer(), random_pointer());
int64_t sum{0};
while (state.KeepRunning()) {
for (const auto &elem : storage.storage_) sum += elem.first;
}
}
template <typename TStorage>
static void IterateOverVertex(benchmark::State &state) {
TStorage storage;
for (int i = 0; i < state.range(0); i++)
storage.insert(random_pointer(), random_pointer());
TElement e;
while (state.KeepRunning()) {
state.PauseTiming();
e = storage.random_element();
state.ResumeTiming();
int64_t sum{0};
for (const auto &elem : storage.iterable_over(e.first)) sum += elem.first;
}
}
BENCHMARK_TEMPLATE(Insert, SortedVector)->RangeMultiplier(4)->Range(1, 4096);
BENCHMARK_TEMPLATE(Insert, Vector)->RangeMultiplier(4)->Range(1, 4096);
BENCHMARK_TEMPLATE(Insert, Set)->RangeMultiplier(4)->Range(1, 4096);
BENCHMARK_TEMPLATE(Insert, UnorderedMultiset)
->RangeMultiplier(4)
->Range(1, 4096);
BENCHMARK_TEMPLATE(Remove, SortedVector)->RangeMultiplier(4)->Range(1, 4096);
BENCHMARK_TEMPLATE(Remove, Vector)->RangeMultiplier(4)->Range(1, 4096);
BENCHMARK_TEMPLATE(Remove, Set)->RangeMultiplier(4)->Range(1, 4096);
BENCHMARK_TEMPLATE(Remove, UnorderedMultiset)
->RangeMultiplier(4)
->Range(1, 4096);
BENCHMARK_TEMPLATE(Iterate, SortedVector)->RangeMultiplier(4)->Range(1, 4096);
BENCHMARK_TEMPLATE(Iterate, Vector)->RangeMultiplier(4)->Range(1, 4096);
BENCHMARK_TEMPLATE(Iterate, Set)->RangeMultiplier(4)->Range(1, 4096);
BENCHMARK_TEMPLATE(Iterate, UnorderedMultiset)
->RangeMultiplier(4)
->Range(1, 4096);
BENCHMARK_TEMPLATE(IterateOverVertex, SortedVector)
->RangeMultiplier(4)
->Range(1, 4096);
BENCHMARK_TEMPLATE(IterateOverVertex, Vector)
->RangeMultiplier(4)
->Range(1, 4096);
BENCHMARK_TEMPLATE(IterateOverVertex, Set)->RangeMultiplier(4)->Range(1, 4096);
BENCHMARK_TEMPLATE(IterateOverVertex, UnorderedMultiset)
->RangeMultiplier(4)
->Range(1, 4096);
int main(int argc, char **argv) {
::benchmark::Initialize(&argc, argv);
::benchmark::RunSpecifiedBenchmarks();
return 0;
}

View File

@ -1,69 +0,0 @@
#include <benchmark/benchmark.h>
#include <benchmark/benchmark_api.h>
#include <glog/logging.h>
#include "storage/single_node/mvcc/record.hpp"
#include "storage/single_node/mvcc/version_list.hpp"
#include "transactions/single_node/engine.hpp"
class Prop : public mvcc::Record<Prop> {
public:
Prop() = default;
Prop *CloneData() { return new Prop; }
};
// Benchmark multiple updates, and finds, focused on finds.
// This is a rather weak test, but I'm not sure what a better way to test this
// would be in the future.
// TODO(dgleich): Refresh this.
void MvccMix(benchmark::State &state) {
while (state.KeepRunning()) {
state.PauseTiming();
tx::Engine engine;
auto t1 = engine.Begin();
mvcc::VersionList<Prop> version_list(*t1, storage::Gid::FromInt(0));
engine.Commit(*t1);
auto t2 = engine.Begin();
state.ResumeTiming();
version_list.update(*t2);
state.PauseTiming();
state.ResumeTiming();
version_list.find(*t2);
state.PauseTiming();
engine.Abort(*t2);
auto t3 = engine.Begin();
state.ResumeTiming();
version_list.update(*t3);
state.PauseTiming();
auto t4 = engine.Begin();
// Repeat find state.range(0) number of times.
state.ResumeTiming();
for (int i = 0; i < state.range(0); ++i) {
version_list.find(*t4);
}
state.PauseTiming();
engine.Commit(*t3);
engine.Commit(*t4);
state.ResumeTiming();
}
}
BENCHMARK(MvccMix)
->RangeMultiplier(2) // Multiply next range testdata size by 2
->Range(1 << 14, 1 << 23) // 1<<14, 1<<15, 1<<16, ...
->Unit(benchmark::kMillisecond);
int main(int argc, char **argv) {
google::InitGoogleLogging(argv[0]);
::benchmark::Initialize(&argc, argv);
::benchmark::RunSpecifiedBenchmarks();
return 0;
}

View File

@ -1,40 +0,0 @@
#include <thread>
#include <vector>
#include <glog/logging.h>
#include "transactions/single_node/engine.hpp"
#include "utils/timer.hpp"
void Benchmark(int64_t num_threads, int64_t num_transactions) {
LOG(INFO) << "Testing with " << num_threads << " threads and "
<< num_transactions << " transactions per thread...";
tx::Engine engine;
std::vector<std::thread> threads;
utils::Timer timer;
for (int i = 0; i < num_threads; ++i) {
threads.emplace_back([num_transactions, &engine]() {
for (int j = 0; j < num_transactions; ++j) {
auto *tx = engine.Begin();
engine.Commit(*tx);
}
});
}
for (auto &t : threads) t.join();
int64_t tx_count = engine.GlobalGcSnapshot().front() - 1;
CHECK(tx_count == num_threads * num_transactions)
<< "Got a bad number of transactions: " << tx_count;
auto tps = (double)(tx_count) / timer.Elapsed().count();
LOG(INFO) << "Result (millions of transactions per second) " << tps / 1000000;
}
int main(int, char **argv) {
google::InitGoogleLogging(argv[0]);
for (int thread_count : {1, 2, 4, 8, 16}) {
Benchmark(thread_count, 100000);
}
return 0;
}

View File

@ -17,18 +17,6 @@ function(add_concurrent_test test_cpp)
add_dependencies(memgraph__concurrent ${target_name})
endfunction(add_concurrent_test)
add_concurrent_test(dynamic_bitset_clear_n.cpp)
target_link_libraries(${test_prefix}dynamic_bitset_clear_n mg-single-node kvstore_dummy_lib)
add_concurrent_test(dynamic_bitset.cpp)
target_link_libraries(${test_prefix}dynamic_bitset mg-single-node kvstore_dummy_lib)
add_concurrent_test(dynamic_bitset_set.cpp)
target_link_libraries(${test_prefix}dynamic_bitset_set mg-single-node kvstore_dummy_lib)
add_concurrent_test(dynamic_bitset_set_n.cpp)
target_link_libraries(${test_prefix}dynamic_bitset_set_n mg-single-node kvstore_dummy_lib)
add_concurrent_test(network_read_hang.cpp)
target_link_libraries(${test_prefix}network_read_hang mg-communication)
@ -59,38 +47,5 @@ target_link_libraries(${test_prefix}skip_list_remove mg-utils)
add_concurrent_test(skip_list_remove_competitive.cpp)
target_link_libraries(${test_prefix}skip_list_remove_competitive mg-utils)
add_concurrent_test(sl_hang.cpp)
target_link_libraries(${test_prefix}sl_hang mg-single-node kvstore_dummy_lib)
add_concurrent_test(sl_insert_competetive.cpp)
target_link_libraries(${test_prefix}sl_insert_competetive mg-single-node kvstore_dummy_lib)
add_concurrent_test(sl_insert.cpp)
target_link_libraries(${test_prefix}sl_insert mg-single-node kvstore_dummy_lib)
add_concurrent_test(sl_map.cpp)
target_link_libraries(${test_prefix}sl_map mg-single-node kvstore_dummy_lib)
add_concurrent_test(sl_memory.cpp)
target_link_libraries(${test_prefix}sl_memory mg-single-node kvstore_dummy_lib)
add_concurrent_test(sl_memory_leak.cpp)
target_link_libraries(${test_prefix}sl_memory_leak mg-single-node kvstore_dummy_lib)
add_concurrent_test(sl_remove_competetive.cpp)
target_link_libraries(${test_prefix}sl_remove_competetive mg-single-node kvstore_dummy_lib)
add_concurrent_test(sl_remove_disjoint.cpp)
target_link_libraries(${test_prefix}sl_remove_disjoint mg-single-node kvstore_dummy_lib)
add_concurrent_test(sl_remove_joint.cpp)
target_link_libraries(${test_prefix}sl_remove_joint mg-single-node kvstore_dummy_lib)
add_concurrent_test(sl_set.cpp)
target_link_libraries(${test_prefix}sl_set mg-single-node kvstore_dummy_lib)
add_concurrent_test(sl_simulation.cpp)
target_link_libraries(${test_prefix}sl_simulation mg-single-node kvstore_dummy_lib)
add_concurrent_test(spin_lock.cpp)
target_link_libraries(${test_prefix}spin_lock mg-utils)

View File

@ -1,38 +0,0 @@
#include "common.hpp"
constexpr size_t THREADS_NO = std::min(max_no_threads, 8);
constexpr size_t op_per_thread = 1e5;
constexpr size_t bit_part_len = 2;
constexpr size_t no_slots = 1e4;
constexpr size_t key_range = no_slots * THREADS_NO * bit_part_len;
constexpr size_t no_sets_per_clear = 2;
// TODO: document the test
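// Each thread owns a disjoint, interleaved family of bit_part_len-wide slots
// (selected through its thread index) and randomly sets or clears whole slots,
// mirroring the expected final state in a local vector; the per-thread
// expectations are then merged and compared against the DynamicBitset.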
int main() {
DynamicBitset<> db;
auto seted = collect_set(run<std::vector<bool>>(THREADS_NO, [&](auto index) {
auto rand = rand_gen(no_slots);
auto clear_op = rand_gen_bool(no_sets_per_clear);
std::vector<bool> set(key_range);
for (size_t i = 0; i < op_per_thread; i++) {
size_t num = rand() * THREADS_NO * bit_part_len + index * bit_part_len;
if (clear_op()) {
db.clear(num, bit_part_len);
for (int j = 0; j < bit_part_len; j++) {
set[num + j] = false;
}
} else {
db.set(num, bit_part_len);
for (int j = 0; j < bit_part_len; j++) set[num + j] = true;
}
}
return set;
}));
check_set(db, seted);
}

View File

@ -1,52 +0,0 @@
#include "common.hpp"
constexpr size_t THREADS_NO = std::min(max_no_threads, 4);
constexpr size_t op_per_thread = 1e5;
constexpr size_t up_border_bit_set_pow2 = 3;
constexpr size_t key_range =
op_per_thread * THREADS_NO * (1 << up_border_bit_set_pow2) * 2;
// TODO: document the test
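// First, all threads concurrently set random aligned runs of bits and record
// which bits they set. Then, all threads pick random aligned runs, record
// which of those bits they observe as set and clear the runs. A bit is
// expected to remain set only if it was set in the first phase and not cleared
// in the second, which is what the final check_set verifies.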
int main() {
DynamicBitset<> db;
auto seted = collect_set(run<std::vector<bool>>(THREADS_NO, [&](auto index) {
auto rand = rand_gen(key_range);
auto rand_len = rand_gen(up_border_bit_set_pow2);
std::vector<bool> set(key_range + (1 << up_border_bit_set_pow2));
for (size_t i = 0; i < op_per_thread; i++) {
auto len = 1 << rand_len();
size_t num = (rand() / len) * len;
db.set(num, len);
for (int j = 0; j < len; j++) set[num + j] = true;
}
return set;
}));
auto cleared =
collect_set(run<std::vector<bool>>(THREADS_NO, [&](auto index) {
auto rand = rand_gen(key_range);
auto rand_len = rand_gen(up_border_bit_set_pow2);
std::vector<bool> set(key_range + (1 << up_border_bit_set_pow2));
for (size_t i = 0; i < op_per_thread; i++) {
auto len = 1 << rand_len();
size_t num = (rand() / len) * len;
for (int j = 0; j < len; j++) {
set[num + j] = set[num + j] | db.at(num + j);
}
db.clear(num, len);
}
return set;
}));
for (size_t i = 0; i < seted.size(); i++) {
seted[i] = seted[i] & (!cleared[i]);
}
check_set(db, seted);
}

View File

@ -1,26 +0,0 @@
#include "common.hpp"
constexpr size_t THREADS_NO = std::min(max_no_threads, 8);
constexpr size_t op_per_thread = 1e5;
constexpr size_t key_range = op_per_thread * THREADS_NO * 3;
// TODO: document the test
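// Each thread sets op_per_thread random single bits and records them in a
// local vector; the union of the per-thread records is then compared against
// the DynamicBitset.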
int main() {
DynamicBitset<> db;
auto set = collect_set(run<std::vector<bool>>(THREADS_NO, [&](auto index) {
auto rand = rand_gen(key_range);
std::vector<bool> set(key_range);
for (size_t i = 0; i < op_per_thread; i++) {
size_t num = rand();
db.set(num);
set[num] = true;
}
return set;
}));
check_set(db, set);
}

View File

@ -1,30 +0,0 @@
#include "common.hpp"
constexpr size_t THREADS_NO = std::min(max_no_threads, 4);
constexpr size_t op_per_thread = 1e5;
constexpr size_t up_border_bit_set_pow2 = 3;
constexpr size_t key_range =
op_per_thread * THREADS_NO * (1 << up_border_bit_set_pow2) * 2;
// TODO: document the test
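// Each thread sets random aligned runs of up to 2^up_border_bit_set_pow2 bits
// and records them in a local vector; the union of the per-thread records is
// then compared against the DynamicBitset.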
int main() {
DynamicBitset<> db;
auto set = collect_set(run<std::vector<bool>>(THREADS_NO, [&](auto index) {
auto rand = rand_gen(key_range);
auto rand_len = rand_gen(up_border_bit_set_pow2);
std::vector<bool> set(key_range + (1 << up_border_bit_set_pow2));
for (size_t i = 0; i < op_per_thread; i++) {
auto len = 1 << rand_len();
size_t num = (rand() / len) * len;
db.set(num, len);
for (int j = 0; j < len; j++) set[num + j] = true;
}
return set;
}));
check_set(db, set);
}

View File

@ -1,37 +0,0 @@
#include "gtest/gtest.h"
#include <thread>
#include <vector>
#include "data_structures/concurrent/skiplist.hpp"
// Try to provoke find_or_larger to hang. This happened before and caused
// Jenkins to stop responding. It is hard to recreate deterministically and this
// is the best we can do without doing friend_tests or refactoring skiplist.
TEST(SkipList, HangDuringFindOrLarger) {
std::vector<std::thread> threads;
SkipList<int> skiplist;
const int num_of_threads = 8;
const int iter = 100000;
for (int i = 0; i < num_of_threads; ++i) {
threads.emplace_back([&skiplist]() {
auto accessor = skiplist.access();
for (int i = 0; i < iter; ++i) accessor.insert(rand() % 3);
});
threads.emplace_back([&skiplist]() {
auto accessor = skiplist.access();
for (int i = 0; i < iter; ++i) accessor.remove(rand() % 3);
});
threads.emplace_back([&skiplist]() {
auto accessor = skiplist.access();
for (int i = 0; i < iter; ++i)
accessor.find_or_larger(rand() % 3);
});
}
for (auto &thread : threads) thread.join();
}
int main(int argc, char **argv) {
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}

View File

@ -1,41 +0,0 @@
#include "common.hpp"
constexpr size_t THREADS_NO = std::min(max_no_threads, 8);
constexpr size_t elems_per_thread = 100000;
constexpr size_t key_range = elems_per_thread * THREADS_NO * 2;
// TODO: document the test
// This test checks the insert_unique method under pressure.
// It checks for missing data and changed/overwritten data.
int main(int, char **argv) {
google::InitGoogleLogging(argv[0]);
map_t skiplist;
auto futures =
run<std::vector<size_t>>(THREADS_NO, skiplist, [](auto acc, auto index) {
auto rand = rand_gen(key_range);
long long downcount = elems_per_thread;
std::vector<size_t> owned;
do {
auto key = rand();
if (acc.insert(key, index).second) {
downcount--;
owned.push_back(key);
}
} while (downcount > 0);
check_present_same(acc, index, owned);
return owned;
});
auto accessor = skiplist.access();
for (auto &owned : collect(futures)) {
check_present_same(accessor, owned);
}
check_size(accessor, THREADS_NO * elems_per_thread);
check_order(accessor);
}

View File

@ -1,41 +0,0 @@
#include "common.hpp"
constexpr size_t THREADS_NO = std::min(max_no_threads, 8);
constexpr size_t elems_per_thread = 100000;
// TODO: document the test
// This test checks the insert_unique method under pressure.
// Threads try to insert keys in the same order, which forces them to compete
// intensely with each other.
// The test checks for missing data and changed/overwritten data.
int main(int, char **argv) {
google::InitGoogleLogging(argv[0]);
map_t skiplist;
auto futures =
run<std::vector<size_t>>(THREADS_NO, skiplist, [](auto acc, auto index) {
long long downcount = elems_per_thread;
std::vector<size_t> owned;
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wfor-loop-analysis"
for (int i = 0; downcount > 0; i++) {
if (acc.insert(i, index).second) {
downcount--;
owned.push_back(i);
}
}
#pragma GCC diagnostic pop
check_present_same(acc, index, owned);
return owned;
});
auto accessor = skiplist.access();
for (auto &owned : collect(futures)) {
check_present_same(accessor, owned);
}
check_size(accessor, THREADS_NO * elems_per_thread);
check_order(accessor);
}

View File

@ -1,76 +0,0 @@
#include "common.hpp"
constexpr size_t THREADS_NO = std::min(max_no_threads, 8);
constexpr size_t elems_per_thread = 1e5;
// TODO: document the test
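// Each thread inserts its own contiguous range of (key, key) pairs and the
// total size is verified; each thread then removes its own range, and the test
// checks that the map ends up empty, that iteration yields no elements and
// that the ordering invariant still holds.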
int main(int, char **argv) {
google::InitGoogleLogging(argv[0]);
std::vector<std::thread> threads;
map_t skiplist;
// put THREADS_NO * elems_per_thread items to the skiplist
for (size_t thread_i = 0; thread_i < THREADS_NO; ++thread_i) {
threads.emplace_back(
[&skiplist](size_t start, size_t end) {
auto accessor = skiplist.access();
for (size_t elem_i = start; elem_i < end; ++elem_i) {
accessor.insert(elem_i, elem_i);
}
},
thread_i * elems_per_thread,
thread_i * elems_per_thread + elems_per_thread);
}
// wait all threads
for (auto &thread : threads) {
thread.join();
}
// get skiplist size
{
auto accessor = skiplist.access();
CHECK(accessor.size() == THREADS_NO * elems_per_thread)
<< "all elements in skiplist";
}
for (size_t thread_i = 0; thread_i < THREADS_NO; ++thread_i) {
threads[thread_i] = std::thread(
[&skiplist](size_t start, size_t end) {
auto accessor = skiplist.access();
for (size_t elem_i = start; elem_i < end; ++elem_i) {
CHECK(accessor.remove(elem_i) == true) << "";
}
},
thread_i * elems_per_thread,
thread_i * elems_per_thread + elems_per_thread);
}
// wait for all threads
for (auto &thread : threads) {
thread.join();
}
// check size
{
auto accessor = skiplist.access();
CHECK(accessor.size() == 0) << "Size should be 0, but size is "
<< accessor.size();
}
// check count
{
size_t iterator_counter = 0;
auto accessor = skiplist.access();
for (auto elem : accessor) {
++iterator_counter;
cout << elem.first << " ";
}
CHECK(iterator_counter == 0) << "deleted elements";
}
{
auto accessor = skiplist.access();
check_order(accessor);
}
return 0;
}

View File

@ -1,24 +0,0 @@
#include "common.hpp"
constexpr size_t THREADS_NO = std::min(max_no_threads, 8);
constexpr size_t elements = 2e6;
/**
* Puts `elements` elements into the skiplist from each thread and checks
* whether there is any memory leak.
*/
int main(int, char **argv) {
google::InitGoogleLogging(argv[0]);
map_t skiplist;
auto futures = run<size_t>(THREADS_NO, skiplist, [](auto acc, auto index) {
for (size_t i = 0; i < elements; i++) {
acc.insert(i, index);
}
return index;
});
collect(futures);
auto accessor = skiplist.access();
check_size(accessor, elements);
}

View File

@ -1,76 +0,0 @@
#include "common.hpp"
constexpr size_t THREADS_NO = std::min(max_no_threads, 1);
constexpr size_t elems_per_thread = 16e5;
// TODO: Memory leak at 1,600,000 elements (Kruno wrote this here, but
// the previous (now deleted) memory_check method had an invalid implementation)
// 1. implement valid memory_check
// 2. analyse this code
// 3. fix the memory leak
// 4. write proper test
int main(int, char **argv) {
google::InitGoogleLogging(argv[0]);
std::vector<std::thread> threads;
map_t skiplist;
// put THREADS_NO * elems_per_thread items to the skiplist
for (size_t thread_i = 0; thread_i < THREADS_NO; ++thread_i) {
threads.emplace_back(
[&skiplist](size_t start, size_t end) {
auto accessor = skiplist.access();
for (size_t elem_i = start; elem_i < end; ++elem_i) {
accessor.insert(elem_i, elem_i);
}
},
thread_i * elems_per_thread,
thread_i * elems_per_thread + elems_per_thread);
}
// wait all threads
for (auto &thread : threads) {
thread.join();
}
// get skiplist size
{
auto accessor = skiplist.access();
CHECK(accessor.size() == THREADS_NO * elems_per_thread)
<< "all elements in skiplist";
}
for (size_t thread_i = 0; thread_i < THREADS_NO; ++thread_i) {
threads[thread_i] = std::thread(
[&skiplist](size_t start, size_t end) {
auto accessor = skiplist.access();
for (size_t elem_i = start; elem_i < end; ++elem_i) {
CHECK(accessor.remove(elem_i) == true) << "";
}
},
thread_i * elems_per_thread,
thread_i * elems_per_thread + elems_per_thread);
}
// wait for all threads
for (auto &thread : threads) {
thread.join();
}
// check size
{
auto accessor = skiplist.access();
CHECK(accessor.size() == 0)
<< "Size should be 0, but size is " << accessor.size();
}
// check count
{
size_t iterator_counter = 0;
auto accessor = skiplist.access();
for (auto elem : accessor) {
++iterator_counter;
cout << elem.first << " ";
}
CHECK(iterator_counter == 0) << "deleted elements";
}
return 0;
}

View File

@ -1,65 +0,0 @@
#include "common.hpp"
constexpr size_t THREADS_NO = std::min(max_no_threads, 8);
constexpr size_t op_per_thread = 1e5;
// Depending on the value there is a possibility of numerical overflow
constexpr size_t max_number = 10;
constexpr size_t no_insert_for_one_delete = 2;
// TODO: document the test
// This test checks the remove method under pressure.
// Threads try to insert and remove keys in approximately the same order,
// which forces them to compete intensely with each other.
// Calls to the remove method are interleaved with insert calls.
int main(int, char **argv) {
google::InitGoogleLogging(argv[0]);
map_t skiplist;
auto futures = run<std::pair<long long, long long>>(
THREADS_NO, skiplist, [](auto acc, auto index) {
auto rand_op = rand_gen_bool(no_insert_for_one_delete);
long long downcount = op_per_thread;
long long sum = 0;
long long count = 0;
for (int i = 0; downcount > 0; i++) {
auto data = i % max_number;
if (rand_op()) {
auto t = i;
while (t > 0) {
if (acc.remove(t)) {
sum -= t % max_number;
downcount--;
count--;
break;
}
t--;
}
} else {
if (acc.insert(i, data).second) {
sum += data;
count++;
downcount--;
}
}
}
return std::pair<long long, long long>(sum, count);
});
auto accessor = skiplist.access();
long long sums = 0;
long long counters = 0;
for (auto &data : collect(futures)) {
sums += data.second.first;
counters += data.second.second;
}
for (auto &e : accessor) {
sums -= e.second;
}
CHECK(sums == 0) << "Aproximetly Same values are present";
check_size(accessor, counters);
check_order(accessor);
return 0;
}

View File

@ -1,52 +0,0 @@
#include "common.hpp"
constexpr size_t THREADS_NO = std::min(max_no_threads, 8);
constexpr size_t key_range = 1e5;
constexpr size_t op_per_thread = 1e6;
constexpr size_t no_insert_for_one_delete = 1;
// TODO: document the test
// This test checks the remove method under pressure.
// Each thread removes its own data, so removals are disjoint.
// Calls to the remove method are interleaved with insert calls.
int main(int, char **argv) {
google::InitGoogleLogging(argv[0]);
map_t skiplist;
auto futures =
run<std::vector<size_t>>(THREADS_NO, skiplist, [](auto acc, auto index) {
auto rand = rand_gen(key_range);
auto rand_op = rand_gen_bool(no_insert_for_one_delete);
long long downcount = op_per_thread;
std::vector<size_t> owned;
do {
if (owned.size() != 0 && rand_op()) {
auto rem = rand() % owned.size();
CHECK(acc.remove(owned[rem])) << "Owned data removed";
owned.erase(owned.begin() + rem);
downcount--;
} else {
auto key = rand();
if (acc.insert(key, index).second) {
downcount--;
owned.push_back(key);
}
}
} while (downcount > 0);
check_present_same(acc, index, owned);
return owned;
});
auto accessor = skiplist.access();
size_t count = 0;
for (auto &owned : collect(futures)) {
check_present_same(accessor, owned);
count += owned.second.size();
}
check_size(accessor, count);
check_order(accessor);
return 0;
}

View File

@ -1,63 +0,0 @@
#include "common.hpp"
constexpr size_t THREADS_NO = std::min(max_no_threads, 8);
constexpr size_t key_range = 1e4;
constexpr size_t op_per_thread = 1e5;
// Depending on the value there is a possibility of numerical overflow
constexpr size_t max_number = 10;
constexpr size_t no_insert_for_one_delete = 2;
// TODO: document the test
// This test checks the remove method under pressure.
// Each thread removes random data, so removals overlap between threads.
// Calls to the remove method are interleaved with insert calls.
int main(int, char **argv) {
google::InitGoogleLogging(argv[0]);
map_t skiplist;
auto futures = run<std::pair<long long, long long>>(
THREADS_NO, skiplist, [](auto acc, auto index) {
auto rand = rand_gen(key_range);
auto rand_op = rand_gen_bool(no_insert_for_one_delete);
long long downcount = op_per_thread;
long long sum = 0;
long long count = 0;
do {
auto num = rand();
auto data = num % max_number;
if (rand_op()) {
if (acc.remove(num)) {
sum -= data;
downcount--;
count--;
}
} else {
if (acc.insert(num, data).second) {
sum += data;
downcount--;
count++;
}
}
} while (downcount > 0);
return std::pair<long long, long long>(sum, count);
});
auto accessor = skiplist.access();
long long sums = 0;
long long counters = 0;
for (auto &data : collect(futures)) {
sums += data.second.first;
counters += data.second.second;
}
for (auto &e : accessor) {
sums -= e.second;
}
CHECK(sums == 0) << "Aproximetly Same values are present";
check_size(accessor, counters);
check_order(accessor);
return 0;
}

View File

@ -1,65 +0,0 @@
#include "common.hpp"
constexpr size_t THREADS_NO = std::min(max_no_threads, 8);
constexpr size_t key_range = 1e4;
constexpr size_t op_per_thread = 1e5;
constexpr size_t no_insert_for_one_delete = 2;
// TODO: document the test
// This test checks the skiplist used as a set.
// Each thread removes random data, so removals overlap between threads.
// Calls to the remove method are interleaved with insert calls.
int main(int, char **argv) {
google::InitGoogleLogging(argv[0]);
SkipList<std::string> skiplist;
auto futures =
run<std::vector<long>>(THREADS_NO, skiplist, [](auto acc, auto index) {
auto rand = rand_gen(key_range);
auto rand_op = rand_gen_bool(no_insert_for_one_delete);
long long downcount = op_per_thread;
std::vector<long> set(key_range);
do {
int num = rand();
std::string num_str = std::to_string(num);
if (rand_op()) {
if (acc.remove(num_str)) {
downcount--;
set[num]--;
}
} else {
std::string num_str = std::to_string(num);
if (acc.insert(num_str).second) {
downcount--;
set[num]++;
}
}
} while (downcount > 0);
return set;
});
long set[key_range] = {0};
for (auto &data : collect(futures)) {
for (int i = 0; i < key_range; i++) {
set[i] += data.second[i];
}
}
auto accessor = skiplist.access();
for (int i = 0; i < key_range; i++) {
CHECK(set[i] == 0 || set[i] == 1 ||
(set[i] == 1) ^ accessor.contains(std::to_string(i)))
<< "Set doesn't hold it's guarantees.";
}
for (auto &e : accessor) {
set[std::stoi(e)]--;
}
check_zero(key_range, set, "Set");
return 0;
}

View File

@ -1,68 +0,0 @@
#include "common.hpp"
constexpr size_t THREADS_NO = std::min(max_no_threads, 8);
constexpr size_t key_range = 1e5;
constexpr size_t op_per_thread = 1e6;
// Depending on the value there is a possibility of numerical overflow
constexpr size_t max_number = 10;
constexpr size_t no_find_per_change = 5;
constexpr size_t no_insert_for_one_delete = 1;
// TODO: document the test
// This test simulates the behavior of transactions.
// Each thread makes a series of finds interleaved with methods that change the
// map. The exact ratio of finds per change and of inserts per delete can be
// regulated with no_find_per_change and no_insert_for_one_delete.
int main(int, char **argv) {
google::InitGoogleLogging(argv[0]);
map_t skiplist;
auto futures = run<std::pair<long long, long long>>(
THREADS_NO, skiplist, [](auto acc, auto index) {
auto rand = rand_gen(key_range);
auto rand_change = rand_gen_bool(no_find_per_change);
auto rand_delete = rand_gen_bool(no_insert_for_one_delete);
long long sum = 0;
long long count = 0;
for (int i = 0; i < op_per_thread; i++) {
auto num = rand();
auto data = num % max_number;
if (rand_change()) {
if (rand_delete()) {
if (acc.remove(num)) {
sum -= data;
count--;
}
} else {
if (acc.insert(num, data).second) {
sum += data;
count++;
}
}
} else {
auto value = acc.find(num);
CHECK(value == acc.end() || value->second == data)
<< "Data is invalid";
}
}
return std::pair<long long, long long>(sum, count);
});
auto accessor = skiplist.access();
long long sums = 0;
long long counters = 0;
for (auto &data : collect(futures)) {
sums += data.second.first;
counters += data.second.second;
}
for (auto &e : accessor) {
sums -= e.second;
}
CHECK(sums == 0) << "Same values aren't present";
check_size(accessor, counters);
check_order(accessor);
}

View File

@ -55,12 +55,6 @@ target_link_libraries(${test_prefix}expression_pretty_printer mg-query-with-kvst
add_manual_test(single_query.cpp ${CMAKE_SOURCE_DIR}/src/glue/communication.cpp)
target_link_libraries(${test_prefix}single_query mg-query-with-kvstore-dummy mg-communication)
add_manual_test(sl_position_and_count.cpp)
target_link_libraries(${test_prefix}sl_position_and_count mg-single-node kvstore_dummy_lib)
add_manual_test(snapshot_explorer.cpp)
target_link_libraries(${test_prefix}snapshot_explorer mg-single-node kvstore_dummy_lib)
add_manual_test(stripped_timing.cpp)
target_link_libraries(${test_prefix}stripped_timing mg-query-with-kvstore-dummy)
@ -70,9 +64,6 @@ target_link_libraries(${test_prefix}ssl_client mg-communication)
add_manual_test(ssl_server.cpp)
target_link_libraries(${test_prefix}ssl_server mg-communication)
add_manual_test(wal_explorer.cpp)
target_link_libraries(${test_prefix}wal_explorer mg-single-node kvstore_dummy_lib)
add_manual_test(xorshift.cpp)
target_link_libraries(${test_prefix}xorshift mg-utils)

View File

@ -1,103 +0,0 @@
#include <cstdlib>
#include <ctime>
#include <iostream>
#include <limits>
#include <memory>
#include <vector>
#include <fmt/format.h>
#include "data_structures/concurrent/skiplist.hpp"
/** Calculates the mean of a given vector of numbers */
template <typename TNumber>
auto mean(const std::vector<TNumber> &values) {
TNumber r_val = 0;
for (const auto &value : values) r_val += value;
return r_val / values.size();
}
/** Logging helper function */
template <typename... TArgs>
void log(const std::string &format, TArgs &&... args) {
std::cout << fmt::format(format, std::forward<TArgs>(args)...) << std::endl;
}
/** Creates a skiplist containing all ints in range [0, size) */
std::unique_ptr<SkipList<int>> make_sl(int size) {
auto sl = std::make_unique<SkipList<int>>();
auto access = sl->access();
for (int i = 0; i < size; i++) access.insert(i);
return sl;
}
/**
* Performs testing of the position_and_count function
* of a skiplist. Looks for three positions in the skiplist,
* those at 1/4, 1/2 and 3/4 values. Prints out results
* to stdout, does not do any automated checks if the
* results are valid.
*
* @param size - size of the skiplist to test with
* @param iterations - number of iterations of each test.
* @param granularity - How many sequential ints should be
* considered equal in testing by the custom `less`
* function.
*/
void test(int size, int iterations = 20, int granularity = 1) {
auto less = [granularity](const int &a, const int &b) {
return a / granularity < b / granularity;
};
auto equal = [granularity](const int &a, const int &b) {
return a / granularity == b / granularity;
};
log("\nTesting skiplist size {} with granularity {}", size, granularity);
// test at 1/4, 1/2 and 3/4 points
std::vector<int> test_positions({size / 4, size / 2, size * 3 / 4});
std::vector<std::vector<int>> position(3);
std::vector<std::vector<int>> count(3);
std::vector<std::vector<double>> time(3);
for (int iteration = 0; iteration < iterations; iteration++) {
auto sl = make_sl(size);
for (auto pos : {0, 1, 2}) {
clock_t start_time = clock();
auto pos_and_count =
sl->access().position_and_count(test_positions[pos], less, equal);
auto t = double(clock() - start_time) / CLOCKS_PER_SEC;
position[pos].push_back(pos_and_count.first);
count[pos].push_back(pos_and_count.second);
time[pos].push_back(t);
}
}
// convert values to errors
for (auto pos_index : {0, 1, 2}) {
auto test_position = test_positions[pos_index];
log("\tPosition {}", test_position);
for (auto &position_elem : position[pos_index])
position_elem = std::abs(position_elem - test_position);
log("\t\tMean position error: {}", mean(position[pos_index]));
for (auto &count_elem : count[pos_index])
count_elem = std::abs(count_elem - granularity);
log("\t\tMean count error: {}", mean(count[pos_index]));
log("\t\tMean time (ms): {}", mean(time[pos_index]) * 1000);
}
}
int main(int argc, char *argv[]) {
log("Skiplist position and count testing");
int size = 1000;
int iterations = 10;
if (argc > 1) size = (int)std::stoi(argv[1]);
if (argc > 2) iterations = (int)std::stoi(argv[2]);
std::vector<int> granularities;
for (int i = 1; i < size; i *= 100) granularities.push_back(i);
for (auto granularity : granularities) test(size, iterations, granularity);
return 0;
}
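As a point of reference for the error metrics above, the exact position and count for a sorted container can be computed with the standard algorithms. A minimal sketch using the same granularity-based comparator; it assumes "position" means the index of the first element not less than the needle, which is how the test measures position error.

#include <algorithm>
#include <utility>
#include <vector>

// Exact reference values on a sorted vector: position is the index of the
// first element not "less" than needle, count is the number of elements
// "equal" to needle under the granularity-based comparison used above.
std::pair<int, int> exact_position_and_count(const std::vector<int> &sorted,
                                             int needle, int granularity) {
  auto less = [granularity](int a, int b) {
    return a / granularity < b / granularity;
  };
  auto lo = std::lower_bound(sorted.begin(), sorted.end(), needle, less);
  auto hi = std::upper_bound(sorted.begin(), sorted.end(), needle, less);
  return {static_cast<int>(lo - sorted.begin()), static_cast<int>(hi - lo)};
}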

View File

@ -1,125 +0,0 @@
#include <filesystem>
#include <iostream>
#include <gflags/gflags.h>
#include <glog/logging.h>
#include "communication/bolt/v1/decoder/decoder.hpp"
#include "durability/hashed_file_reader.hpp"
#include "durability/single_node/recovery.hpp"
#include "durability/single_node/version.hpp"
DEFINE_string(snapshot_file, "", "Snapshot file location");
using communication::bolt::Value;
namespace fs = std::filesystem;
int main(int argc, char *argv[]) {
gflags::ParseCommandLineFlags(&argc, &argv, true);
google::InitGoogleLogging(argv[0]);
// This makes sure we update the explorer when we bump the snapshot version.
// Snapshot layout is described in durability/version.hpp
static_assert(durability::kVersion == 11,
"Wrong snapshot version, please update!");
fs::path snapshot_path(FLAGS_snapshot_file);
CHECK(fs::exists(snapshot_path)) << "File doesn't exist!";
HashedFileReader reader;
communication::bolt::Decoder<HashedFileReader> decoder(reader);
CHECK(reader.Open(snapshot_path)) << "Couldn't open snapshot file!";
auto magic_number = durability::kSnapshotMagic;
reader.Read(magic_number.data(), magic_number.size());
CHECK(magic_number == durability::kSnapshotMagic) << "Magic number mismatch";
int64_t vertex_count, edge_count;
uint64_t hash;
CHECK(durability::ReadSnapshotSummary(reader, vertex_count, edge_count, hash))
<< "ReadSnapshotSummary failed";
LOG(INFO) << "Vertex count: " << vertex_count;
LOG(INFO) << "Edge count: " << edge_count;
LOG(INFO) << "Hash: " << hash;
Value dv;
decoder.ReadValue(&dv, Value::Type::Int);
CHECK(dv.ValueInt() == durability::kVersion)
<< "Snapshot version mismatch"
<< ", got " << dv.ValueInt() << " expected " << durability::kVersion;
decoder.ReadValue(&dv, Value::Type::Int);
LOG(INFO) << "Transactional ID of the snapshooter " << dv.ValueInt();
decoder.ReadValue(&dv, Value::Type::List);
for (const auto &value : dv.ValueList()) {
CHECK(value.IsInt()) << "Transaction is not a number!";
LOG(INFO) << "Transactional snapshot of the snapshooter "
<< value.ValueInt();
}
decoder.ReadValue(&dv, Value::Type::List);
auto index_value = dv.ValueList();
for (auto it = index_value.begin(); it != index_value.end();) {
auto label = *it++;
CHECK(label.IsString()) << "Label is not a string!";
CHECK(it != index_value.end()) << "Missing propery for label "
<< label.ValueString();
auto property = *it++;
CHECK(property.IsString()) << "Property is not a string!";
LOG(INFO) << "Adding label " << label.ValueString() << " and property "
<< property.ValueString();
}
decoder.ReadValue(&dv, Value::Type::List);
auto unique_constraint = dv.ValueList();
for (auto it = unique_constraint.begin(); it != unique_constraint.end();) {
std::string log("Adding unique constraint: ");
CHECK(it->IsString()) << "Label is not a string!";
log.append(it->ValueString());
log.append(" -> [");
++it;
CHECK(it->IsInt()) << "Number of properties is not an int!";
int64_t prop_size = it->ValueInt();
++it;
for (int64_t i = 0; i < prop_size; ++i) {
CHECK(it->IsString()) << "Property is not a string!";
log.append(it->ValueString());
if (i != prop_size - 1) {
log.append(", ");
} else {
log.append("]");
}
++it;
}
LOG(INFO) << log;
}
for (int64_t i = 0; i < vertex_count; ++i) {
auto vertex = decoder.ReadValue(&dv, Value::Type::Vertex);
CHECK(vertex) << "Failed to read vertex " << i;
}
for (int64_t i = 0; i < edge_count; ++i) {
auto edge = decoder.ReadValue(&dv, Value::Type::Edge);
CHECK(edge) << "Failed to read edge " << i;
}
reader.ReadType(vertex_count);
LOG(INFO) << "Vertex count: " << vertex_count;
reader.ReadType(edge_count);
LOG(INFO) << "Edge count:" << edge_count;
LOG(INFO) << "Hash: " << reader.hash();
CHECK(reader.Close()) << "Failed to close the reader";
return 0;
}
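For quick reference, the read order the explorer above follows (snapshot format version 11, per the static_assert) is summarized below; this is derived only from the code above, not from separate format documentation.

// Snapshot read order, as implemented by the explorer above:
//   1. magic bytes (durability::kSnapshotMagic)
//   2. snapshot summary: vertex count, edge count, hash (ReadSnapshotSummary)
//   3. Int  - snapshot version (must equal durability::kVersion)
//   4. Int  - transactional ID of the snapshooter
//   5. List - transactional snapshot of the snapshooter (Ints)
//   6. List - index entries, flattened (label, property) string pairs
//   7. List - unique constraints: label, property count, then the properties
//   8. vertex_count Vertex values followed by edge_count Edge values
//   9. trailing vertex count, edge count and the accumulated hash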

View File

@ -1,94 +0,0 @@
#include <filesystem>
#include <iostream>
#include <limits>
#include <gflags/gflags.h>
#include <glog/logging.h>
#include "durability/hashed_file_reader.hpp"
#include "durability/single_node/recovery.hpp"
#include "durability/single_node/state_delta.hpp"
#include "durability/single_node/version.hpp"
#include "durability/single_node/wal.hpp"
#include "transactions/type.hpp"
DEFINE_string(wal_file, "", "WAL file location");
using communication::bolt::Value;
namespace fs = std::filesystem;
std::string StateDeltaTypeToString(database::StateDelta::Type type) {
switch (type) {
case database::StateDelta::Type::TRANSACTION_BEGIN:
return "TRANSACTION_BEGIN";
case database::StateDelta::Type::TRANSACTION_COMMIT:
return "TRANSACTION_COMMIT";
case database::StateDelta::Type::TRANSACTION_ABORT:
return "TRANSACTION_ABORT";
case database::StateDelta::Type::CREATE_VERTEX:
return "CREATE_VERTEX";
case database::StateDelta::Type::CREATE_EDGE:
return "CREATE_EDGE";
case database::StateDelta::Type::SET_PROPERTY_VERTEX:
return "SET_PROPERTY_VERTEX";
case database::StateDelta::Type::SET_PROPERTY_EDGE:
return "SET_PROPERTY_EDGE";
case database::StateDelta::Type::ADD_LABEL:
return "ADD_LABEL";
case database::StateDelta::Type::REMOVE_LABEL:
return "REMOVE_LABEL";
case database::StateDelta::Type::REMOVE_VERTEX:
return "REMOVE_VERTEX";
case database::StateDelta::Type::REMOVE_EDGE:
return "REMOVE_EDGE";
case database::StateDelta::Type::BUILD_INDEX:
return "BUILD_INDEX";
case database::StateDelta::Type::DROP_INDEX:
return "DROP_INDEX";
case database::StateDelta::Type::BUILD_UNIQUE_CONSTRAINT:
return "BUILD_UNIQUE_CONSTRAINT";
case database::StateDelta::Type::DROP_UNIQUE_CONSTRAINT:
return "DROP_UNIQUE_CONSTRAINT";
}
LOG(FATAL) << "Unknown StateDelta type!";
return "";
}
int main(int argc, char *argv[]) {
gflags::ParseCommandLineFlags(&argc, &argv, true);
google::InitGoogleLogging(argv[0]);
fs::path wal_path(FLAGS_wal_file);
CHECK(fs::exists(wal_path)) << "File doesn't exist!";
HashedFileReader wal_reader;
CHECK(wal_reader.Open(wal_path)) << "Couldn't open wal file!";
communication::bolt::Decoder<HashedFileReader> decoder(wal_reader);
auto magic_number = durability::kWalMagic;
wal_reader.Read(magic_number.data(), magic_number.size());
CHECK(magic_number == durability::kWalMagic) << "Wal magic number mismatch";
communication::bolt::Value dv;
decoder.ReadValue(&dv);
CHECK(dv.ValueInt() == durability::kVersion) << "Wal version mismatch";
tx::TransactionId max_observed_tx_id{0};
tx::TransactionId min_observed_tx_id{std::numeric_limits<uint64_t>::max()};
while (true) {
auto delta = database::StateDelta::Decode(wal_reader, decoder);
if (!delta) break;
max_observed_tx_id = std::max(max_observed_tx_id, delta->transaction_id);
min_observed_tx_id = std::min(min_observed_tx_id, delta->transaction_id);
LOG(INFO) << "Found tx: " << delta->transaction_id << " "
<< StateDeltaTypeToString(delta->type);
}
LOG(INFO) << "Min tx " << min_observed_tx_id;
LOG(INFO) << "Max tx " << max_observed_tx_id;
return 0;
}
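A small extension sketch (not part of the original tool, and it additionally needs #include <map>): the same scanning loop can tally how many deltas of each type the WAL contains instead of logging each one.

// Alternative scanning loop: count deltas per type rather than log every one.
std::map<std::string, int64_t> per_type_counts;
while (true) {
  auto delta = database::StateDelta::Decode(wal_reader, decoder);
  if (!delta) break;
  ++per_type_counts[StateDeltaTypeToString(delta->type)];
}
for (const auto &kv : per_type_counts)
  LOG(INFO) << kv.first << ": " << kv.second;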

View File

@ -24,72 +24,12 @@ target_link_libraries(${test_prefix}bolt_encoder mg-communication mg-query-with-
add_unit_test(commit_log_v2.cpp)
target_link_libraries(${test_prefix}commit_log_v2 glog gflags)
add_unit_test(concurrent_id_mapper_single_node.cpp)
target_link_libraries(${test_prefix}concurrent_id_mapper_single_node mg-single-node kvstore_dummy_lib)
add_unit_test(concurrent_map_access.cpp)
target_link_libraries(${test_prefix}concurrent_map_access mg-single-node kvstore_dummy_lib)
add_unit_test(concurrent_map.cpp)
target_link_libraries(${test_prefix}concurrent_map mg-single-node kvstore_dummy_lib)
add_unit_test(database_key_index.cpp)
target_link_libraries(${test_prefix}database_key_index mg-single-node kvstore_dummy_lib)
add_unit_test(database_label_property_index.cpp)
target_link_libraries(${test_prefix}database_label_property_index mg-single-node kvstore_dummy_lib)
add_unit_test(database_transaction_timeout.cpp)
target_link_libraries(${test_prefix}database_transaction_timeout mg-single-node kvstore_dummy_lib)
add_unit_test(datastructure_union_find.cpp)
target_link_libraries(${test_prefix}datastructure_union_find mg-single-node kvstore_dummy_lib)
add_unit_test(deferred_deleter.cpp)
target_link_libraries(${test_prefix}deferred_deleter mg-single-node kvstore_dummy_lib)
add_unit_test(durability.cpp)
target_link_libraries(${test_prefix}durability mg-single-node kvstore_dummy_lib)
add_unit_test(dynamic_bitset.cpp)
target_link_libraries(${test_prefix}dynamic_bitset mg-single-node kvstore_dummy_lib)
add_unit_test(edges_single_node.cpp)
target_link_libraries(${test_prefix}edges_single_node mg-single-node kvstore_dummy_lib)
add_unit_test(graph_db_accessor.cpp)
target_link_libraries(${test_prefix}graph_db_accessor mg-single-node kvstore_dummy_lib)
add_unit_test(graph_db_accessor_index_api.cpp)
target_link_libraries(${test_prefix}graph_db_accessor_index_api mg-single-node kvstore_dummy_lib)
add_unit_test(graph_db.cpp)
target_link_libraries(${test_prefix}graph_db mg-single-node kvstore_dummy_lib)
target_link_libraries(${test_prefix}datastructure_union_find glog gflags)
add_unit_test(kvstore.cpp)
target_link_libraries(${test_prefix}kvstore kvstore_lib glog)
add_unit_test(mvcc.cpp)
target_link_libraries(${test_prefix}mvcc mg-single-node kvstore_dummy_lib)
add_unit_test(mvcc_find.cpp)
target_link_libraries(${test_prefix}mvcc_find mg-single-node kvstore_dummy_lib)
add_unit_test(mvcc_gc.cpp)
target_link_libraries(${test_prefix}mvcc_gc mg-single-node kvstore_dummy_lib)
add_unit_test(mvcc_one_transaction.cpp)
target_link_libraries(${test_prefix}mvcc_one_transaction mg-single-node kvstore_dummy_lib)
add_unit_test(mvcc_parallel_update.cpp)
target_link_libraries(${test_prefix}mvcc_parallel_update mg-single-node kvstore_dummy_lib)
add_unit_test(pod_buffer.cpp)
target_link_libraries(${test_prefix}pod_buffer mg-single-node kvstore_dummy_lib)
add_unit_test(property_value_store.cpp)
target_link_libraries(${test_prefix}property_value_store kvstore_lib mg-single-node)
add_unit_test(replication_log.cpp)
target_link_libraries(${test_prefix}replication_log mg-single-node-ha kvstore_lib glog)
@ -173,29 +113,11 @@ target_link_libraries(${test_prefix}typed_value mg-query-with-kvstore-dummy)
# END mg-query
add_unit_test(queue.cpp)
target_link_libraries(${test_prefix}queue mg-single-node kvstore_dummy_lib)
add_unit_test(record_edge_vertex_accessor.cpp)
target_link_libraries(${test_prefix}record_edge_vertex_accessor mg-single-node kvstore_dummy_lib)
target_link_libraries(${test_prefix}queue glog gflags)
add_unit_test(skip_list.cpp)
target_link_libraries(${test_prefix}skip_list mg-utils)
add_unit_test(skiplist_access.cpp)
target_link_libraries(${test_prefix}skiplist_access mg-single-node kvstore_dummy_lib)
add_unit_test(skiplist_gc.cpp)
target_link_libraries(${test_prefix}skiplist_gc mg-single-node kvstore_dummy_lib)
add_unit_test(skiplist_position_and_count.cpp)
target_link_libraries(${test_prefix}skiplist_position_and_count mg-single-node kvstore_dummy_lib)
add_unit_test(skiplist_reverse_iteration.cpp)
target_link_libraries(${test_prefix}skiplist_reverse_iteration mg-single-node kvstore_dummy_lib)
add_unit_test(skiplist_suffix.cpp)
target_link_libraries(${test_prefix}skiplist_suffix mg-single-node kvstore_dummy_lib)
# TODO: REPLACE single-node-ha
add_unit_test(slk_advanced.cpp)
target_link_libraries(${test_prefix}slk_advanced mg-single-node-ha kvstore_dummy_lib)
@ -209,21 +131,6 @@ target_link_libraries(${test_prefix}slk_streams mg-slk glog gflags fmt)
add_unit_test(small_vector.cpp)
target_link_libraries(${test_prefix}small_vector mg-utils)
add_unit_test(state_delta.cpp)
target_link_libraries(${test_prefix}state_delta mg-single-node kvstore_dummy_lib)
add_unit_test(static_bitset.cpp)
target_link_libraries(${test_prefix}static_bitset mg-single-node kvstore_dummy_lib)
add_unit_test(storage_stat.cpp)
target_link_libraries(${test_prefix}storage_stat mg-single-node kvstore_dummy_lib)
add_unit_test(transaction_engine_single_node.cpp)
target_link_libraries(${test_prefix}transaction_engine_single_node mg-single-node kvstore_dummy_lib)
add_unit_test(unique_constraints.cpp)
target_link_libraries(${test_prefix}unique_constraints mg-single-node kvstore_dummy_lib)
# Test mg-communication
add_unit_test(bolt_chunked_decoder_buffer.cpp)

View File

@ -1,74 +0,0 @@
#include <map>
#include <thread>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "storage/common/types/types.hpp"
#include "storage/single_node/concurrent_id_mapper.hpp"
using IdLabel = storage::Label;
using MapperLabel = storage::ConcurrentIdMapper<IdLabel>;
TEST(ConcurrentIdMapper, SameValueGivesSameId) {
MapperLabel mapper;
EXPECT_EQ(mapper.value_to_id("a"), mapper.value_to_id("a"));
}
TEST(ConcurrentIdMapper, IdToValue) {
MapperLabel mapper;
std::string value = "a";
auto id = mapper.value_to_id(value);
EXPECT_EQ(value, mapper.id_to_value(id));
}
TEST(ConcurrentIdMapper, TwoValuesTwoIds) {
MapperLabel mapper;
EXPECT_NE(mapper.value_to_id("a"), mapper.value_to_id("b"));
}
TEST(ConcurrentIdMapper, SameIdReturnedMultipleThreads) {
const int thread_count = 20;
std::vector<std::string> values;
for (int i = 0; i < 50; ++i) values.emplace_back("value" + std::to_string(i));
// Perform the whole test a number of times since it's stochastic (we're
// trying to detect bad behavior in parallel execution).
for (int loop_ind = 0; loop_ind < 20; ++loop_ind) {
MapperLabel mapper;
std::vector<std::map<IdLabel, std::string>> mappings(thread_count);
std::vector<std::thread> threads;
for (int thread_ind = 0; thread_ind < thread_count; ++thread_ind) {
threads.emplace_back([&mapper, &mappings, &values, thread_ind] {
auto &mapping = mappings[thread_ind];
for (auto &value : values) {
mapping.emplace(mapper.value_to_id(value), value);
}
});
}
for (auto &thread : threads) thread.join();
EXPECT_EQ(mappings[0].size(), values.size());
for (auto &mapping : mappings) EXPECT_EQ(mapping, mappings[0]);
}
}
using IdProperty = storage::Property;
using MapperProperty = storage::ConcurrentIdMapper<IdProperty>;
TEST(ConcurrentIdMapper, PropertyLocation) {
// TODO(ipaljak): write unit tests for storage::Common and all
// derived classes (tests/unit/storage_types.cpp)
std::string prop_on_disk_name = "test_name1";
std::string prop_in_mem_name = "test_name2";
std::vector<std::string> props_on_disk = {prop_on_disk_name};
MapperProperty mapper(props_on_disk);
auto on_disk = mapper.value_to_id(prop_on_disk_name);
ASSERT_EQ(on_disk.Id(), 0);
ASSERT_EQ(on_disk.Location(), storage::Location::Disk);
auto in_mem = mapper.value_to_id(prop_in_mem_name);
ASSERT_EQ(in_mem.Id(), 1);
ASSERT_EQ(in_mem.Location(), storage::Location::Memory);
}

View File

@ -1,71 +0,0 @@
#include <iostream>
#include <fmt/format.h>
#include <glog/logging.h>
#include <gtest/gtest.h>
#include "data_structures/concurrent/concurrent_map.hpp"
using concurrent_map_t = ConcurrentMap<int, int>;
template <typename TAccessor>
void print_skiplist(const TAccessor &access) {
DLOG(INFO) << "Map now has: ";
for (auto &kv : access)
DLOG(INFO) << fmt::format(" ({}, {})", kv.first, kv.second);
}
TEST(ConcurrentMapSkiplist, Mix) {
concurrent_map_t skiplist;
auto accessor = skiplist.access();
// insert 10
EXPECT_TRUE(accessor.insert(1, 10).second);
// try insert 10 again (should fail)
EXPECT_FALSE(accessor.insert(1, 10).second);
// insert 20
EXPECT_TRUE(accessor.insert(2, 20).second);
print_skiplist(accessor);
// value at key 3 shouldn't exist
EXPECT_TRUE(accessor.find(3) == accessor.end());
// value at key 2 should exist
EXPECT_TRUE(accessor.find(2) != accessor.end());
// at key 2 is 20 (true)
EXPECT_EQ(accessor.find(2)->second, 20);
// removed existing (1)
EXPECT_TRUE(accessor.remove(1));
// removed non-existing (3)
EXPECT_FALSE(accessor.remove(3));
// insert (1, 10)
EXPECT_TRUE(accessor.insert(1, 10).second);
// insert (4, 40)
EXPECT_TRUE(accessor.insert(4, 40).second);
print_skiplist(accessor);
}
TEST(ConcurrentMapSkiplist, ConstFind) {
ConcurrentMap<int, int> map;
{
auto access = map.access();
for (int i = 0; i < 10; ++i) access.insert(i, i);
}
{
const auto &const_map = map;
auto access = const_map.access();
auto it = access.find(4);
EXPECT_NE(it, access.end());
it = access.find(12);
EXPECT_EQ(it, access.end());
}
}

View File

@ -1,47 +0,0 @@
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include <vector>
#include "data_structures/concurrent/concurrent_map.hpp"
TEST(ConcurrentMap, Access) {
ConcurrentMap<int, int> input;
{
auto accessor = input.access();
accessor.insert(1, 1);
accessor.insert(2, 2);
accessor.insert(3, 3);
}
auto accessor = input.access();
std::vector<int> results;
for (auto it = accessor.begin(); it != accessor.end(); ++it)
results.push_back(it->first);
EXPECT_THAT(results, testing::ElementsAre(1, 2, 3));
}
TEST(ConcurrentMap, ConstAccess) {
ConcurrentMap<int, int> input;
{
auto accessor = input.access();
accessor.insert(1, 1);
accessor.insert(2, 2);
accessor.insert(3, 3);
}
const ConcurrentMap<int, int> &map = input;
auto accessor = map.access();
std::vector<int> results;
for (auto it = accessor.begin(); it != accessor.end(); ++it)
results.push_back(it->first);
EXPECT_THAT(results, testing::ElementsAre(1, 2, 3));
}
int main(int argc, char **argv) {
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}
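The tests above rely on the scoped-accessor pattern: an accessor pins the map for safe concurrent use and is released when it goes out of scope. A minimal sketch of that usage, illustrative only and built from the calls exercised above:

void AccessorScopeSketch(ConcurrentMap<int, int> &map) {
  {
    auto write_access = map.access();
    write_access.insert(42, 0);  // visible to accessors created afterwards
  }  // the write accessor is released here
  auto read_access = map.access();
  bool found = read_access.find(42) != read_access.end();
  (void)found;  // a real test would assert this
}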

View File

@ -1,210 +0,0 @@
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "database/single_node/graph_db.hpp"
#include "database/single_node/graph_db_accessor.hpp"
#include "storage/common/types/types.hpp"
#include "storage/single_node/vertex.hpp"
#include "transactions/single_node/engine.hpp"
#include "mvcc_gc_common.hpp"
using testing::UnorderedElementsAreArray;
// Test that the index inserts everything uniquely.
TEST(LabelsIndex, UniqueInsert) {
database::KeyIndex<storage::Label, Vertex> index;
database::GraphDb db;
auto dba = db.Access();
tx::Engine engine;
auto t1 = engine.Begin();
mvcc::VersionList<Vertex> vlist(*t1, storage::Gid::FromInt(0));
engine.Commit(*t1);
auto t2 = engine.Begin();
vlist.find(*t2)->labels_.push_back(dba.Label("1"));
index.Update(dba.Label("1"), &vlist, vlist.find(*t2));
// Try multiple inserts
index.Update(dba.Label("1"), &vlist, vlist.find(*t2));
vlist.find(*t2)->labels_.push_back(dba.Label("2"));
index.Update(dba.Label("2"), &vlist, vlist.find(*t2));
vlist.find(*t2)->labels_.push_back(dba.Label("3"));
index.Update(dba.Label("3"), &vlist, vlist.find(*t2));
engine.Commit(*t2);
EXPECT_EQ(index.Count(dba.Label("1")), 1);
EXPECT_EQ(index.Count(dba.Label("2")), 1);
EXPECT_EQ(index.Count(dba.Label("3")), 1);
}
// Check if index filters duplicates.
TEST(LabelsIndex, UniqueFilter) {
database::GraphDb db;
database::KeyIndex<storage::Label, Vertex> index;
auto dba = db.Access();
tx::Engine engine;
auto t1 = engine.Begin();
mvcc::VersionList<Vertex> vlist1(*t1, storage::Gid::FromInt(0));
mvcc::VersionList<Vertex> vlist2(*t1, storage::Gid::FromInt(1));
engine.Advance(t1->id_);
auto r1v1 = vlist1.find(*t1);
auto r1v2 = vlist2.find(*t1);
EXPECT_NE(vlist1.find(*t1), nullptr);
auto label1 = dba.Label("1");
vlist1.find(*t1)->labels_.push_back(label1);
vlist2.find(*t1)->labels_.push_back(label1);
index.Update(label1, &vlist1, r1v1);
index.Update(label1, &vlist2, r1v2);
engine.Commit(*t1);
auto t2 = engine.Begin();
auto r2v1 = vlist1.update(*t2);
auto r2v2 = vlist2.update(*t2);
index.Update(label1, &vlist1, r2v1);
index.Update(label1, &vlist2, r2v2);
engine.Commit(*t2);
auto t3 = engine.Begin();
std::vector<mvcc::VersionList<Vertex> *> expected = {&vlist1, &vlist2};
sort(expected.begin(),
expected.end()); // Entries will be sorted by vlist pointers.
int cnt = 0;
for (auto vlist : index.GetVlists(label1, *t3, false)) {
EXPECT_LT(cnt, expected.size());
EXPECT_EQ(vlist, expected[cnt++]);
}
}
// Delete records that are no longer relevant from the index.
TEST(LabelsIndex, Refresh) {
database::KeyIndex<storage::Label, Vertex> index;
database::GraphDb db;
auto access = db.Access();
tx::Engine engine;
// add two vertices to database
auto t1 = engine.Begin();
mvcc::VersionList<Vertex> vlist1(*t1, storage::Gid::FromInt(0));
mvcc::VersionList<Vertex> vlist2(*t1, storage::Gid::FromInt(1));
engine.Advance(t1->id_);
auto v1r1 = vlist1.find(*t1);
auto v2r1 = vlist2.find(*t1);
EXPECT_NE(v1r1, nullptr);
EXPECT_NE(v2r1, nullptr);
auto label = access.Label("label");
v1r1->labels_.push_back(label);
v2r1->labels_.push_back(label);
index.Update(label, &vlist1, v1r1);
index.Update(label, &vlist2, v2r1);
engine.Commit(*t1);
auto t2 = engine.Begin();
auto v1r2 = vlist1.update(*t2);
auto v2r2 = vlist2.update(*t2);
index.Update(label, &vlist1, v1r2);
index.Update(label, &vlist2, v2r2);
index.Refresh(GcSnapshot(engine, t2), engine);
EXPECT_EQ(index.Count(label), 4);
engine.Commit(*t2);
EXPECT_EQ(index.Count(label), 4);
index.Refresh(GcSnapshot(engine, nullptr), engine);
EXPECT_EQ(index.Count(label), 2);
}
// Transaction hasn't ended and so the vertex is not visible.
TEST(LabelsIndexDb, AddGetZeroLabels) {
database::GraphDb db;
auto dba = db.Access();
auto vertex = dba.InsertVertex();
vertex.add_label(dba.Label("test"));
auto collection = dba.Vertices(dba.Label("test"), false);
std::vector<VertexAccessor> collection_vector(collection.begin(),
collection.end());
EXPECT_EQ(collection_vector.size(), (size_t)0);
}
// Test label index by adding and removing one vertex, and removing label from
// another, while the third one with an irrelevant label exists.
TEST(LabelsIndexDb, AddGetRemoveLabel) {
database::GraphDb db;
{
auto dba = db.Access();
auto vertex1 = dba.InsertVertex();
vertex1.add_label(dba.Label("test"));
auto vertex2 = dba.InsertVertex();
vertex2.add_label(dba.Label("test2"));
auto vertex3 = dba.InsertVertex();
vertex3.add_label(dba.Label("test"));
dba.Commit();
} // Finish transaction.
{
auto dba = db.Access();
auto filtered = dba.Vertices(dba.Label("test"), false);
std::vector<VertexAccessor> collection(filtered.begin(), filtered.end());
auto vertices = dba.Vertices(false);
std::vector<VertexAccessor> expected_collection;
for (auto vertex : vertices) {
if (vertex.has_label(dba.Label("test"))) {
expected_collection.push_back(vertex);
} else {
EXPECT_TRUE(vertex.has_label(dba.Label("test2")));
}
}
EXPECT_EQ(expected_collection.size(), collection.size());
EXPECT_TRUE(collection[0].has_label(dba.Label("test")));
EXPECT_TRUE(collection[1].has_label(dba.Label("test")));
EXPECT_FALSE(collection[0].has_label(dba.Label("test2")));
EXPECT_FALSE(collection[1].has_label(dba.Label("test2")));
dba.RemoveVertex(collection[0]); // Remove from database and test if
// index won't return it.
// Remove label from the vertex and add new label.
collection[1].remove_label(dba.Label("test"));
collection[1].add_label(dba.Label("test2"));
dba.Commit();
}
{
auto dba = db.Access();
auto filtered = dba.Vertices(dba.Label("test"), false);
std::vector<VertexAccessor> collection(filtered.begin(), filtered.end());
auto vertices = dba.Vertices(false);
std::vector<VertexAccessor> expected_collection;
for (auto vertex : vertices) {
if (vertex.has_label(dba.Label("test"))) {
expected_collection.push_back(vertex);
} else {
EXPECT_TRUE(vertex.has_label(dba.Label("test2")));
}
}
// It should be empty since everything with an old label is either deleted
// or doesn't have that label anymore.
EXPECT_EQ(expected_collection.size(), 0);
EXPECT_EQ(collection.size(), 0);
}
}
// TODO gleich - discuss with Flor the API changes and the tests
int main(int argc, char **argv) {
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}

View File

@ -1,205 +0,0 @@
#include <gtest/gtest.h>
#include "database/single_node/graph_db.hpp"
#include "database/single_node/graph_db_accessor.hpp"
#include "storage/common/types/types.hpp"
#include "storage/single_node/indexes/label_property_index.hpp"
#include "transactions/single_node/engine.hpp"
#include "mvcc_gc_common.hpp"
using namespace database;
class LabelPropertyIndexComplexTest : public ::testing::Test {
protected:
virtual void SetUp() {
auto accessor = db_.Access();
label = accessor.Label("label");
property = accessor.Property("property");
label2 = accessor.Label("label2");
property2 = accessor.Property("property2");
key = new LabelPropertyIndex::Key(label, property);
EXPECT_EQ(index.CreateIndex(*key), true);
t = engine.Begin();
vlist = new mvcc::VersionList<Vertex>(*t, storage::Gid::FromInt(0));
engine.Advance(t->id_);
vertex = vlist->find(*t);
ASSERT_NE(vertex, nullptr);
vertex->labels_.push_back(label);
vertex->properties_.set(property, PropertyValue(0));
EXPECT_EQ(index.Count(*key), 0);
}
virtual void TearDown() {
delete key;
delete vlist;
}
public:
GraphDb db_;
LabelPropertyIndex index;
LabelPropertyIndex::Key *key;
tx::Engine engine;
tx::Transaction *t{nullptr};
mvcc::VersionList<Vertex> *vlist;
Vertex *vertex;
storage::Label label;
storage::Property property;
storage::Label label2;
storage::Property property2;
};
TEST(LabelPropertyIndex, CreateIndex) {
GraphDb db;
auto accessor = db.Access();
LabelPropertyIndex::Key key(accessor.Label("test"),
accessor.Property("test2"));
LabelPropertyIndex index;
EXPECT_EQ(index.CreateIndex(key), true);
EXPECT_EQ(index.CreateIndex(key), false);
}
TEST(LabelPropertyIndex, DeleteIndex) {
GraphDb db;
auto accessor = db.Access();
LabelPropertyIndex::Key key(accessor.Label("test"),
accessor.Property("test2"));
LabelPropertyIndex index;
EXPECT_EQ(index.CreateIndex(key), true);
EXPECT_EQ(index.CreateIndex(key), false);
index.DeleteIndex(key);
EXPECT_EQ(index.CreateIndex(key), true);
}
TEST(LabelPropertyIndex, IndexExistance) {
GraphDb db;
auto accessor = db.Access();
LabelPropertyIndex::Key key(accessor.Label("test"),
accessor.Property("test2"));
LabelPropertyIndex index;
EXPECT_EQ(index.CreateIndex(key), true);
// Index doesn't exist - and can't be used until it's been notified as built.
EXPECT_EQ(index.IndexExists(key), true);
}
TEST(LabelPropertyIndex, Count) {
GraphDb db;
auto accessor = db.Access();
auto label = accessor.Label("label");
auto property = accessor.Property("property");
LabelPropertyIndex::Key key(label, property);
LabelPropertyIndex index;
EXPECT_EQ(index.CreateIndex(key), true);
EXPECT_EQ(index.Count(key), 0);
}
// Add on label+property to index.
TEST_F(LabelPropertyIndexComplexTest, UpdateOnLabelPropertyTrue) {
index.UpdateOnLabelProperty(vlist, vertex);
EXPECT_EQ(index.Count(*key), 1);
}
// Try adding on label+property but fail because the labels are cleared.
TEST_F(LabelPropertyIndexComplexTest, UpdateOnLabelPropertyFalse) {
vertex->labels_.clear();
index.UpdateOnLabelProperty(vlist, vertex);
EXPECT_EQ(index.Count(*key), 0);
}
// Add on label to index.
TEST_F(LabelPropertyIndexComplexTest, UpdateOnLabelTrue) {
index.UpdateOnLabel(label, vlist, vertex);
EXPECT_EQ(index.Count(*key), 1);
}
// Try adding on label but fail because label is wrong.
TEST_F(LabelPropertyIndexComplexTest, UpdateOnLabelFalse) {
index.UpdateOnLabel(label2, vlist, vertex);
EXPECT_EQ(index.Count(*key), 0);
}
// Add on property to index.
TEST_F(LabelPropertyIndexComplexTest, UpdateOnPropertyTrue) {
index.UpdateOnProperty(property, vlist, vertex);
EXPECT_EQ(index.Count(*key), 1);
}
// Try adding on property but fail because property is wrong.
TEST_F(LabelPropertyIndexComplexTest, UpdateOnPropertyFalse) {
index.UpdateOnProperty(property2, vlist, vertex);
EXPECT_EQ(index.Count(*key), 0);
}
// Test that the index inserts everything uniquely.
TEST_F(LabelPropertyIndexComplexTest, UniqueInsert) {
index.UpdateOnLabelProperty(vlist, vertex);
index.UpdateOnLabelProperty(vlist, vertex);
EXPECT_EQ(index.Count(*key), 1);
}
// Check if index filters duplicates.
TEST_F(LabelPropertyIndexComplexTest, UniqueFilter) {
index.UpdateOnLabelProperty(vlist, vertex);
engine.Commit(*t);
auto t2 = engine.Begin();
auto vertex2 = vlist->update(*t2);
engine.Commit(*t2);
index.UpdateOnLabelProperty(vlist, vertex2);
EXPECT_EQ(index.Count(*key), 2);
auto t3 = engine.Begin();
auto iter = index.GetVlists(*key, *t3, false);
EXPECT_EQ(std::distance(iter.begin(), iter.end()), 1);
engine.Commit(*t3);
}
// Remove label and check if index vertex is not returned now.
TEST_F(LabelPropertyIndexComplexTest, RemoveLabel) {
index.UpdateOnLabelProperty(vlist, vertex);
auto iter1 = index.GetVlists(*key, *t, false);
EXPECT_EQ(std::distance(iter1.begin(), iter1.end()), 1);
vertex->labels_.clear();
auto iter2 = index.GetVlists(*key, *t, false);
EXPECT_EQ(std::distance(iter2.begin(), iter2.end()), 0);
}
// Remove property and check if vertex is not returned now.
TEST_F(LabelPropertyIndexComplexTest, RemoveProperty) {
index.UpdateOnLabelProperty(vlist, vertex);
auto iter1 = index.GetVlists(*key, *t, false);
EXPECT_EQ(std::distance(iter1.begin(), iter1.end()), 1);
vertex->properties_.clear();
auto iter2 = index.GetVlists(*key, *t, false);
EXPECT_EQ(std::distance(iter2.begin(), iter2.end()), 0);
}
// Refresh with a vertex that loses its labels and properties.
TEST_F(LabelPropertyIndexComplexTest, Refresh) {
index.UpdateOnLabelProperty(vlist, vertex);
engine.Commit(*t);
EXPECT_EQ(index.Count(*key), 1);
vertex->labels_.clear();
vertex->properties_.clear();
index.Refresh(GcSnapshot(engine, nullptr), engine);
auto iter = index.GetVlists(*key, *t, false);
EXPECT_EQ(std::distance(iter.begin(), iter.end()), 0);
}
int main(int argc, char **argv) {
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}

View File

@ -1,16 +0,0 @@
#include <chrono>
#include <thread>
#include <glog/logging.h>
#include <gtest/gtest.h>
#include "database/single_node/graph_db.hpp"
#include "query/exceptions.hpp"
#include "query/interpreter.hpp"
DECLARE_int32(query_execution_time_sec);
TEST(TransactionTimeout, TransactionTimeout) {
FLAGS_query_execution_time_sec = 3;
database::GraphDb db;
auto dba = db.Access();
std::this_thread::sleep_for(std::chrono::seconds(5));
ASSERT_TRUE(dba.should_abort());
}

View File

@ -1,55 +0,0 @@
#include <atomic>
#include <gtest/gtest.h>
#include "storage/single_node/mvcc/record.hpp"
#include "storage/single_node/deferred_deleter.hpp"
#include "storage/single_node/vertex.hpp"
#include "mvcc_gc_common.hpp"
// Add and count objects.
TEST(DeferredDeleter, AddObjects) {
DeferredDeleter<Vertex> deleter;
for (int i = 0; i < 10; ++i) {
deleter.AddObject(new Vertex, 5);
deleter.AddObject(new Vertex, 5);
EXPECT_EQ(deleter.Count(), (i + 1) * 2);
}
deleter.FreeExpiredObjects(tx::Transaction::MaxId());
}
// Check that the deleter can't be destroyed while it still has objects.
TEST(DeferredDeleter, Destructor) {
std::atomic<int> count{0};
DeferredDeleter<DestrCountRec> *deleter = new DeferredDeleter<DestrCountRec>;
for (int i = 0; i < 10; ++i) {
deleter->AddObject(new DestrCountRec(count), 5);
deleter->AddObject(new DestrCountRec(count), 5);
EXPECT_EQ(deleter->Count(), (i + 1) * 2);
}
EXPECT_EQ(0, count);
EXPECT_DEATH(delete deleter, "");
// We shouldn't leak memory.
deleter->FreeExpiredObjects(tx::Transaction::MaxId());
delete deleter;
}
// Check if deleter frees objects.
TEST(DeferredDeleter, FreeExpiredObjects) {
DeferredDeleter<DestrCountRec> deleter;
std::atomic<int> count{0};
deleter.AddObject(new DestrCountRec(count), 5);
deleter.AddObject(new DestrCountRec(count), 5);
deleter.FreeExpiredObjects(5);
EXPECT_EQ(deleter.Count(), 2);
EXPECT_EQ(count, 0);
deleter.FreeExpiredObjects(6);
EXPECT_EQ(deleter.Count(), 0);
EXPECT_EQ(count, 2);
}
int main(int argc, char **argv) {
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}
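A minimal usage sketch built only from the calls exercised above (the transaction ids are illustrative): objects are queued with the id of the transaction that made them obsolete, and FreeExpiredObjects frees everything queued with a smaller id.

void DeferredDeleterUsageSketch() {
  DeferredDeleter<Vertex> deleter;
  deleter.AddObject(new Vertex, 5);  // queued; the deleter takes ownership
  deleter.FreeExpiredObjects(6);     // frees entries queued with id < 6
}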

File diff suppressed because it is too large

View File

@ -1,131 +0,0 @@
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "data_structures/bitset/dynamic_bitset.hpp"
namespace {
template <typename T>
class DynamicBitsetTest : public ::testing::Test {};
typedef ::testing::Types<DynamicBitset<>, DynamicBitset<uint8_t, 8>>
DynamicBitsetTypes;
TYPED_TEST_CASE(DynamicBitsetTest, DynamicBitsetTypes);
TYPED_TEST(DynamicBitsetTest, BasicAtAndSet) {
TypeParam db;
EXPECT_EQ(db.at(17, 1), 0);
EXPECT_EQ(db.at(17), false);
db.set(17, 1);
EXPECT_EQ(db.at(17, 1), 1);
EXPECT_EQ(db.at(17), true);
}
TYPED_TEST(DynamicBitsetTest, GroupAt) {
TypeParam db;
db.set(0, 1);
db.set(1, 1);
EXPECT_EQ(db.at(0, 2), 1 | 2);
db.set(3, 1);
EXPECT_EQ(db.at(0, 2), 1 | 2);
EXPECT_EQ(db.at(0, 3), 1 | 2);
EXPECT_EQ(db.at(0, 4), 1 | 2 | 8);
EXPECT_EQ(db.at(1, 1), 1);
EXPECT_EQ(db.at(1, 2), 1);
EXPECT_EQ(db.at(1, 3), 1 | 4);
}
TYPED_TEST(DynamicBitsetTest, GroupSet) {
TypeParam db;
EXPECT_EQ(db.at(0, 3), 0);
db.set(1, 2);
EXPECT_FALSE(db.at(0));
EXPECT_TRUE(db.at(1));
EXPECT_TRUE(db.at(2));
EXPECT_FALSE(db.at(3));
}
class Clear : public ::testing::Test {
protected:
DynamicBitset<> db;
void SetUp() override {
db.set(17, 1);
db.set(18, 1);
EXPECT_EQ(db.at(17), true);
EXPECT_EQ(db.at(18), true);
}
};
TEST_F(Clear, OneElement) {
db.clear(17, 1);
EXPECT_EQ(db.at(17), false);
EXPECT_EQ(db.at(18), true);
}
TEST_F(Clear, Group) {
db.clear(17, 2);
EXPECT_EQ(db.at(17), false);
EXPECT_EQ(db.at(18), false);
}
TEST_F(Clear, EmptyGroup) {
db.clear(17, 0);
EXPECT_EQ(db.at(17), true);
EXPECT_EQ(db.at(18), true);
}
TEST(DynamicBitset, ConstBitset) {
auto const_accepting = [](const DynamicBitset<> &cdbs) {
EXPECT_FALSE(cdbs.at(16));
EXPECT_TRUE(cdbs.at(17));
EXPECT_FALSE(cdbs.at(18));
};
DynamicBitset<> dbs;
dbs.set(17);
const_accepting(dbs);
}
TEST(DynamicBitSet, PrefixDeleteDontDeleteHead) {
DynamicBitset<uint8_t, 8> dbs;
dbs.set(7, 1);
dbs.delete_prefix(8);
EXPECT_EQ(dbs.at(7), 1);
}
// Checks that the block is not deleted when the prefix bound falls one bit
// short of the block boundary.
TEST(DynamicBitSet, PrefixDeleteDeleteOneBlockOffByOne) {
DynamicBitset<uint8_t, 8> dbs;
dbs.set(7, 1);
// Extends number of blocks
dbs.set(10, 1);
dbs.delete_prefix(7);
EXPECT_EQ(dbs.at(7), 1);
}
TEST(DynamicBitSet, DeletePrefixDeleteOneBlock) {
DynamicBitset<uint8_t, 8> dbs;
dbs.set(7, 1);
// Extends number of blocks
dbs.set(10, 1);
dbs.delete_prefix(8);
EXPECT_DEATH(dbs.at(7), "chunk is nullptr");
EXPECT_EQ(dbs.at(10), 1);
}
TEST(DynamicBitSet, DeletePrefixDeleteMultipleBlocks) {
DynamicBitset<uint8_t, 8> dbs;
dbs.set(7, 1);
dbs.set(15, 1);
dbs.set(23, 1);
dbs.set(31, 1);
dbs.delete_prefix(30);
EXPECT_DEATH(dbs.at(7), "chunk is nullptr");
EXPECT_DEATH(dbs.at(15), "chunk is nullptr");
EXPECT_DEATH(dbs.at(23), "chunk is nullptr");
EXPECT_EQ(dbs.at(31), 1);
}
} // namespace
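The delete_prefix semantics exercised by the last four tests can be summarized as follows (derived only from the expectations above):

// delete_prefix(i) frees only whole blocks whose bits all lie below i, and it
// never frees the most recently allocated block; bits in freed blocks become
// unreachable, and reading them aborts with "chunk is nullptr".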

View File

@ -1,102 +0,0 @@
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "storage/single_node/edge.hpp"
#include "storage/single_node/vertex.hpp"
#include "transactions/single_node/engine.hpp"
#include "utils/algorithm.hpp"
#include "storage/single_node/edges.hpp"
TEST(Edges, Filtering) {
Edges edges;
tx::Engine tx_engine;
auto tx = tx_engine.Begin();
int64_t vertex_gid = 0;
mvcc::VersionList<Vertex> v0(*tx, storage::Gid::FromInt(vertex_gid++));
mvcc::VersionList<Vertex> v1(*tx, storage::Gid::FromInt(vertex_gid++));
mvcc::VersionList<Vertex> v2(*tx, storage::Gid::FromInt(vertex_gid++));
mvcc::VersionList<Vertex> v3(*tx, storage::Gid::FromInt(vertex_gid++));
storage::EdgeType t1{1};
storage::EdgeType t2{2};
int64_t edge_gid = 0;
mvcc::VersionList<Edge> e1(*tx, storage::Gid::FromInt(edge_gid++), &v0, &v1,
t1);
edges.emplace(&v1, &e1, t1);
mvcc::VersionList<Edge> e2(*tx, storage::Gid::FromInt(edge_gid++), &v0, &v2,
t2);
edges.emplace(&v2, &e2, t2);
mvcc::VersionList<Edge> e3(*tx, storage::Gid::FromInt(edge_gid++), &v0, &v3,
t1);
edges.emplace(&v3, &e3, t1);
mvcc::VersionList<Edge> e4(*tx, storage::Gid::FromInt(edge_gid++), &v0, &v1,
t2);
edges.emplace(&v1, &e4, t2);
mvcc::VersionList<Edge> e5(*tx, storage::Gid::FromInt(edge_gid++), &v0, &v2,
t1);
edges.emplace(&v2, &e5, t1);
mvcc::VersionList<Edge> e6(*tx, storage::Gid::FromInt(edge_gid++), &v0, &v3,
t2);
edges.emplace(&v3, &e6, t2);
auto edge_addresses = [edges](mvcc::VersionList<Vertex> *dest,
std::vector<storage::EdgeType> *edge_types) {
std::vector<mvcc::VersionList<Edge> *> ret;
for (auto it = edges.begin(dest, edge_types); it != edges.end(); ++it)
ret.push_back(it->edge);
return ret;
};
{ // no filtering
EXPECT_THAT(edge_addresses(nullptr, nullptr),
::testing::UnorderedElementsAre(&e1, &e2, &e3, &e4, &e5, &e6));
}
{
// filter by node
EXPECT_THAT(edge_addresses(&v1, nullptr),
::testing::UnorderedElementsAre(&e1, &e4));
EXPECT_THAT(edge_addresses(&v2, nullptr),
::testing::UnorderedElementsAre(&e2, &e5));
EXPECT_THAT(edge_addresses(&v3, nullptr),
::testing::UnorderedElementsAre(&e3, &e6));
}
{
// filter by edge type
std::vector<storage::EdgeType> f1{t1};
std::vector<storage::EdgeType> f2{t2};
std::vector<storage::EdgeType> f3{t1, t2};
EXPECT_THAT(edge_addresses(nullptr, &f1),
::testing::UnorderedElementsAre(&e1, &e3, &e5));
EXPECT_THAT(edge_addresses(nullptr, &f2),
::testing::UnorderedElementsAre(&e2, &e4, &e6));
EXPECT_THAT(edge_addresses(nullptr, &f3),
::testing::UnorderedElementsAre(&e1, &e2, &e3, &e4, &e5, &e6));
}
{
// filter by both node and edge type
std::vector<storage::EdgeType> f1{t1};
std::vector<storage::EdgeType> f2{t2};
EXPECT_THAT(edge_addresses(&v1, &f1), ::testing::UnorderedElementsAre(&e1));
EXPECT_THAT(edge_addresses(&v1, &f2), ::testing::UnorderedElementsAre(&e4));
EXPECT_THAT(edge_addresses(&v2, &f1), ::testing::UnorderedElementsAre(&e5));
EXPECT_THAT(edge_addresses(&v2, &f2), ::testing::UnorderedElementsAre(&e2));
EXPECT_THAT(edge_addresses(&v3, &f1), ::testing::UnorderedElementsAre(&e3));
EXPECT_THAT(edge_addresses(&v3, &f2), ::testing::UnorderedElementsAre(&e6));
}
tx_engine.Abort(*tx);
}

View File

@ -1,38 +0,0 @@
#include <memory>
#include <gtest/gtest.h>
#include "database/single_node/graph_db.hpp"
#include "database/single_node/graph_db_accessor.hpp"
#include "storage/common/types/types.hpp"
#include "storage/single_node/indexes/label_property_index.hpp"
TEST(GraphDbTest, GarbageCollectIndices) {
database::Config config;
config.gc_cycle_sec = -1;
database::GraphDb graph_db{config};
auto dba = graph_db.Access();
auto commit = [&] {
dba.Commit();
dba = graph_db.Access();
};
auto label = dba.Label("label");
auto property = dba.Property("property");
dba.BuildIndex(label, property);
commit();
auto vertex = dba.InsertVertex();
vertex.add_label(label);
vertex.PropsSet(property, PropertyValue(42));
commit();
EXPECT_EQ(dba.VerticesCount(label, property), 1);
auto vertex_transferred = dba.Transfer(vertex);
dba.RemoveVertex(vertex_transferred.value());
EXPECT_EQ(dba.VerticesCount(label, property), 1);
commit();
EXPECT_EQ(dba.VerticesCount(label, property), 1);
graph_db.CollectGarbage();
EXPECT_EQ(dba.VerticesCount(label, property), 0);
}

View File

@ -1,398 +0,0 @@
#include <optional>
#include <thread>
#include <gtest/gtest.h>
#include "data_structures/concurrent/skiplist.hpp"
#include "database/single_node/graph_db.hpp"
#include "database/single_node/graph_db_accessor.hpp"
#include "storage/common/types/types.hpp"
#include "storage/edge_accessor.hpp"
#include "storage/vertex_accessor.hpp"
using namespace database;
using namespace storage;
template <typename TIterable>
auto Count(TIterable iterable) {
return std::distance(iterable.begin(), iterable.end());
}
TEST(GraphDbAccessorTest, InsertVertex) {
GraphDb db;
auto accessor = db.Access();
storage::GidGenerator generator;
EXPECT_EQ(Count(accessor.Vertices(false)), 0);
EXPECT_EQ(accessor.InsertVertex().gid(), generator.Next());
EXPECT_EQ(Count(accessor.Vertices(false)), 0);
EXPECT_EQ(Count(accessor.Vertices(true)), 1);
accessor.AdvanceCommand();
EXPECT_EQ(Count(accessor.Vertices(false)), 1);
EXPECT_EQ(accessor.InsertVertex().gid(), generator.Next());
EXPECT_EQ(Count(accessor.Vertices(false)), 1);
EXPECT_EQ(Count(accessor.Vertices(true)), 2);
accessor.AdvanceCommand();
EXPECT_EQ(Count(accessor.Vertices(false)), 2);
}
TEST(GraphDbAccessorTest, UniqueVertexId) {
GraphDb db;
SkipList<storage::Gid> ids;
std::vector<std::thread> threads;
for (int i = 0; i < 50; i++) {
threads.emplace_back([&db, &ids]() {
auto dba = db.Access();
auto access = ids.access();
for (int i = 0; i < 200; i++) access.insert(dba.InsertVertex().gid());
});
}
for (auto &thread : threads) thread.join();
EXPECT_EQ(ids.access().size(), 50 * 200);
}
TEST(GraphDbAccessorTest, RemoveVertexSameTransaction) {
GraphDb db;
auto accessor = db.Access();
EXPECT_EQ(Count(accessor.Vertices(false)), 0);
auto va1 = accessor.InsertVertex();
accessor.AdvanceCommand();
EXPECT_EQ(Count(accessor.Vertices(false)), 1);
EXPECT_TRUE(accessor.RemoveVertex(va1));
EXPECT_EQ(Count(accessor.Vertices(false)), 1);
EXPECT_EQ(Count(accessor.Vertices(true)), 0);
accessor.AdvanceCommand();
EXPECT_EQ(Count(accessor.Vertices(false)), 0);
EXPECT_EQ(Count(accessor.Vertices(true)), 0);
}
TEST(GraphDbAccessorTest, RemoveVertexDifferentTransaction) {
GraphDb db;
// first transaction creates a vertex
{
auto accessor = db.Access();
accessor.InsertVertex();
accessor.Commit();
}
// second transaction checks that it sees it, and deletes it
{
auto accessor = db.Access();
EXPECT_EQ(Count(accessor.Vertices(false)), 1);
EXPECT_EQ(Count(accessor.Vertices(true)), 1);
for (auto vertex_accessor : accessor.Vertices(false))
accessor.RemoveVertex(vertex_accessor);
accessor.Commit();
}
// third transaction checks that it does not see the vertex
{
auto accessor = db.Access();
EXPECT_EQ(Count(accessor.Vertices(false)), 0);
EXPECT_EQ(Count(accessor.Vertices(true)), 0);
}
}
TEST(GraphDbAccessorTest, InsertEdge) {
GraphDb db;
auto dba = db.Access();
auto va1 = dba.InsertVertex();
auto va2 = dba.InsertVertex();
dba.AdvanceCommand();
EXPECT_EQ(va1.in_degree(), 0);
EXPECT_EQ(va1.out_degree(), 0);
EXPECT_EQ(va2.in_degree(), 0);
EXPECT_EQ(va2.out_degree(), 0);
// setup (v1) - [:likes] -> (v2)
dba.InsertEdge(va1, va2, dba.EdgeType("likes"));
EXPECT_EQ(Count(dba.Edges(false)), 0);
EXPECT_EQ(Count(dba.Edges(true)), 1);
dba.AdvanceCommand();
EXPECT_EQ(Count(dba.Edges(false)), 1);
EXPECT_EQ(Count(dba.Edges(true)), 1);
EXPECT_EQ(va1.out().begin()->to(), va2);
EXPECT_EQ(va2.in().begin()->from(), va1);
EXPECT_EQ(va1.in_degree(), 0);
EXPECT_EQ(va1.out_degree(), 1);
EXPECT_EQ(va2.in_degree(), 1);
EXPECT_EQ(va2.out_degree(), 0);
// setup (v1) - [:likes] -> (v2) <- [:hates] - (v3)
auto va3 = dba.InsertVertex();
dba.InsertEdge(va3, va2, dba.EdgeType("hates"));
EXPECT_EQ(Count(dba.Edges(false)), 1);
EXPECT_EQ(Count(dba.Edges(true)), 2);
dba.AdvanceCommand();
EXPECT_EQ(Count(dba.Edges(false)), 2);
EXPECT_EQ(va3.out().begin()->to(), va2);
EXPECT_EQ(va1.in_degree(), 0);
EXPECT_EQ(va1.out_degree(), 1);
EXPECT_EQ(va2.in_degree(), 2);
EXPECT_EQ(va2.out_degree(), 0);
EXPECT_EQ(va3.in_degree(), 0);
EXPECT_EQ(va3.out_degree(), 1);
}
TEST(GraphDbAccessorTest, UniqueEdgeId) {
GraphDb db;
SkipList<storage::Gid> ids;
std::vector<std::thread> threads;
for (int i = 0; i < 50; i++) {
threads.emplace_back([&db, &ids]() {
auto dba = db.Access();
auto v1 = dba.InsertVertex();
auto v2 = dba.InsertVertex();
auto edge_type = dba.EdgeType("edge_type");
auto access = ids.access();
for (int i = 0; i < 200; i++)
access.insert(dba.InsertEdge(v1, v2, edge_type).gid());
});
}
for (auto &thread : threads) thread.join();
EXPECT_EQ(ids.access().size(), 50 * 200);
}
TEST(GraphDbAccessorTest, RemoveEdge) {
GraphDb db;
auto dba = db.Access();
// setup (v1) - [:likes] -> (v2) <- [:hates] - (v3)
auto va1 = dba.InsertVertex();
auto va2 = dba.InsertVertex();
auto va3 = dba.InsertVertex();
dba.InsertEdge(va1, va2, dba.EdgeType("likes"));
dba.InsertEdge(va3, va2, dba.EdgeType("hates"));
dba.AdvanceCommand();
EXPECT_EQ(Count(dba.Edges(false)), 2);
EXPECT_EQ(Count(dba.Edges(true)), 2);
// remove all [:hates] edges
for (auto edge : dba.Edges(false))
if (edge.EdgeType() == dba.EdgeType("hates")) dba.RemoveEdge(edge);
EXPECT_EQ(Count(dba.Edges(false)), 2);
EXPECT_EQ(Count(dba.Edges(true)), 1);
// current state: (v1) - [:likes] -> (v2), (v3)
dba.AdvanceCommand();
EXPECT_EQ(Count(dba.Edges(false)), 1);
EXPECT_EQ(Count(dba.Edges(true)), 1);
EXPECT_EQ(Count(dba.Vertices(false)), 3);
EXPECT_EQ(Count(dba.Vertices(true)), 3);
for (auto edge : dba.Edges(false)) {
EXPECT_EQ(edge.EdgeType(), dba.EdgeType("likes"));
auto v1 = edge.from();
auto v2 = edge.to();
// ensure correct connectivity for all the vertices
for (auto vertex : dba.Vertices(false)) {
if (vertex == v1) {
EXPECT_EQ(vertex.in_degree(), 0);
EXPECT_EQ(vertex.out_degree(), 1);
} else if (vertex == v2) {
EXPECT_EQ(vertex.in_degree(), 1);
EXPECT_EQ(vertex.out_degree(), 0);
} else {
EXPECT_EQ(vertex.in_degree(), 0);
EXPECT_EQ(vertex.out_degree(), 0);
}
}
}
}
TEST(GraphDbAccessorTest, DetachRemoveVertex) {
GraphDb db;
auto dba = db.Access();
// setup (v0)- []->(v1)<-[]-(v2)<-[]-(v3)
std::vector<VertexAccessor> vertices;
for (int i = 0; i < 4; ++i) vertices.emplace_back(dba.InsertVertex());
auto edge_type = dba.EdgeType("type");
dba.InsertEdge(vertices[0], vertices[1], edge_type);
dba.InsertEdge(vertices[2], vertices[1], edge_type);
dba.InsertEdge(vertices[3], vertices[2], edge_type);
dba.AdvanceCommand();
for (auto &vertex : vertices) vertex.Reconstruct();
// ensure that plain remove does NOT work
EXPECT_EQ(Count(dba.Vertices(false)), 4);
EXPECT_EQ(Count(dba.Edges(false)), 3);
EXPECT_FALSE(dba.RemoveVertex(vertices[0]));
EXPECT_FALSE(dba.RemoveVertex(vertices[1]));
EXPECT_FALSE(dba.RemoveVertex(vertices[2]));
EXPECT_EQ(Count(dba.Vertices(false)), 4);
EXPECT_EQ(Count(dba.Edges(false)), 3);
dba.DetachRemoveVertex(vertices[2]);
EXPECT_EQ(Count(dba.Vertices(false)), 4);
EXPECT_EQ(Count(dba.Vertices(true)), 3);
EXPECT_EQ(Count(dba.Edges(false)), 3);
EXPECT_EQ(Count(dba.Edges(true)), 1);
dba.AdvanceCommand();
for (auto &vertex : vertices) vertex.Reconstruct();
EXPECT_EQ(Count(dba.Vertices(false)), 3);
EXPECT_EQ(Count(dba.Edges(false)), 1);
EXPECT_TRUE(dba.RemoveVertex(vertices[3]));
EXPECT_EQ(Count(dba.Vertices(true)), 2);
EXPECT_EQ(Count(dba.Vertices(false)), 3);
dba.AdvanceCommand();
for (auto &vertex : vertices) vertex.Reconstruct();
EXPECT_EQ(Count(dba.Vertices(false)), 2);
EXPECT_EQ(Count(dba.Edges(false)), 1);
for (auto va : dba.Vertices(false)) EXPECT_FALSE(dba.RemoveVertex(va));
dba.AdvanceCommand();
for (auto &vertex : vertices) vertex.Reconstruct();
EXPECT_EQ(Count(dba.Vertices(false)), 2);
EXPECT_EQ(Count(dba.Edges(false)), 1);
for (auto va : dba.Vertices(false)) {
EXPECT_FALSE(dba.RemoveVertex(va));
dba.DetachRemoveVertex(va);
break;
}
EXPECT_EQ(Count(dba.Vertices(true)), 1);
EXPECT_EQ(Count(dba.Vertices(false)), 2);
dba.AdvanceCommand();
for (auto &vertex : vertices) vertex.Reconstruct();
EXPECT_EQ(Count(dba.Vertices(false)), 1);
EXPECT_EQ(Count(dba.Edges(false)), 0);
// remove the last vertex, it has no connections
// so that should work
for (auto va : dba.Vertices(false)) EXPECT_TRUE(dba.RemoveVertex(va));
dba.AdvanceCommand();
EXPECT_EQ(Count(dba.Vertices(false)), 0);
EXPECT_EQ(Count(dba.Edges(false)), 0);
}
TEST(GraphDbAccessorTest, DetachRemoveVertexMultiple) {
// This test checks that we can detach remove the
// same vertex / edge multiple times
GraphDb db;
auto dba = db.Access();
// setup: make a fully connected N graph
// with cycles too!
int N = 7;
std::vector<VertexAccessor> vertices;
auto edge_type = dba.EdgeType("edge");
for (int i = 0; i < N; ++i) vertices.emplace_back(dba.InsertVertex());
for (int j = 0; j < N; ++j)
for (int k = 0; k < N; ++k)
dba.InsertEdge(vertices[j], vertices[k], edge_type);
dba.AdvanceCommand();
for (auto &vertex : vertices) vertex.Reconstruct();
EXPECT_EQ(Count(dba.Vertices(false)), N);
EXPECT_EQ(Count(dba.Edges(false)), N * N);
// detach delete one vertex
dba.DetachRemoveVertex(vertices[0]);
dba.AdvanceCommand();
for (auto &vertex : vertices) vertex.Reconstruct();
EXPECT_EQ(Count(dba.Vertices(false)), N - 1);
EXPECT_EQ(Count(dba.Edges(false)), (N - 1) * (N - 1));
// detach delete two neighboring vertices
dba.DetachRemoveVertex(vertices[1]);
dba.DetachRemoveVertex(vertices[2]);
dba.AdvanceCommand();
for (auto &vertex : vertices) vertex.Reconstruct();
EXPECT_EQ(Count(dba.Vertices(false)), N - 3);
EXPECT_EQ(Count(dba.Edges(false)), (N - 3) * (N - 3));
// detach delete everything, buwahahahaha
for (int l = 3; l < N; ++l) dba.DetachRemoveVertex(vertices[l]);
dba.AdvanceCommand();
for (auto &vertex : vertices) vertex.Reconstruct();
EXPECT_EQ(Count(dba.Vertices(false)), 0);
EXPECT_EQ(Count(dba.Edges(false)), 0);
}
TEST(GraphDbAccessorTest, Labels) {
GraphDb db;
auto dba = db.Access();
Label label_friend = dba.Label("friend");
EXPECT_EQ(label_friend, dba.Label("friend"));
EXPECT_NE(label_friend, dba.Label("friend2"));
EXPECT_EQ(dba.LabelName(label_friend), "friend");
// test that getting labels through a different accessor works
EXPECT_EQ(label_friend, db.Access().Label("friend"));
EXPECT_NE(label_friend, db.Access().Label("friend2"));
}
TEST(GraphDbAccessorTest, EdgeTypes) {
GraphDb db;
auto dba = db.Access();
EdgeType edge_type = dba.EdgeType("likes");
EXPECT_EQ(edge_type, dba.EdgeType("likes"));
EXPECT_NE(edge_type, dba.EdgeType("hates"));
EXPECT_EQ(dba.EdgeTypeName(edge_type), "likes");
// test that getting labels through a different accessor works
EXPECT_EQ(edge_type, db.Access().EdgeType("likes"));
EXPECT_NE(edge_type, db.Access().EdgeType("hates"));
}
TEST(GraphDbAccessorTest, Properties) {
GraphDb db;
auto dba = db.Access();
Property prop = dba.Property("name");
EXPECT_EQ(prop, dba.Property("name"));
EXPECT_NE(prop, dba.Property("surname"));
EXPECT_EQ(dba.PropertyName(prop), "name");
// test that getting labels through a different accessor works
EXPECT_EQ(prop, db.Access().Property("name"));
EXPECT_NE(prop, db.Access().Property("surname"));
}
TEST(GraphDbAccessorTest, Transfer) {
GraphDb db;
auto dba1 = db.Access();
auto prop = dba1.Property("property");
VertexAccessor v1 = dba1.InsertVertex();
v1.PropsSet(prop, PropertyValue(1));
VertexAccessor v2 = dba1.InsertVertex();
v2.PropsSet(prop, PropertyValue(2));
EdgeAccessor e12 = dba1.InsertEdge(v1, v2, dba1.EdgeType("et"));
e12.PropsSet(prop, PropertyValue(12));
// make dba2 that has dba1 in its snapshot, so data isn't visible
auto dba2 = db.Access();
EXPECT_EQ(dba2.Transfer(v1), std::nullopt);
EXPECT_EQ(dba2.Transfer(e12), std::nullopt);
// make dba3 that does not have dba1 in its snapshot
dba1.Commit();
auto dba3 = db.Access();
// we can transfer accessors even though the GraphDbAccessor they
// belong to is not alive anymore
EXPECT_EQ(dba3.Transfer(v1)->PropsAt(prop).ValueInt(), 1);
EXPECT_EQ(dba3.Transfer(e12)->PropsAt(prop).ValueInt(), 12);
}
int main(int argc, char **argv) {
::testing::InitGoogleTest(&argc, argv);
// ::testing::GTEST_FLAG(filter) = "*.DetachRemoveVertex";
return RUN_ALL_TESTS();
}
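A minimal lifecycle sketch distilled from the tests above (using only calls they already exercise): one accessor per transaction, AdvanceCommand to make earlier writes visible to later commands of the same transaction, Commit to finish it.

void AccessorLifecycleSketch(database::GraphDb &db) {
  auto dba = db.Access();
  auto v1 = dba.InsertVertex();
  auto v2 = dba.InsertVertex();
  dba.InsertEdge(v1, v2, dba.EdgeType("likes"));
  dba.AdvanceCommand();  // earlier writes become visible within this transaction
  dba.Commit();          // ... and to transactions started after the commit
}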

View File

@ -1,445 +0,0 @@
#include <atomic>
#include <memory>
#include <optional>
#include <thread>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "database/single_node/graph_db.hpp"
#include "database/single_node/graph_db_accessor.hpp"
#include "utils/bound.hpp"
using testing::UnorderedElementsAreArray;
template <typename TIterable>
auto Count(TIterable iterable) {
return std::distance(iterable.begin(), iterable.end());
}
/**
* A test fixture that contains a database, accessor,
* label, property and an edge_type.
*/
class GraphDbAccessorIndex : public testing::Test {
protected:
database::GraphDb db;
database::GraphDbAccessor dba{db.Access()};
storage::Property property = dba.Property("property");
storage::Label label = dba.Label("label");
storage::EdgeType edge_type = dba.EdgeType("edge_type");
auto AddVertex() {
auto vertex = dba.InsertVertex();
vertex.add_label(label);
return vertex;
}
auto AddVertex(int property_value) {
auto vertex = dba.InsertVertex();
vertex.add_label(label);
vertex.PropsSet(property, PropertyValue(property_value));
return vertex;
}
// commits the current dba, and replaces it with a new one
void Commit() {
dba.Commit();
dba = db.Access();
}
};
TEST_F(GraphDbAccessorIndex, LabelIndexCount) {
auto label2 = dba.Label("label2");
EXPECT_EQ(dba.VerticesCount(label), 0);
EXPECT_EQ(dba.VerticesCount(label2), 0);
EXPECT_EQ(dba.VerticesCount(), 0);
for (int i = 0; i < 11; ++i) dba.InsertVertex().add_label(label);
for (int i = 0; i < 17; ++i) dba.InsertVertex().add_label(label2);
// even though the xxx_count functions in database::GraphDbAccessor can
// over-estimate, in this situation they should be exact (nothing was ever
// deleted)
EXPECT_EQ(dba.VerticesCount(label), 11);
EXPECT_EQ(dba.VerticesCount(label2), 17);
EXPECT_EQ(dba.VerticesCount(), 28);
}
TEST_F(GraphDbAccessorIndex, LabelIndexIteration) {
// add 10 vertices, check visibility
for (int i = 0; i < 10; i++) AddVertex();
EXPECT_EQ(Count(dba.Vertices(label, false)), 0);
EXPECT_EQ(Count(dba.Vertices(label, true)), 10);
Commit();
EXPECT_EQ(Count(dba.Vertices(label, false)), 10);
EXPECT_EQ(Count(dba.Vertices(label, true)), 10);
// remove 3 vertices, check visibility
int deleted = 0;
for (auto vertex : dba.Vertices(false)) {
dba.RemoveVertex(vertex);
if (++deleted >= 3) break;
}
EXPECT_EQ(Count(dba.Vertices(label, false)), 10);
EXPECT_EQ(Count(dba.Vertices(label, true)), 7);
Commit();
EXPECT_EQ(Count(dba.Vertices(label, false)), 7);
EXPECT_EQ(Count(dba.Vertices(label, true)), 7);
}
TEST_F(GraphDbAccessorIndex, EdgesCount) {
auto edge_type2 = dba.EdgeType("edge_type2");
EXPECT_EQ(dba.EdgesCount(), 0);
auto v1 = AddVertex();
auto v2 = AddVertex();
for (int i = 0; i < 11; ++i) dba.InsertEdge(v1, v2, edge_type);
for (int i = 0; i < 17; ++i) dba.InsertEdge(v1, v2, edge_type2);
// even though the xxx_count functions in database::GraphDbAccessor can
// over-estimate, in this situation they should be exact (nothing was ever
// deleted)
EXPECT_EQ(dba.EdgesCount(), 28);
}
TEST_F(GraphDbAccessorIndex, LabelPropertyIndexBuild) {
AddVertex(0);
Commit();
dba.BuildIndex(label, property);
Commit();
EXPECT_EQ(dba.VerticesCount(label, property), 1);
// confirm that indexes are differentiated by (label, property)
auto label2 = dba.Label("label2");
auto property2 = dba.Property("property2");
dba.BuildIndex(label2, property);
dba.BuildIndex(label, property2);
Commit();
EXPECT_EQ(dba.VerticesCount(label, property), 1);
EXPECT_EQ(dba.VerticesCount(label2, property), 0);
EXPECT_EQ(dba.VerticesCount(label, property2), 0);
}
TEST_F(GraphDbAccessorIndex, LabelPropertyIndexDelete) {
dba.BuildIndex(label, property);
Commit();
EXPECT_TRUE(dba.LabelPropertyIndexExists(label, property));
dba.DeleteIndex(label, property);
Commit();
EXPECT_FALSE(dba.LabelPropertyIndexExists(label, property));
}
TEST_F(GraphDbAccessorIndex, LabelPropertyIndexBuildTwice) {
dba.BuildIndex(label, property);
EXPECT_THROW(dba.BuildIndex(label, property), utils::BasicException);
}
TEST_F(GraphDbAccessorIndex, LabelPropertyIndexCount) {
dba.BuildIndex(label, property);
EXPECT_EQ(dba.VerticesCount(label, property), 0);
EXPECT_EQ(Count(dba.Vertices(label, property, true)), 0);
for (int i = 0; i < 14; ++i) AddVertex(0);
EXPECT_EQ(dba.VerticesCount(label, property), 14);
EXPECT_EQ(Count(dba.Vertices(label, property, true)), 14);
}
TEST(GraphDbAccessorIndexApi, LabelPropertyBuildIndexConcurrent) {
const int THREAD_COUNT = 10;
std::vector<std::thread> threads;
database::GraphDb db;
std::atomic<bool> failed{false};
for (int index = 0; index < THREAD_COUNT; ++index) {
threads.emplace_back([&db, &failed, index]() {
// If we fail to create a new transaction, don't bother.
try {
auto dba = db.Access();
try {
// This could either pass or throw.
dba.BuildIndex(dba.Label("l" + std::to_string(index)),
dba.Property("p" + std::to_string(index)));
// If it throws, make sure the exception is right.
} catch (const database::TransactionException &e) {
// Nothing to see here, move along.
} catch (...) {
failed.store(true);
}
} catch (...) {
// Ignore this one also.
}
});
}
for (auto &thread : threads) {
if (thread.joinable()) {
thread.join();
}
}
EXPECT_FALSE(failed.load());
}
#define EXPECT_WITH_MARGIN(x, center) \
EXPECT_THAT( \
x, testing::AllOf(testing::Ge(center - 2), testing::Le(center + 2)));
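// Editorial note (an assumption based on the "can over-estimate" remarks
// above): per-value counts for a (label, property) index are estimates, so
// these checks tolerate a +/-2 deviation instead of requiring exact counts.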
template <class TValue>
auto Inclusive(TValue value) {
return std::make_optional(utils::MakeBoundInclusive(PropertyValue(value)));
}
template <class TValue>
auto Exclusive(TValue value) {
return std::make_optional(utils::MakeBoundExclusive(PropertyValue(value)));
}
TEST_F(GraphDbAccessorIndex, LabelPropertyValueCount) {
dba.BuildIndex(label, property);
// add some vertices without the property
for (int i = 0; i < 20; i++) AddVertex();
// add vertices with prop values in [0, 30), ten vertices for each value
for (int i = 0; i < 300; i++) AddVertex(i / 10);
// add vertices in the [30, 40) range, 100 vertices for each value
for (int i = 0; i < 1000; i++) AddVertex(30 + i / 100);
// test estimates for exact value count
EXPECT_WITH_MARGIN(dba.VerticesCount(label, property, PropertyValue(10)), 10);
EXPECT_WITH_MARGIN(dba.VerticesCount(label, property, PropertyValue(14)), 10);
EXPECT_WITH_MARGIN(dba.VerticesCount(label, property, PropertyValue(30)),
100);
EXPECT_WITH_MARGIN(dba.VerticesCount(label, property, PropertyValue(39)),
100);
EXPECT_EQ(dba.VerticesCount(label, property, PropertyValue(40)), 0);
// helper functions
auto VerticesCount = [this](auto lower, auto upper) {
return dba.VerticesCount(label, property, lower, upper);
};
using std::nullopt;
::testing::FLAGS_gtest_death_test_style = "threadsafe";
EXPECT_DEATH(VerticesCount(nullopt, nullopt), "bound must be provided");
EXPECT_WITH_MARGIN(VerticesCount(nullopt, Exclusive(4)), 40);
EXPECT_WITH_MARGIN(VerticesCount(nullopt, Inclusive(4)), 50);
EXPECT_WITH_MARGIN(VerticesCount(Exclusive(13), nullopt), 160 + 1000);
EXPECT_WITH_MARGIN(VerticesCount(Inclusive(13), nullopt), 170 + 1000);
EXPECT_WITH_MARGIN(VerticesCount(Inclusive(13), Exclusive(14)), 10);
EXPECT_WITH_MARGIN(VerticesCount(Exclusive(13), Inclusive(14)), 10);
EXPECT_WITH_MARGIN(VerticesCount(Exclusive(13), Exclusive(13)), 0);
EXPECT_WITH_MARGIN(VerticesCount(Inclusive(20), Exclusive(13)), 0);
}
#undef EXPECT_WITH_MARGIN
TEST_F(GraphDbAccessorIndex, LabelPropertyValueIteration) {
dba.BuildIndex(label, property);
Commit();
// insert 10 vertices and check visibility
for (int i = 0; i < 10; i++) AddVertex(12);
EXPECT_EQ(Count(dba.Vertices(label, property, PropertyValue(12), false)), 0);
EXPECT_EQ(Count(dba.Vertices(label, property, PropertyValue(12), true)), 10);
Commit();
EXPECT_EQ(Count(dba.Vertices(label, property, PropertyValue(12), false)), 10);
EXPECT_EQ(Count(dba.Vertices(label, property, PropertyValue(12), true)), 10);
}
TEST_F(GraphDbAccessorIndex, LabelPropertyValueSorting) {
dba.BuildIndex(label, property);
Commit();
std::vector<PropertyValue> expected_property_value(50, PropertyValue(0));
// bools - insert in reverse to check for comparison between values.
for (int i = 9; i >= 0; --i) {
auto vertex_accessor = dba.InsertVertex();
vertex_accessor.add_label(label);
vertex_accessor.PropsSet(property, PropertyValue(static_cast<bool>(i / 5)));
expected_property_value[i] = vertex_accessor.PropsAt(property);
}
// integers
for (int i = 0; i < 10; ++i) {
auto vertex_accessor = dba.InsertVertex();
vertex_accessor.add_label(label);
vertex_accessor.PropsSet(property, PropertyValue(i));
expected_property_value[10 + 2 * i] = vertex_accessor.PropsAt(property);
}
// doubles
for (int i = 0; i < 10; ++i) {
auto vertex_accessor = dba.InsertVertex();
vertex_accessor.add_label(label);
vertex_accessor.PropsSet(property,
PropertyValue(static_cast<double>(i + 0.5)));
expected_property_value[10 + 2 * i + 1] = vertex_accessor.PropsAt(property);
}
// strings
for (int i = 0; i < 10; ++i) {
auto vertex_accessor = dba.InsertVertex();
vertex_accessor.add_label(label);
vertex_accessor.PropsSet(property, PropertyValue(std::to_string(i)));
expected_property_value[30 + i] = vertex_accessor.PropsAt(property);
}
// lists of ints - insert in reverse to check for comparison between
// lists.
for (int i = 9; i >= 0; --i) {
auto vertex_accessor = dba.InsertVertex();
vertex_accessor.add_label(label);
std::vector<PropertyValue> value;
value.push_back(PropertyValue(i));
vertex_accessor.PropsSet(property, PropertyValue(value));
expected_property_value[40 + i] = vertex_accessor.PropsAt(property);
}
// Maps. Declare a vector in the expected order, then shuffle when setting on
// vertices.
std::vector<std::map<std::string, PropertyValue>> maps{
{{"b", PropertyValue(12)}},
{{"b", PropertyValue(12)}, {"a", PropertyValue(77)}},
{{"a", PropertyValue(77)}, {"c", PropertyValue(0)}},
{{"a", PropertyValue(78)}, {"b", PropertyValue(12)}}};
for (const auto &map : maps) expected_property_value.emplace_back(map);
auto shuffled = maps;
std::random_shuffle(shuffled.begin(), shuffled.end());
for (const auto &map : shuffled) {
auto vertex_accessor = dba.InsertVertex();
vertex_accessor.add_label(label);
vertex_accessor.PropsSet(property, PropertyValue(map));
}
EXPECT_EQ(Count(dba.Vertices(label, property, false)), 0);
EXPECT_EQ(Count(dba.Vertices(label, property, true)), 54);
int cnt = 0;
for (auto vertex : dba.Vertices(label, property, true)) {
const PropertyValue &property_value = vertex.PropsAt(property);
EXPECT_EQ(property_value.type(), expected_property_value[cnt].type());
switch (property_value.type()) {
case PropertyValue::Type::Bool:
EXPECT_EQ(property_value.ValueBool(),
expected_property_value[cnt].ValueBool());
break;
case PropertyValue::Type::Double:
EXPECT_EQ(property_value.ValueDouble(),
expected_property_value[cnt].ValueDouble());
break;
case PropertyValue::Type::Int:
EXPECT_EQ(property_value.ValueInt(),
expected_property_value[cnt].ValueInt());
break;
case PropertyValue::Type::String:
EXPECT_EQ(property_value.ValueString(),
expected_property_value[cnt].ValueString());
break;
case PropertyValue::Type::List: {
auto received_value = property_value.ValueList();
auto expected_value = expected_property_value[cnt].ValueList();
EXPECT_EQ(received_value.size(), expected_value.size());
EXPECT_EQ(received_value.size(), 1);
EXPECT_EQ(received_value[0].ValueInt(), expected_value[0].ValueInt());
break;
}
case PropertyValue::Type::Map: {
auto received_value = property_value.ValueMap();
auto expected_value = expected_property_value[cnt].ValueMap();
EXPECT_EQ(received_value.size(), expected_value.size());
for (const auto &kv : expected_value) {
auto found = expected_value.find(kv.first);
EXPECT_NE(found, expected_value.end());
EXPECT_EQ(kv.second.ValueInt(), found->second.ValueInt());
}
break;
}
case PropertyValue::Type::Null:
ASSERT_FALSE("Invalid value type.");
}
++cnt;
}
}
/**
* A test fixture that contains a database, accessor,
* (label, property) index and 100 vertices, 10 for
* each of [0, 10) property values.
*/
class GraphDbAccessorIndexRange : public GraphDbAccessorIndex {
protected:
void SetUp() override {
dba.BuildIndex(label, property);
for (int i = 0; i < 100; i++) AddVertex(i / 10);
ASSERT_EQ(Count(dba.Vertices(false)), 0);
ASSERT_EQ(Count(dba.Vertices(true)), 100);
Commit();
ASSERT_EQ(Count(dba.Vertices(false)), 100);
}
auto Vertices(std::optional<utils::Bound<PropertyValue>> lower,
std::optional<utils::Bound<PropertyValue>> upper,
bool current_state = false) {
return dba.Vertices(label, property, lower, upper, current_state);
}
};
TEST_F(GraphDbAccessorIndexRange, RangeIteration) {
using std::nullopt;
EXPECT_EQ(Count(Vertices(nullopt, Inclusive(7))), 80);
EXPECT_EQ(Count(Vertices(nullopt, Exclusive(7))), 70);
EXPECT_EQ(Count(Vertices(Inclusive(7), nullopt)), 30);
EXPECT_EQ(Count(Vertices(Exclusive(7), nullopt)), 20);
EXPECT_EQ(Count(Vertices(Exclusive(3), Exclusive(6))), 20);
EXPECT_EQ(Count(Vertices(Inclusive(3), Inclusive(6))), 40);
EXPECT_EQ(Count(Vertices(Inclusive(6), Inclusive(3))), 0);
::testing::FLAGS_gtest_death_test_style = "threadsafe";
EXPECT_DEATH(Vertices(nullopt, nullopt), "bound must be provided");
}
TEST_F(GraphDbAccessorIndexRange, RangeIterationCurrentState) {
using std::nullopt;
EXPECT_EQ(Count(Vertices(nullopt, Inclusive(7))), 80);
for (int i = 0; i < 20; i++) AddVertex(2);
EXPECT_EQ(Count(Vertices(nullopt, Inclusive(7))), 80);
EXPECT_EQ(Count(Vertices(nullopt, Inclusive(7), true)), 100);
Commit();
EXPECT_EQ(Count(Vertices(nullopt, Inclusive(7))), 100);
}
TEST_F(GraphDbAccessorIndexRange, RangeInterationIncompatibleTypes) {
using std::nullopt;
// using PropertyValue set to Null as a bound fails with an assertion
::testing::FLAGS_gtest_death_test_style = "threadsafe";
EXPECT_DEATH(Vertices(nullopt, Inclusive(PropertyValue())),
"not a valid index bound");
EXPECT_DEATH(Vertices(Inclusive(PropertyValue()), nullopt),
"not a valid index bound");
std::vector<PropertyValue> incompatible_with_int{
PropertyValue("string"), PropertyValue(true),
PropertyValue(std::vector<PropertyValue>{PropertyValue(1)})};
// using incompatible upper and lower bounds yields no results
EXPECT_EQ(Count(Vertices(Inclusive(2), Inclusive("string"))), 0);
// for incomparable bound and stored data,
// expect that no results are returned
ASSERT_EQ(Count(Vertices(Inclusive(0), nullopt)), 100);
for (PropertyValue value : incompatible_with_int) {
::testing::FLAGS_gtest_death_test_style = "threadsafe";
EXPECT_EQ(Count(Vertices(nullopt, Inclusive(value))), 0)
<< "Found vertices of type int for predicate value type: "
<< value.type();
EXPECT_EQ(Count(Vertices(Inclusive(value), nullopt)), 0)
<< "Found vertices of type int for predicate value type: "
<< value.type();
}
// we can compare int to double
EXPECT_EQ(Count(Vertices(nullopt, Inclusive(1000.0))), 100);
EXPECT_EQ(Count(Vertices(Inclusive(0.0), nullopt)), 100);
}

View File

@ -1,76 +0,0 @@
#include <vector>
#include <gtest/gtest.h>
#include "storage/common/mvcc/version.hpp"
#include "storage/single_node/mvcc/record.hpp"
#include "storage/single_node/mvcc/version_list.hpp"
#include "transactions/single_node/engine.hpp"
#include "transactions/transaction.hpp"
#include "utils/thread/sync.hpp"
#include "mvcc_gc_common.hpp"
TEST(MVCC, Deadlock) {
tx::Engine engine;
auto t0 = engine.Begin();
mvcc::VersionList<Prop> version_list1(*t0, storage::Gid::FromInt(0));
mvcc::VersionList<Prop> version_list2(*t0, storage::Gid::FromInt(1));
engine.Commit(*t0);
auto t1 = engine.Begin();
auto t2 = engine.Begin();
version_list1.update(*t1);
version_list2.update(*t2);
EXPECT_THROW(version_list1.update(*t2), utils::LockTimeoutException);
}
// TODO Gleich: move this test to mvcc_gc???
// check that we don't delete records when we re-link
TEST(MVCC, UpdateDontDelete) {
std::atomic<int> count{0};
{
tx::Engine engine;
auto t1 = engine.Begin();
mvcc::VersionList<DestrCountRec> version_list(*t1, storage::Gid::FromInt(0),
count);
engine.Commit(*t1);
auto t2 = engine.Begin();
version_list.update(*t2);
engine.Abort(*t2);
EXPECT_EQ(count, 0);
auto t3 = engine.Begin();
// Update re-links the node and shouldn't clear it yet.
version_list.update(*t3);
EXPECT_EQ(count, 0);
// TODO Gleich: why don't we also test that remove doesn't delete?
engine.Commit(*t3);
}
EXPECT_EQ(count, 3);
}
// Check that we get the oldest record.
TEST(MVCC, Oldest) {
tx::Engine engine;
auto t1 = engine.Begin();
mvcc::VersionList<Prop> version_list(*t1, storage::Gid::FromInt(0));
auto first = version_list.Oldest();
EXPECT_NE(first, nullptr);
// TODO Gleich: no need to do 10 checks of the same thing
for (int i = 0; i < 10; ++i) {
engine.Advance(t1->id_);
version_list.update(*t1);
EXPECT_EQ(version_list.Oldest(), first);
}
// TODO Gleich: what about remove?
// TODO Gleich: here it might make sense to write a concurrent test
// since these ops rely heavily on linkage atomicity?
}
// TODO Gleich: perhaps some concurrent VersionList::find tests?

View File

@ -1,106 +0,0 @@
#include "mvcc_find_update_common.hpp"
TEST_F(Mvcc, FindUncommittedHigherTXUpdate) {
T2_FIND;
T3_BEGIN;
T3_UPDATE;
EXPECT_EQ(v2, version_list.find(*t2));
}
TEST_F(Mvcc, FindCommittedHigherTXUpdate) {
T2_FIND;
T3_BEGIN;
T3_UPDATE;
T3_COMMIT;
EXPECT_EQ(v2, version_list.find(*t2));
}
TEST_F(Mvcc, FindAbortedHigherTXUpdate) {
T2_FIND;
T3_BEGIN;
T3_UPDATE;
T3_ABORT;
EXPECT_EQ(v2, version_list.find(*t2));
}
TEST_F(Mvcc, FindCommittedLowerTXUpdate) {
T2_UPDATE;
T3_BEGIN;
T3_FIND;
T2_COMMIT;
EXPECT_EQ(v3, version_list.find(*t3));
}
TEST_F(Mvcc, FindAbortedLowerTXUpdate) {
T2_UPDATE;
T3_BEGIN;
T3_FIND;
T2_ABORT;
EXPECT_EQ(v3, version_list.find(*t3));
}
TEST_F(Mvcc, FindUncommittedHigherTXRemove) {
T2_FIND;
T3_BEGIN;
T3_REMOVE;
EXPECT_EQ(v2, version_list.find(*t2));
}
TEST_F(Mvcc, FindCommittedHigherTXRemove) {
T2_FIND;
T3_BEGIN;
T3_REMOVE;
T3_COMMIT;
EXPECT_EQ(v2, version_list.find(*t2));
}
TEST_F(Mvcc, FindAbortedHigherTXRemove) {
T2_FIND;
T3_BEGIN;
T3_REMOVE;
T3_ABORT;
EXPECT_EQ(v2, version_list.find(*t2));
}
TEST_F(Mvcc, FindCommittedLowerTXRemove) {
T2_REMOVE;
T3_BEGIN;
T3_FIND;
EXPECT_EQ(v3, version_list.find(*t3));
}
TEST_F(Mvcc, FindAbortedLowerTXRemove) {
T2_REMOVE;
T3_BEGIN;
T3_FIND;
T2_ABORT;
EXPECT_EQ(v3, version_list.find(*t3));
}
TEST_F(Mvcc, ReadUncommitedUpdateFromSameTXSameCommand) {
T2_UPDATE;
EXPECT_NE(v2, version_list.find(*t2));
}
TEST_F(Mvcc, ReadUncommitedUpdateFromSameTXNotSameCommand) {
T2_UPDATE;
engine.Advance(t2->id_);
EXPECT_EQ(v2, version_list.find(*t2));
}
TEST_F(Mvcc, ReadUncommitedRemoveFromSameTXSameCommand) {
T2_UPDATE;
T2_COMMIT;
T3_BEGIN;
T3_REMOVE;
EXPECT_EQ(v2, version_list.find(*t3));
}
TEST_F(Mvcc, ReadUncommitedRemoveFromSameTXNotSameCommand) {
T2_UPDATE;
T2_COMMIT;
T3_BEGIN;
T3_REMOVE;
engine.Advance(t3->id_);
EXPECT_NE(v2, version_list.find(*t3));
}

View File

@ -1,97 +0,0 @@
#include <vector>
#include <gtest/gtest.h>
#include "storage/common/mvcc/version.hpp"
#include "storage/single_node/mvcc/record.hpp"
#include "storage/single_node/mvcc/version_list.hpp"
#include "transactions/single_node/engine.hpp"
#include "transactions/transaction.hpp"
class TestClass : public mvcc::Record<TestClass> {
public:
// constructs first version, size should be 0
explicit TestClass(int &version_list_size) : version_list_size_(version_list_size) {
++version_list_size_;
}
TestClass *CloneData() { return new TestClass(version_list_size_); }
// version constructed in version list update
TestClass(TestClass &other) : version_list_size_(other.version_list_size_) {
version_list_size_++;
}
friend std::ostream &operator<<(std::ostream &stream, TestClass &test_class) {
stream << test_class.tx().cre << " " << test_class.tx().exp;
return stream;
}
// reference to variable version_list_size in test SetUp, increases when new
// TestClass is created
int &version_list_size_;
};
/**
* Testing mvcc::VersionList::find behavior in
 * different situations (preceding update/remove ops
* in different transactions).
*
* The setup for each case is:
 * - transaction t1 has created a new version_list v1 and committed
 * - transaction t2 has started
* - *********************
* - here the test fixture ends and custom test behavior should be added
* - *********************
* - tests should check every legal sequence of the following ops
* - creation of transaction t3
* - [commit/abort] of [t2/t3]
* - [removal/update] on version_list by [t2/t3]
* - illegal sequences (for example double commit) don't have to be checked
*/
class Mvcc : public ::testing::Test {
protected:
virtual void SetUp() {
id0 = 0;
engine.Advance(t1->id_);
id1 = t1->id_;
v1 = version_list.find(*t1);
engine.Commit(*t1);
t2 = engine.Begin();
id2 = t2->id_;
}
// variable where number of versions is stored
int version_list_size = 0;
tx::Engine engine;
tx::Transaction *t1 = engine.Begin();
mvcc::VersionList<TestClass> version_list{*t1, storage::Gid::FromInt(0),
version_list_size};
TestClass *v1 = nullptr;
tx::Transaction *t2 = nullptr;
tx::TransactionId id0, id1, id2;
};
// helper macros. important:
// - TX_FIND and TX_UPDATE set the record variable vX
// - TX_BEGIN sets the transaction variable tX
#define T2_FIND __attribute__((unused)) auto v2 = version_list.find(*t2)
#define T3_FIND __attribute__((unused)) auto v3 = version_list.find(*t3)
#define T4_FIND __attribute__((unused)) auto v4 = version_list.find(*t4)
#define T2_UPDATE __attribute__((unused)) auto v2 = version_list.update(*t2)
#define T3_UPDATE __attribute__((unused)) auto v3 = version_list.update(*t3)
#define T2_COMMIT engine.Commit(*t2);
#define T3_COMMIT engine.Commit(*t3);
#define T2_ABORT engine.Abort(*t2);
#define T3_ABORT engine.Abort(*t3);
#define T3_BEGIN \
auto t3 = engine.Begin(); \
__attribute__((unused)) int id3 = t3->id_
#define T4_BEGIN auto t4 = engine.Begin();
#define T2_REMOVE version_list.remove(version_list.find(*t2), *t2)
#define T3_REMOVE version_list.remove(version_list.find(*t3), *t3)
#define EXPECT_CRE(record, expected) EXPECT_EQ(record->tx().cre, id##expected)
#define EXPECT_EXP(record, expected) EXPECT_EQ(record->tx().exp, id##expected)
#define EXPECT_NXT(v1, v2) EXPECT_EQ(v1->next(), v2)
#define EXPECT_SIZE(n) EXPECT_EQ(version_list_size, n)
// test the fixture
TEST_F(Mvcc, Fixture) {
EXPECT_CRE(v1, 1);
EXPECT_EXP(v1, 0);
}
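// A minimal sketch (editorial, not part of the original suite) showing how
// the fixture and macros above compose: t2 updates and commits, so a
// transaction started afterwards should observe the new version, as in the
// UpdCmt* cases in the files that include this header.
TEST_F(Mvcc, SketchCommittedUpdateIsVisible) {
  T2_UPDATE;  // creates v2 on top of v1
  T2_COMMIT;
  T3_BEGIN;
  EXPECT_EQ(version_list.find(*t3), v2);
}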

View File

@ -1,163 +0,0 @@
#include <chrono>
#include <memory>
#include <thread>
#include <glog/logging.h>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "data_structures/concurrent/concurrent_map.hpp"
#include "storage/single_node/mvcc/record.hpp"
#include "storage/single_node/mvcc/version_list.hpp"
#include "storage/single_node/garbage_collector.hpp"
#include "storage/single_node/vertex.hpp"
#include "transactions/single_node/engine.hpp"
#include "mvcc_gc_common.hpp"
class MvccGcTest : public ::testing::Test {
protected:
tx::Engine engine;
private:
tx::Transaction *t0 = engine.Begin();
protected:
std::atomic<int> record_destruction_count{0};
mvcc::VersionList<DestrCountRec> version_list{*t0, storage::Gid::FromInt(0),
record_destruction_count};
std::vector<tx::Transaction *> transactions{t0};
void SetUp() override { engine.Commit(*t0); }
void MakeUpdates(int update_count, bool commit) {
for (int i = 0; i < update_count; i++) {
auto t = engine.Begin();
version_list.update(*t);
if (commit)
engine.Commit(*t);
else
engine.Abort(*t);
}
}
auto GcDeleted(tx::Transaction *latest = nullptr) {
return version_list.GcDeleted(GcSnapshot(engine, latest), engine);
}
};
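// Editorial note (inferred from the assertions in the tests below):
// GcDeleted() appears to return a pair in which .first signals that the whole
// version list became invisible and may be unlinked, while .second points to
// the chain of expired records that the caller is responsible for freeing.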
TEST_F(MvccGcTest, RemoveAndAbort) {
auto t = engine.Begin();
version_list.remove(version_list.find(*t), *t);
engine.Abort(*t);
auto ret = GcDeleted();
EXPECT_EQ(ret.first, false);
EXPECT_EQ(ret.second, nullptr);
EXPECT_EQ(record_destruction_count, 0);
}
TEST_F(MvccGcTest, UpdateAndAbort) {
MakeUpdates(1, false);
auto ret = GcDeleted();
EXPECT_EQ(ret.first, false);
EXPECT_EQ(ret.second, nullptr);
EXPECT_EQ(record_destruction_count, 0);
MakeUpdates(3, false);
ret = GcDeleted();
EXPECT_EQ(ret.first, false);
EXPECT_EQ(ret.second, nullptr);
EXPECT_EQ(record_destruction_count, 0);
}
TEST_F(MvccGcTest, RemoveAndCommit) {
auto t = engine.Begin();
version_list.remove(version_list.find(*t), *t);
engine.Commit(*t);
auto ret = GcDeleted();
EXPECT_EQ(ret.first, true);
EXPECT_NE(ret.second, nullptr);
delete ret.second;
EXPECT_EQ(record_destruction_count, 1);
}
TEST_F(MvccGcTest, UpdateAndCommit) {
MakeUpdates(4, true);
auto ret = GcDeleted();
EXPECT_EQ(ret.first, false);
EXPECT_NE(ret.second, nullptr);
delete ret.second;
EXPECT_EQ(record_destruction_count, 4);
}
TEST_F(MvccGcTest, OldestTransactionSnapshot) {
// this test validates that we can't delete
// a record that has been expired by a transaction (t1)
// committed before GC starts (when t2 is oldest),
// if t1 is in t2's snapshot.
// this is because there could exist a transaction t3
// that also has t1 in its snapshot, and consequently
// does not see the expiration and sees the record
auto t1 = engine.Begin();
auto t2 = engine.Begin();
version_list.remove(version_list.find(*t1), *t1);
engine.Commit(*t1);
auto ret = GcDeleted(t2);
EXPECT_EQ(ret.first, false);
EXPECT_EQ(ret.second, nullptr);
EXPECT_EQ(record_destruction_count, 0);
}
/**
* Test integration of garbage collector with MVCC GC. Delete version lists
* which are empty (not visible from any future transaction) from the skiplist.
*/
TEST(GarbageCollector, GcClean) {
ConcurrentMap<int64_t, mvcc::VersionList<DestrCountRec> *> collection;
tx::Engine engine;
DeferredDeleter<DestrCountRec> deleter;
DeferredDeleter<mvcc::VersionList<DestrCountRec>> vlist_deleter;
GarbageCollector<decltype(collection), DestrCountRec> gc(collection, deleter,
vlist_deleter);
// create a version list in transaction t1
auto t1 = engine.Begin();
std::atomic<int> record_destruction_count{0};
auto vl = new mvcc::VersionList<DestrCountRec>(*t1, storage::Gid::FromInt(0),
record_destruction_count);
auto access = collection.access();
access.insert(0, vl);
engine.Commit(*t1);
// run garbage collection that has nothing to collect
gc.Run(GcSnapshot(engine, nullptr), engine);
EXPECT_EQ(deleter.Count(), 0);
EXPECT_EQ(vlist_deleter.Count(), 0);
EXPECT_EQ(record_destruction_count, 0);
// delete the only record in the version-list in transaction t2
auto t2 = engine.Begin();
vl->remove(vl->find(*t2), *t2);
engine.Commit(*t2);
gc.Run(GcSnapshot(engine, nullptr), engine);
// check that we destroyed the record
EXPECT_EQ(deleter.Count(), 1);
deleter.FreeExpiredObjects(3);
EXPECT_EQ(deleter.Count(), 0);
EXPECT_EQ(record_destruction_count, 1);
// check that we destroyed the version list
EXPECT_EQ(vlist_deleter.Count(), 1);
vlist_deleter.FreeExpiredObjects(3);
EXPECT_EQ(vlist_deleter.Count(), 0);
EXPECT_EQ(access.size(), 0U);
}
int main(int argc, char **argv) {
::testing::InitGoogleTest(&argc, argv);
google::InitGoogleLogging(argv[0]);
return RUN_ALL_TESTS();
}

View File

@ -1,40 +0,0 @@
#pragma once
#include "storage/single_node/mvcc/record.hpp"
#include "transactions/single_node/engine.hpp"
/**
 * @brief - Empty class which inherits from mvcc::Record.
*/
class Prop : public mvcc::Record<Prop> {
public:
Prop *CloneData() { return new Prop; }
};
/**
* @brief - Class which inherits from mvcc::Record and takes an atomic variable
 * to count the number of destructor calls (to test if the record is actually
* deleted).
*/
class DestrCountRec : public mvcc::Record<DestrCountRec> {
public:
explicit DestrCountRec(std::atomic<int> &count) : count_(count) {}
DestrCountRec *CloneData() { return new DestrCountRec(count_); }
~DestrCountRec() { ++count_; }
private:
std::atomic<int> &count_;
};
// helper function for creating a GC snapshot;
// if given a nullptr it makes a GC snapshot as if there
// were no active transactions
auto GcSnapshot(tx::Engine &engine, tx::Transaction *t) {
if (t != nullptr) {
tx::Snapshot gc_snap = t->snapshot();
gc_snap.insert(t->id_);
return gc_snap;
} else {
return engine.GlobalGcSnapshot();
}
}
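// A minimal usage sketch (editorial, mirroring how the test files that
// include this header call the helper): pass the oldest live transaction to
// bound what GC may reclaim, or nullptr to collect as if no transaction were
// active.
//
//   tx::Engine engine;
//   auto *oldest = engine.Begin();
//   auto bounded = GcSnapshot(engine, oldest);    // keeps data `oldest` sees
//   auto unbounded = GcSnapshot(engine, nullptr); // no active transactions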

View File

@ -1,116 +0,0 @@
#include "mvcc_find_update_common.hpp"
#undef T2_FIND
#define T2_FIND version_list.find(*t2)
#undef EXPECT_CRE
#undef EXPECT_EXP
#define EXPECT_CRE(record, transaction, command) \
EXPECT_EQ(record->tx().cre, id##transaction); \
EXPECT_EQ(record->cmd().cre, command)
#define EXPECT_EXP(record, transaction, command) \
EXPECT_EQ(record->tx().exp, id##transaction); \
EXPECT_EQ(record->cmd().exp, command)
// IMPORTANT: see the definition of the EXPECT_CRE and EXPECT_EXP macros in
// tests/mvcc_find_update_common.hpp. Numbers in those macros represent the
// transaction ids assigned when the transactions were created.
TEST_F(Mvcc, UpdateNotAdvanceUpdate) {
T2_UPDATE;
EXPECT_EQ(T2_FIND, v1);
auto v2_2 = version_list.update(*t2);
EXPECT_NXT(v2, v1);
EXPECT_EQ(v2, v2_2);
EXPECT_CRE(v2, 2, 1);
EXPECT_EXP(v2, 0, 0);
EXPECT_CRE(v1, 1, 1);
EXPECT_EXP(v1, 2, 1);
EXPECT_SIZE(2);
}
TEST_F(Mvcc, UpdateNotAdvanceRemove) {
T2_UPDATE;
EXPECT_EQ(T2_FIND, v1);
T2_REMOVE;
EXPECT_NXT(v2, v1);
EXPECT_CRE(v2, 2, 1);
EXPECT_EXP(v2, 0, 0);
EXPECT_CRE(v1, 1, 1);
EXPECT_EXP(v1, 2, 1);
EXPECT_SIZE(2);
}
TEST_F(Mvcc, RemoveNotAdvanceUpdate) {
T2_REMOVE;
EXPECT_EQ(T2_FIND, v1);
T2_UPDATE;
EXPECT_NXT(v2, v1);
EXPECT_CRE(v2, 2, 1);
EXPECT_EXP(v2, 0, 0);
EXPECT_CRE(v1, 1, 1);
EXPECT_EXP(v1, 2, 1);
EXPECT_SIZE(2);
}
TEST_F(Mvcc, RemoveNotAdvanceRemove) {
T2_REMOVE;
EXPECT_EQ(T2_FIND, v1);
T2_REMOVE;
EXPECT_CRE(v1, 1, 1);
EXPECT_EXP(v1, 2, 1);
EXPECT_SIZE(1);
}
TEST_F(Mvcc, UpdateAdvanceUpdate) {
T2_UPDATE;
EXPECT_EQ(T2_FIND, v1);
engine.Advance(t2->id_);
EXPECT_EQ(T2_FIND, v2);
auto v2_2 = version_list.update(*t2);
EXPECT_NXT(v2, v1);
EXPECT_NXT(v2_2, v2);
EXPECT_CRE(v2, 2, 1);
EXPECT_EXP(v2, 2, 2);
EXPECT_CRE(v2_2, 2, 2);
EXPECT_EXP(v2_2, 0, 0);
EXPECT_CRE(v1, 1, 1);
EXPECT_EXP(v1, 2, 1);
EXPECT_SIZE(3);
}
TEST_F(Mvcc, UpdateAdvanceRemove) {
T2_UPDATE;
EXPECT_EQ(T2_FIND, v1);
engine.Advance(t2->id_);
EXPECT_EQ(T2_FIND, v2);
T2_REMOVE;
EXPECT_NXT(v2, v1);
EXPECT_CRE(v2, 2, 1);
EXPECT_EXP(v2, 2, 2);
EXPECT_CRE(v1, 1, 1);
EXPECT_EXP(v1, 2, 1);
EXPECT_SIZE(2);
}
TEST_F(Mvcc, RemoveAdvanceUpdate) {
T2_REMOVE;
EXPECT_EQ(T2_FIND, v1);
engine.Advance(t2->id_);
EXPECT_EQ(T2_FIND, nullptr);
EXPECT_DEATH(T2_UPDATE, ".*nullptr.*");
}
TEST_F(Mvcc, RemoveAdvanceRemove) {
T2_REMOVE;
EXPECT_EQ(T2_FIND, v1);
engine.Advance(t2->id_);
EXPECT_EQ(T2_FIND, nullptr);
EXPECT_FALSE(T2_FIND);
}
int main(int argc, char **argv) {
::testing::InitGoogleTest(&argc, argv);
::testing::FLAGS_gtest_death_test_style = "threadsafe";
return RUN_ALL_TESTS();
}

View File

@ -1,564 +0,0 @@
#include "mvcc_find_update_common.hpp"
#undef T4_FIND
#define T4_FIND version_list.find(*t4)
#undef T3_FIND
#define T3_FIND version_list.find(*t3)
// IMPORTANT: see the definition of the EXPECT_CRE and EXPECT_EXP macros in
// tests/mvcc_find_update_common.hpp. Numbers in those macros represent the
// transaction ids assigned when the transactions were created.
// ****************************************************************
// * CASE 1: T3 starts after T2 ends.
// *
// * T2: START---OP---END
// *
// * T3: START---OP---END
// *
// * T4: START---FIND---END
// ****************************************************************
TEST_F(Mvcc, UpdCmtUpdCmt1) {
T2_UPDATE;
T2_COMMIT;
T3_BEGIN;
T3_UPDATE;
T3_COMMIT;
EXPECT_EXP(v1, 2);
EXPECT_CRE(v2, 2);
EXPECT_EXP(v2, 3);
EXPECT_CRE(v3, 3);
EXPECT_EXP(v3, 0);
EXPECT_NXT(v3, v2);
EXPECT_NXT(v2, v1);
EXPECT_SIZE(3);
T4_BEGIN;
EXPECT_EQ(T4_FIND, v3);
}
TEST_F(Mvcc, UpdCmtRemCmt1) {
T2_UPDATE;
T2_COMMIT;
T3_BEGIN;
T3_REMOVE;
T3_COMMIT;
EXPECT_EXP(v1, 2);
EXPECT_CRE(v2, 2);
EXPECT_EXP(v2, 3);
EXPECT_NXT(v2, v1);
EXPECT_SIZE(2);
T4_BEGIN;
EXPECT_EQ(T4_FIND, nullptr);
}
TEST_F(Mvcc, RemCmtUpdCmt1) {
T2_REMOVE;
T2_COMMIT;
T3_BEGIN;
EXPECT_DEATH(T3_UPDATE, ".*nullptr.*");
}
TEST_F(Mvcc, RemCmtRemCmt1) {
T2_REMOVE;
T2_COMMIT;
T3_BEGIN;
EXPECT_FALSE(T3_FIND);
}
TEST_F(Mvcc, UpdCmtUpdAbt1) {
T2_UPDATE;
T2_COMMIT;
T3_BEGIN;
T3_UPDATE;
T3_ABORT;
EXPECT_EXP(v1, 2);
EXPECT_CRE(v2, 2);
EXPECT_EXP(v2, 3);
EXPECT_CRE(v3, 3);
EXPECT_EXP(v3, 0);
EXPECT_NXT(v2, v1);
EXPECT_NXT(v3, v2);
EXPECT_SIZE(3);
T4_BEGIN;
EXPECT_EQ(T4_FIND, v2);
}
TEST_F(Mvcc, UpdCmtRemAbt1) {
T2_UPDATE;
T2_COMMIT;
T3_BEGIN;
T3_REMOVE;
T3_ABORT;
EXPECT_EXP(v1, 2);
EXPECT_CRE(v2, 2);
EXPECT_EXP(v2, 3);
EXPECT_NXT(v2, v1);
EXPECT_SIZE(2);
T4_BEGIN;
EXPECT_EQ(T4_FIND, v2);
}
TEST_F(Mvcc, RemCmtUpdAbt1) {
T2_REMOVE;
T2_COMMIT;
T3_BEGIN;
EXPECT_DEATH(T3_UPDATE, ".*nullptr.*");
}
TEST_F(Mvcc, RemCmtRemAbt1) {
T2_REMOVE;
T2_COMMIT;
T3_BEGIN;
EXPECT_FALSE(T3_FIND);
}
TEST_F(Mvcc, UpdAbtUpdCmt1) {
T2_UPDATE;
T2_ABORT;
T3_BEGIN;
T3_UPDATE;
T3_COMMIT;
EXPECT_EXP(v1, 3);
EXPECT_CRE(v2, 2);
EXPECT_EXP(v2, 0);
EXPECT_CRE(v3, 3);
EXPECT_EXP(v3, 0);
EXPECT_NXT(v3, v2);
EXPECT_NXT(v2, v1);
EXPECT_SIZE(3);
T4_BEGIN;
EXPECT_EQ(T4_FIND, v3);
}
TEST_F(Mvcc, UpdAbtRemCmt1) {
T2_UPDATE;
T2_ABORT;
T3_BEGIN;
T3_REMOVE;
T3_COMMIT;
EXPECT_NXT(v2, v1);
EXPECT_EXP(v1, 3);
EXPECT_CRE(v2, 2);
EXPECT_EXP(v2, 0);
EXPECT_SIZE(2);
T4_BEGIN;
EXPECT_EQ(T4_FIND, nullptr);
}
TEST_F(Mvcc, RemAbtUpdCmt1) {
T2_REMOVE;
T2_ABORT;
T3_BEGIN;
T3_UPDATE;
T3_COMMIT;
EXPECT_EXP(v1, 3);
EXPECT_CRE(v3, 3);
EXPECT_EXP(v3, 0);
EXPECT_NXT(v3, v1);
EXPECT_SIZE(2);
T4_BEGIN;
EXPECT_EQ(T4_FIND, v3);
}
TEST_F(Mvcc, RemAbtRemCmt1) {
T2_REMOVE;
T2_ABORT;
T3_BEGIN;
T3_REMOVE;
T3_COMMIT;
EXPECT_EXP(v1, 3);
EXPECT_SIZE(1);
T4_BEGIN;
EXPECT_EQ(T4_FIND, nullptr);
}
TEST_F(Mvcc, UpdAbtUpdAbt1) {
T2_UPDATE;
T2_ABORT;
T3_BEGIN;
T3_UPDATE;
T3_ABORT;
EXPECT_EXP(v1, 3);
EXPECT_CRE(v2, 2);
EXPECT_EXP(v2, 0);
EXPECT_CRE(v3, 3);
EXPECT_EXP(v3, 0);
EXPECT_NXT(v2, v1);
EXPECT_NXT(v3, v2);
EXPECT_SIZE(3);
T4_BEGIN;
EXPECT_EQ(T4_FIND, v1);
}
TEST_F(Mvcc, UpdAbtRemAbt1) {
T2_UPDATE;
T2_ABORT;
T3_BEGIN;
T3_REMOVE;
T3_ABORT;
EXPECT_NXT(v2, v1);
EXPECT_EXP(v1, 3);
EXPECT_CRE(v2, 2);
EXPECT_EXP(v2, 0);
EXPECT_SIZE(2);
T4_BEGIN;
EXPECT_EQ(T4_FIND, v1);
}
TEST_F(Mvcc, RemAbtUpdAbt1) {
T2_REMOVE;
T2_ABORT;
T3_BEGIN;
T3_UPDATE;
T3_ABORT;
EXPECT_EXP(v1, 3);
EXPECT_CRE(v3, 3);
EXPECT_EXP(v3, 0);
EXPECT_NXT(v3, v1);
EXPECT_SIZE(2);
T4_BEGIN;
EXPECT_EQ(T4_FIND, v1);
}
TEST_F(Mvcc, RemAbtRemAbt1) {
T2_REMOVE;
T2_ABORT;
T3_BEGIN;
T3_REMOVE;
T3_ABORT;
EXPECT_EXP(v1, 3);
EXPECT_SIZE(1);
T4_BEGIN;
EXPECT_EQ(T4_FIND, v1);
}
// ****************************************************************
// * CASE 2: T3 starts before T2 ends.
// *
// * T2: START---OP---END
// *
// * T3: START---------OP---END
// *
// * T4: START---FIND---END
// *
// ****************************************************************
// ****************************
// COVERS 8 cases!
TEST_F(Mvcc, UpdCmtUpd2) {
T2_UPDATE;
T3_BEGIN;
T2_COMMIT;
EXPECT_THROW(T3_UPDATE, mvcc::SerializationError);
}
TEST_F(Mvcc, UpdCmtRem2) {
T2_UPDATE;
T3_BEGIN;
T2_COMMIT;
EXPECT_THROW(T3_REMOVE, mvcc::SerializationError);
}
TEST_F(Mvcc, RemCmtUpd2) {
T2_REMOVE;
T3_BEGIN;
T2_COMMIT;
EXPECT_THROW(T3_UPDATE, mvcc::SerializationError);
}
TEST_F(Mvcc, RemCmtRem2) {
T2_REMOVE;
T3_BEGIN;
T2_COMMIT;
EXPECT_THROW(T3_REMOVE, mvcc::SerializationError);
}
// **************************
TEST_F(Mvcc, UpdAbtUpdCmt2) {
T2_UPDATE;
T3_BEGIN;
T2_ABORT;
T3_UPDATE;
T3_COMMIT;
EXPECT_EXP(v1, 3);
EXPECT_CRE(v2, 2);
EXPECT_EXP(v2, 0);
EXPECT_CRE(v3, 3);
EXPECT_EXP(v3, 0);
EXPECT_NXT(v3, v2);
EXPECT_NXT(v2, v1);
EXPECT_SIZE(3);
T4_BEGIN;
EXPECT_EQ(T4_FIND, v3);
}
TEST_F(Mvcc, UpdAbtRemCmt2) {
T2_UPDATE;
T3_BEGIN;
T2_ABORT;
T3_REMOVE;
T3_COMMIT;
EXPECT_NXT(v2, v1);
EXPECT_EXP(v1, 3);
EXPECT_CRE(v2, 2);
EXPECT_EXP(v2, 0);
EXPECT_SIZE(2);
T4_BEGIN;
EXPECT_EQ(T4_FIND, nullptr);
}
TEST_F(Mvcc, RemAbtUpdCmt2) {
T2_REMOVE;
T3_BEGIN;
T2_ABORT;
T3_UPDATE;
T3_COMMIT;
EXPECT_EXP(v1, 3);
EXPECT_CRE(v3, 3);
EXPECT_EXP(v3, 0);
EXPECT_NXT(v3, v1);
EXPECT_SIZE(2);
T4_BEGIN;
EXPECT_EQ(T4_FIND, v3);
}
TEST_F(Mvcc, RemAbtRemCmt2) {
T2_REMOVE;
T3_BEGIN;
T2_ABORT;
T3_REMOVE;
T3_COMMIT;
EXPECT_EXP(v1, 3);
EXPECT_SIZE(1);
T4_BEGIN;
EXPECT_EQ(T4_FIND, nullptr);
}
TEST_F(Mvcc, UpdAbtUpdAbt2) {
T2_UPDATE;
T3_BEGIN;
T2_ABORT;
T3_UPDATE;
T3_ABORT;
EXPECT_EXP(v1, 3);
EXPECT_CRE(v2, 2);
EXPECT_EXP(v2, 0);
EXPECT_CRE(v3, 3);
EXPECT_EXP(v3, 0);
EXPECT_NXT(v2, v1);
EXPECT_NXT(v3, v2);
EXPECT_SIZE(3);
T4_BEGIN;
EXPECT_EQ(T4_FIND, v1);
}
TEST_F(Mvcc, UpdAbtRemAbt2) {
T2_UPDATE;
T3_BEGIN;
T2_ABORT;
T3_REMOVE;
T3_ABORT;
EXPECT_NXT(v2, v1);
EXPECT_EXP(v1, 3);
EXPECT_CRE(v2, 2);
EXPECT_EXP(v2, 0);
EXPECT_SIZE(2);
T4_BEGIN;
EXPECT_EQ(T4_FIND, v1);
}
TEST_F(Mvcc, RemAbtUpdAbt2) {
T2_REMOVE;
T3_BEGIN;
T2_ABORT;
T3_UPDATE;
T3_ABORT;
EXPECT_EXP(v1, 3);
EXPECT_CRE(v3, 3);
EXPECT_EXP(v3, 0);
EXPECT_NXT(v3, v1);
EXPECT_SIZE(2);
T4_BEGIN;
EXPECT_EQ(T4_FIND, v1);
}
TEST_F(Mvcc, RemAbtRemAbt2) {
T2_REMOVE;
T3_BEGIN;
T2_ABORT;
T3_REMOVE;
T3_ABORT;
EXPECT_EXP(v1, 3);
EXPECT_SIZE(1);
T4_BEGIN;
EXPECT_EQ(T4_FIND, v1);
}
// ****************************************************************
// * CASE 3: T3 ends before T2 starts executing operations.
// *
// * T2: START--------------------OP---END
// *
// * T3: START---OP---END
// *
// * T4: START---FIND---END
// *
// ****************************************************************
// ****************************
// COVERS 8 cases!
TEST_F(Mvcc, UpdUpdCmt3) {
T3_BEGIN;
T3_UPDATE;
T3_COMMIT;
EXPECT_THROW(T2_UPDATE, mvcc::SerializationError);
}
TEST_F(Mvcc, UpdRemCmt3) {
T3_BEGIN;
T3_REMOVE;
T3_COMMIT;
EXPECT_THROW(T2_UPDATE, mvcc::SerializationError);
}
TEST_F(Mvcc, RemUpdCmt3) {
T3_BEGIN;
T3_UPDATE;
T3_COMMIT;
EXPECT_THROW(T2_REMOVE, mvcc::SerializationError);
}
TEST_F(Mvcc, RemRemCmt3) {
T3_BEGIN;
T3_REMOVE;
T3_COMMIT;
EXPECT_THROW(T2_REMOVE, mvcc::SerializationError);
}
// **************************
TEST_F(Mvcc, UpdCmtUpdAbt3) {
T3_BEGIN;
T3_UPDATE;
T3_ABORT;
T2_UPDATE;
T2_COMMIT;
EXPECT_EXP(v1, 2);
EXPECT_CRE(v2, 2);
EXPECT_EXP(v2, 0);
EXPECT_CRE(v3, 3);
EXPECT_EXP(v3, 0);
EXPECT_NXT(v3, v1);
EXPECT_NXT(v2, v3);
EXPECT_SIZE(3);
T4_BEGIN;
EXPECT_EQ(T4_FIND, v2);
}
TEST_F(Mvcc, UpdCmtRemAbt3) {
T3_BEGIN;
T3_REMOVE;
T3_ABORT;
T2_UPDATE;
T2_COMMIT;
EXPECT_EXP(v1, 2);
EXPECT_CRE(v2, 2);
EXPECT_EXP(v2, 0);
EXPECT_NXT(v2, v1);
EXPECT_SIZE(2);
T4_BEGIN;
EXPECT_EQ(T4_FIND, v2);
}
TEST_F(Mvcc, RemCmtUpdAbt3) {
T3_BEGIN;
T3_UPDATE;
T3_ABORT;
T2_REMOVE;
T2_COMMIT;
EXPECT_EXP(v1, 2);
EXPECT_CRE(v3, 3);
EXPECT_EXP(v3, 0);
EXPECT_NXT(v3, v1);
EXPECT_SIZE(2);
T4_BEGIN;
EXPECT_EQ(T4_FIND, nullptr);
}
TEST_F(Mvcc, RemCmtRemAbt3) {
T3_BEGIN;
T3_REMOVE;
T3_ABORT;
T2_REMOVE;
T2_COMMIT;
EXPECT_EXP(v1, 2);
EXPECT_SIZE(1);
T4_BEGIN;
EXPECT_EQ(T4_FIND, nullptr);
}
TEST_F(Mvcc, UpdAbtUpdAbt3) {
T3_BEGIN;
T3_UPDATE;
T3_ABORT;
T2_UPDATE;
T2_ABORT;
EXPECT_EXP(v1, 2);
EXPECT_CRE(v2, 2);
EXPECT_EXP(v2, 0);
EXPECT_CRE(v3, 3);
EXPECT_EXP(v3, 0);
EXPECT_NXT(v3, v1);
EXPECT_NXT(v2, v3);
EXPECT_SIZE(3);
T4_BEGIN;
EXPECT_EQ(T4_FIND, v1);
}
TEST_F(Mvcc, UpdAbtRemAbt3) {
T3_BEGIN;
T3_REMOVE;
T3_ABORT;
T2_UPDATE;
T2_ABORT;
EXPECT_NXT(v2, v1);
EXPECT_EXP(v1, 2);
EXPECT_CRE(v2, 2);
EXPECT_EXP(v2, 0);
EXPECT_SIZE(2);
T4_BEGIN;
EXPECT_EQ(T4_FIND, v1);
}
TEST_F(Mvcc, RemAbtUpdAbt3) {
T3_BEGIN;
T3_UPDATE;
T3_ABORT;
T2_REMOVE;
T2_ABORT;
EXPECT_EXP(v1, 2);
EXPECT_CRE(v3, 3);
EXPECT_EXP(v3, 0);
EXPECT_NXT(v3, v1);
EXPECT_SIZE(2);
T4_BEGIN;
EXPECT_EQ(T4_FIND, v1);
}
TEST_F(Mvcc, RemAbtRemAbt3) {
T3_BEGIN;
T3_REMOVE;
T3_ABORT;
T2_REMOVE;
T2_ABORT;
EXPECT_EXP(v1, 2);
EXPECT_SIZE(1);
T4_BEGIN;
EXPECT_EQ(T4_FIND, v1);
}
int main(int argc, char **argv) {
::testing::InitGoogleTest(&argc, argv);
::testing::FLAGS_gtest_death_test_style = "threadsafe";
return RUN_ALL_TESTS();
}

View File

@ -1,60 +0,0 @@
#include <glog/logging.h>
#include <gtest/gtest.h>
#include "storage/common/pod_buffer.hpp"
class PODBufferTest : public ::testing::Test {
protected:
storage::PODBuffer buffer_;
void SetUp() override { buffer_ = storage::PODBuffer(""); }
void Write(const uint8_t *data, size_t len) { buffer_.Write(data, len); }
bool Read(uint8_t *data, size_t len) { return buffer_.Read(data, len); }
};
TEST_F(PODBufferTest, ReadEmpty) {
uint8_t data[10];
ASSERT_TRUE(Read(data, 0));
for (int i = 1; i <= 5; ++i) ASSERT_FALSE(Read(data, i));
}
TEST_F(PODBufferTest, ReadNonEmpty) {
uint8_t input_data[10];
uint8_t output_data[10];
for (int i = 0; i < 10; ++i) input_data[i] = i;
Write(input_data, 10);
ASSERT_TRUE(Read(output_data, 10));
for (int i = 0; i < 10; ++i) ASSERT_EQ(output_data[i], i);
ASSERT_FALSE(Read(output_data, 1));
}
TEST_F(PODBufferTest, WriteRead) {
uint8_t input_data[10];
uint8_t output_data[10];
for (int i = 0; i < 10; ++i) input_data[i] = i;
Write(input_data, 10);
ASSERT_TRUE(Read(output_data, 5));
for (int i = 0; i < 5; ++i) ASSERT_EQ(output_data[i], i);
ASSERT_TRUE(Read(output_data, 5));
for (int i = 0; i < 5; ++i) ASSERT_EQ(output_data[i], i + 5);
ASSERT_FALSE(Read(output_data, 1));
Write(input_data + 5, 5);
ASSERT_TRUE(Read(output_data, 5));
for (int i = 0; i < 5; ++i) ASSERT_EQ(output_data[i], i + 5);
ASSERT_FALSE(Read(output_data, 1));
}

View File

@ -1,385 +0,0 @@
#include <unistd.h>
#include <vector>
#include <gflags/gflags.h>
#include <gtest/gtest.h>
#include "storage/common/types/property_value.hpp"
#include "storage/common/types/property_value_store.hpp"
#include "utils/file.hpp"
using Location = storage::Location;
namespace fs = std::filesystem;
DECLARE_string(durability_directory);
DECLARE_string(properties_on_disk);
class PropertyValueStoreTest : public ::testing::Test {
protected:
PropertyValueStore props_;
void SetUp() override {
// we need this to test the copy constructor
FLAGS_properties_on_disk = "not empty";
auto durability_path = fs::temp_directory_path() /
("unit_property_value_store_durability_" +
std::to_string(static_cast<int>(getpid())));
FLAGS_durability_directory = durability_path.string();
utils::EnsureDir(fs::path(FLAGS_durability_directory));
}
template <class TValue>
void Set(int key, Location location, const TValue &value) {
props_.set(storage::Property(key, location), PropertyValue(value));
}
PropertyValue At(int key, Location location) {
return props_.at(storage::Property(key, location));
}
auto Erase(int key, Location location) {
return props_.erase(storage::Property(key, location));
}
auto Begin() { return props_.begin(); }
auto End() { return props_.end(); }
void TearDown() override {
props_.clear();
fs::remove_all(fs::path(FLAGS_durability_directory));
}
};
TEST_F(PropertyValueStoreTest, AtMemory) {
std::string some_string = "something";
EXPECT_EQ(PropertyValue(At(0, Location::Memory)).type(),
PropertyValue::Type::Null);
Set(0, Location::Memory, some_string);
EXPECT_EQ(PropertyValue(At(0, Location::Memory)).ValueString(),
some_string);
Set(120, Location::Memory, 42);
EXPECT_EQ(PropertyValue(At(120, Location::Memory)).ValueInt(), 42);
}
TEST_F(PropertyValueStoreTest, AtDisk) {
std::string some_string = "something";
EXPECT_EQ(PropertyValue(At(0, Location::Disk)).type(),
PropertyValue::Type::Null);
Set(0, Location::Disk, some_string);
EXPECT_EQ(PropertyValue(At(0, Location::Disk)).ValueString(),
some_string);
Set(120, Location::Disk, 42);
EXPECT_EQ(PropertyValue(At(120, Location::Disk)).ValueInt(), 42);
}
TEST_F(PropertyValueStoreTest, AtNull) {
EXPECT_EQ(At(0, Location::Memory).type(), PropertyValue::Type::Null);
EXPECT_EQ(At(100, Location::Memory).type(), PropertyValue::Type::Null);
EXPECT_EQ(At(0, Location::Disk).type(), PropertyValue::Type::Null);
EXPECT_EQ(At(100, Location::Disk).type(), PropertyValue::Type::Null);
// set one prop and test it's not null
Set(0, Location::Memory, true);
EXPECT_NE(At(0, Location::Memory).type(), PropertyValue::Type::Null);
EXPECT_EQ(At(100, Location::Memory).type(), PropertyValue::Type::Null);
Set(0, Location::Disk, true);
EXPECT_NE(At(0, Location::Disk).type(), PropertyValue::Type::Null);
EXPECT_EQ(At(100, Location::Disk).type(), PropertyValue::Type::Null);
}
TEST_F(PropertyValueStoreTest, SetNull) {
Set(11, Location::Memory, PropertyValue());
EXPECT_EQ(0, props_.size());
Set(100, Location::Disk, PropertyValue());
EXPECT_EQ(0, props_.size());
}
TEST_F(PropertyValueStoreTest, RemoveMemory) {
// set some props
Set(11, Location::Memory, "a");
Set(30, Location::Memory, "b");
EXPECT_NE(At(11, Location::Memory).type(), PropertyValue::Type::Null);
EXPECT_NE(At(30, Location::Memory).type(), PropertyValue::Type::Null);
EXPECT_EQ(props_.size(), 2);
Erase(11, Location::Memory);
EXPECT_EQ(props_.size(), 1);
EXPECT_EQ(At(11, Location::Memory).type(), PropertyValue::Type::Null);
EXPECT_EQ(Erase(30, Location::Memory), 1);
EXPECT_EQ(props_.size(), 0);
EXPECT_EQ(At(30, Location::Memory).type(), PropertyValue::Type::Null);
EXPECT_EQ(Erase(1000, Location::Memory), 1);
}
TEST_F(PropertyValueStoreTest, RemoveDisk) {
// set some props
Set(11, Location::Disk, "a");
Set(30, Location::Disk, "b");
EXPECT_NE(At(11, Location::Disk).type(), PropertyValue::Type::Null);
EXPECT_NE(At(30, Location::Disk).type(), PropertyValue::Type::Null);
EXPECT_EQ(props_.size(), 2);
Erase(11, Location::Disk);
EXPECT_EQ(props_.size(), 1);
EXPECT_EQ(At(11, Location::Disk).type(), PropertyValue::Type::Null);
EXPECT_EQ(Erase(30, Location::Disk), 1);
EXPECT_EQ(props_.size(), 0);
EXPECT_EQ(At(30, Location::Disk).type(), PropertyValue::Type::Null);
EXPECT_EQ(Erase(1000, Location::Disk), 1);
}
TEST_F(PropertyValueStoreTest, ClearMemory) {
EXPECT_EQ(props_.size(), 0);
Set(11, Location::Memory, "a");
Set(30, Location::Memory, "b");
EXPECT_EQ(props_.size(), 2);
}
TEST_F(PropertyValueStoreTest, ClearDisk) {
EXPECT_EQ(props_.size(), 0);
Set(11, Location::Disk, "a");
Set(30, Location::Disk, "b");
EXPECT_EQ(props_.size(), 2);
}
TEST_F(PropertyValueStoreTest, ReplaceMemory) {
Set(10, Location::Memory, 42);
EXPECT_EQ(At(10, Location::Memory).ValueInt(), 42);
Set(10, Location::Memory, 0.25f);
EXPECT_EQ(At(10, Location::Memory).type(), PropertyValue::Type::Double);
EXPECT_FLOAT_EQ(At(10, Location::Memory).ValueDouble(), 0.25);
}
TEST_F(PropertyValueStoreTest, ReplaceDisk) {
Set(10, Location::Disk, 42);
EXPECT_EQ(At(10, Location::Disk).ValueInt(), 42);
Set(10, Location::Disk, 0.25f);
EXPECT_EQ(At(10, Location::Disk).type(), PropertyValue::Type::Double);
EXPECT_FLOAT_EQ(At(10, Location::Disk).ValueDouble(), 0.25);
}
TEST_F(PropertyValueStoreTest, SizeMemory) {
EXPECT_EQ(props_.size(), 0);
Set(0, Location::Memory, "something");
EXPECT_EQ(props_.size(), 1);
Set(0, Location::Memory, true);
EXPECT_EQ(props_.size(), 1);
Set(1, Location::Memory, true);
EXPECT_EQ(props_.size(), 2);
for (int i = 0; i < 100; ++i) Set(i + 20, Location::Memory, true);
EXPECT_EQ(props_.size(), 102);
Erase(0, Location::Memory);
EXPECT_EQ(props_.size(), 101);
Erase(0, Location::Memory);
EXPECT_EQ(props_.size(), 101);
Erase(1, Location::Memory);
EXPECT_EQ(props_.size(), 100);
}
TEST_F(PropertyValueStoreTest, SizeDisk) {
EXPECT_EQ(props_.size(), 0);
Set(0, Location::Disk, "something");
EXPECT_EQ(props_.size(), 1);
Set(0, Location::Disk, true);
EXPECT_EQ(props_.size(), 1);
Set(1, Location::Disk, true);
EXPECT_EQ(props_.size(), 2);
for (int i = 0; i < 100; ++i) Set(i + 20, Location::Disk, true);
EXPECT_EQ(props_.size(), 102);
Erase(0, Location::Disk);
EXPECT_EQ(props_.size(), 101);
Erase(0, Location::Disk);
EXPECT_EQ(props_.size(), 101);
Erase(1, Location::Disk);
EXPECT_EQ(props_.size(), 100);
}
TEST_F(PropertyValueStoreTest, Size) {
EXPECT_EQ(props_.size(), 0);
for (int i = 0; i < 100; ++i) Set(i, Location::Disk, true);
EXPECT_EQ(props_.size(), 100);
for (int i = 0; i < 200; ++i) Set(i + 100, Location::Memory, true);
EXPECT_EQ(props_.size(), 300);
Erase(0, Location::Disk);
EXPECT_EQ(props_.size(), 299);
Erase(99, Location::Disk);
EXPECT_EQ(props_.size(), 298);
Erase(100, Location::Memory);
EXPECT_EQ(props_.size(), 297);
Erase(299, Location::Memory);
EXPECT_EQ(props_.size(), 296);
}
TEST_F(PropertyValueStoreTest, InsertRetrieveListMemory) {
Set(0, Location::Memory,
std::vector<PropertyValue>{PropertyValue(1), PropertyValue(true),
PropertyValue(2.5), PropertyValue("something"),
PropertyValue()});
auto p = At(0, Location::Memory);
EXPECT_EQ(p.type(), PropertyValue::Type::List);
auto l = p.ValueList();
EXPECT_EQ(l.size(), 5);
EXPECT_EQ(l[0].type(), PropertyValue::Type::Int);
EXPECT_EQ(l[0].ValueInt(), 1);
EXPECT_EQ(l[1].type(), PropertyValue::Type::Bool);
EXPECT_EQ(l[1].ValueBool(), true);
EXPECT_EQ(l[2].type(), PropertyValue::Type::Double);
EXPECT_EQ(l[2].ValueDouble(), 2.5);
EXPECT_EQ(l[3].type(), PropertyValue::Type::String);
EXPECT_EQ(l[3].ValueString(), "something");
EXPECT_EQ(l[4].type(), PropertyValue::Type::Null);
}
TEST_F(PropertyValueStoreTest, InsertRetrieveListDisk) {
Set(0, Location::Disk,
std::vector<PropertyValue>{PropertyValue(1), PropertyValue(true),
PropertyValue(2.5), PropertyValue("something"),
PropertyValue()});
auto p = At(0, Location::Disk);
EXPECT_EQ(p.type(), PropertyValue::Type::List);
auto l = p.ValueList();
EXPECT_EQ(l.size(), 5);
EXPECT_EQ(l[0].type(), PropertyValue::Type::Int);
EXPECT_EQ(l[0].ValueInt(), 1);
EXPECT_EQ(l[1].type(), PropertyValue::Type::Bool);
EXPECT_EQ(l[1].ValueBool(), true);
EXPECT_EQ(l[2].type(), PropertyValue::Type::Double);
EXPECT_EQ(l[2].ValueDouble(), 2.5);
EXPECT_EQ(l[3].type(), PropertyValue::Type::String);
EXPECT_EQ(l[3].ValueString(), "something");
EXPECT_EQ(l[4].type(), PropertyValue::Type::Null);
}
TEST_F(PropertyValueStoreTest, InsertRetrieveMap) {
Set(0, Location::Memory,
std::map<std::string, PropertyValue>{{"a", PropertyValue(1)},
{"b", PropertyValue(true)},
{"c", PropertyValue("something")}});
auto p = At(0, Location::Memory);
EXPECT_EQ(p.type(), PropertyValue::Type::Map);
auto m = p.ValueMap();
EXPECT_EQ(m.size(), 3);
auto get = [&m](const std::string &prop_name) {
return m.find(prop_name)->second;
};
EXPECT_EQ(get("a").type(), PropertyValue::Type::Int);
EXPECT_EQ(get("a").ValueInt(), 1);
EXPECT_EQ(get("b").type(), PropertyValue::Type::Bool);
EXPECT_EQ(get("b").ValueBool(), true);
EXPECT_EQ(get("c").type(), PropertyValue::Type::String);
EXPECT_EQ(get("c").ValueString(), "something");
}
TEST_F(PropertyValueStoreTest, InsertRetrieveMapDisk) {
Set(0, Location::Disk,
std::map<std::string, PropertyValue>{{"a", PropertyValue(1)},
{"b", PropertyValue(true)},
{"c", PropertyValue("something")}});
auto p = At(0, Location::Disk);
EXPECT_EQ(p.type(), PropertyValue::Type::Map);
auto m = p.ValueMap();
EXPECT_EQ(m.size(), 3);
auto get = [&m](const std::string &prop_name) {
return m.find(prop_name)->second;
};
EXPECT_EQ(get("a").type(), PropertyValue::Type::Int);
EXPECT_EQ(get("a").ValueInt(), 1);
EXPECT_EQ(get("b").type(), PropertyValue::Type::Bool);
EXPECT_EQ(get("b").ValueBool(), true);
EXPECT_EQ(get("c").type(), PropertyValue::Type::String);
EXPECT_EQ(get("c").ValueString(), "something");
}
TEST_F(PropertyValueStoreTest, Iterator) {
Set(0, Location::Memory, "a");
Set(1, Location::Memory, 1);
Set(2, Location::Disk, "b");
Set(3, Location::Disk, 2);
auto it = Begin();
ASSERT_TRUE(it != End());
EXPECT_EQ(it->first.Id(), 0);
EXPECT_EQ((*it).second.ValueString(), "a");
++it;
ASSERT_TRUE(it != End());
EXPECT_EQ((*it).first.Id(), 1);
EXPECT_EQ(it->second.ValueInt(), 1);
++it;
ASSERT_TRUE(it != End());
EXPECT_EQ(it->first.Id(), 2);
EXPECT_EQ((*it).second.ValueString(), "b");
++it;
ASSERT_TRUE(it != End());
EXPECT_EQ((*it).first.Id(), 3);
EXPECT_EQ(it->second.ValueInt(), 2);
++it;
ASSERT_TRUE(it == End());
}
TEST_F(PropertyValueStoreTest, CopyConstructor) {
PropertyValueStore props;
for (int i = 1; i <= 3; ++i)
props.set(storage::Property(i, Location::Memory),
PropertyValue("mem_" + std::to_string(i)));
for (int i = 4; i <= 5; ++i)
props.set(storage::Property(i, Location::Disk),
PropertyValue("disk_" + std::to_string(i)));
PropertyValueStore new_props = props;
for (int i = 1; i <= 3; ++i)
EXPECT_EQ(
new_props.at(storage::Property(i, Location::Memory)).ValueString(),
"mem_" + std::to_string(i));
for (int i = 4; i <= 5; ++i)
EXPECT_EQ(new_props.at(storage::Property(i, Location::Disk)).ValueString(),
"disk_" + std::to_string(i));
props.set(storage::Property(1, Location::Memory),
PropertyValue("mem_1_update"));
EXPECT_EQ(new_props.at(storage::Property(1, Location::Memory)).ValueString(),
"mem_1");
new_props.set(storage::Property(2, Location::Memory),
PropertyValue("mem_2_update"));
EXPECT_EQ(props.at(storage::Property(2, Location::Memory)).ValueString(),
"mem_2");
props.set(storage::Property(4, Location::Disk),
PropertyValue("disk_4_update"));
EXPECT_EQ(new_props.at(storage::Property(4, Location::Disk)).ValueString(),
"disk_4");
new_props.set(storage::Property(5, Location::Disk),
PropertyValue("disk_5_update"));
EXPECT_EQ(props.at(storage::Property(5, Location::Disk)).ValueString(),
"disk_5");
}

View File

@ -1,298 +0,0 @@
#include <set>
#include <vector>
#include <gtest/gtest.h>
#include "database/single_node/graph_db.hpp"
#include "database/single_node/graph_db_accessor.hpp"
#include "storage/single_node/mvcc/version_list.hpp"
#include "storage/common/types/property_value.hpp"
#include "storage/single_node/edge_accessor.hpp"
#include "storage/single_node/vertex.hpp"
#include "storage/single_node/vertex_accessor.hpp"
TEST(RecordAccessor, Properties) {
database::GraphDb db;
auto dba = db.Access();
auto vertex = dba.InsertVertex();
auto &properties = vertex.Properties();
auto property = dba.Property("PropName");
auto property_other = dba.Property("Other");
EXPECT_EQ(vertex.PropsAt(property).type(), PropertyValue::Type::Null);
vertex.PropsSet(property, PropertyValue(42));
EXPECT_EQ(vertex.PropsAt(property).ValueInt(), 42);
EXPECT_EQ(properties.at(property).ValueInt(), 42);
EXPECT_EQ(vertex.PropsAt(property_other).type(), PropertyValue::Type::Null);
EXPECT_EQ(properties.at(property_other).type(), PropertyValue::Type::Null);
vertex.PropsErase(property);
EXPECT_EQ(vertex.PropsAt(property).type(), PropertyValue::Type::Null);
EXPECT_EQ(properties.at(property).type(), PropertyValue::Type::Null);
}
TEST(RecordAccessor, DbAccessor) {
database::GraphDb db;
auto dba = db.Access();
auto vertex = dba.InsertVertex();
const auto &const_vertex_dba = vertex.db_accessor();
EXPECT_EQ(&dba, &const_vertex_dba);
auto &vertex_dba = vertex.db_accessor();
EXPECT_EQ(&dba, &vertex_dba);
}
TEST(RecordAccessor, RecordEquality) {
database::GraphDb db;
auto dba = db.Access();
auto v1 = dba.InsertVertex();
auto v2 = dba.InsertVertex();
EXPECT_EQ(v1, v1);
EXPECT_NE(v1, v2);
auto e1 = dba.InsertEdge(v1, v2, dba.EdgeType("type"));
auto e2 = dba.InsertEdge(v1, v2, dba.EdgeType("type"));
EXPECT_EQ(e1, e1);
EXPECT_NE(e1, e2);
}
TEST(RecordAccessor, SwitchOldAndSwitchNewMemberFunctionTest) {
database::GraphDb db;
// test both Switches work on new record
{
auto dba = db.Access();
auto v1 = dba.InsertVertex();
v1.SwitchOld();
v1.SwitchNew();
dba.Commit();
}
// test both Switches work on existing record
{
auto dba = db.Access();
auto v1 = *dba.Vertices(false).begin();
v1.SwitchOld();
v1.SwitchNew();
}
// ensure switch exposes the right data
{
auto dba = db.Access();
auto label = dba.Label("label");
auto v1 = *dba.Vertices(false).begin();
EXPECT_FALSE(v1.has_label(label)); // old record
v1.add_label(label); // modifying data does not switch to new
EXPECT_FALSE(v1.has_label(label)); // old record
v1.SwitchNew();
EXPECT_TRUE(v1.has_label(label));
v1.SwitchOld();
EXPECT_FALSE(v1.has_label(label));
}
}
TEST(RecordAccessor, Reconstruct) {
database::GraphDb db;
auto label = db.Access().Label("label");
{
// we must operate on an old vertex
// because otherwise we only have new
// so create a vertex and commit it
auto dba = db.Access();
dba.InsertVertex();
dba.Commit();
}
// ensure we don't have label set
auto dba = db.Access();
auto v1 = *dba.Vertices(false).begin();
v1.SwitchNew();
EXPECT_FALSE(v1.has_label(label));
{
// update the record through a different accessor
auto v1_other_accessor = *dba.Vertices(false).begin();
v1_other_accessor.add_label(label);
EXPECT_FALSE(v1.has_label(label));
v1_other_accessor.SwitchNew();
EXPECT_TRUE(v1_other_accessor.has_label(label));
}
EXPECT_FALSE(v1.has_label(label));
v1.Reconstruct();
v1.SwitchNew();
EXPECT_TRUE(v1.has_label(label));
}
TEST(RecordAccessor, VertexLabels) {
database::GraphDb db;
auto dba = db.Access();
auto v1 = dba.InsertVertex();
auto &labels = v1.labels();
EXPECT_EQ(v1.labels().size(), 0);
storage::Label l1 = dba.Label("label1");
storage::Label l2 = dba.Label("label2");
// adding labels
EXPECT_FALSE(v1.has_label(l1));
v1.add_label(l1);
EXPECT_TRUE(v1.has_label(l1));
EXPECT_EQ(v1.labels().size(), 1);
EXPECT_EQ(labels.size(), 1);
v1.add_label(l1);
EXPECT_EQ(v1.labels().size(), 1);
EXPECT_EQ(labels.size(), 1);
EXPECT_FALSE(v1.has_label(l2));
v1.add_label(l2);
EXPECT_TRUE(v1.has_label(l2));
EXPECT_EQ(v1.labels().size(), 2);
EXPECT_EQ(labels.size(), 2);
// removing labels
storage::Label l3 = dba.Label("label3");
v1.remove_label(l3);
EXPECT_EQ(labels.size(), 2);
v1.remove_label(l1);
EXPECT_FALSE(v1.has_label(l1));
EXPECT_EQ(v1.labels().size(), 1);
v1.remove_label(l1);
EXPECT_TRUE(v1.has_label(l2));
}
TEST(RecordAccessor, EdgeType) {
database::GraphDb db;
auto dba = db.Access();
auto v1 = dba.InsertVertex();
auto v2 = dba.InsertVertex();
storage::EdgeType likes = dba.EdgeType("likes");
storage::EdgeType hates = dba.EdgeType("hates");
auto edge = dba.InsertEdge(v1, v2, likes);
EXPECT_EQ(edge.EdgeType(), likes);
EXPECT_NE(edge.EdgeType(), hates);
}
TEST(RecordAccessor, EdgeIsCycle) {
database::GraphDb db;
auto dba = db.Access();
auto v1 = dba.InsertVertex();
auto v2 = dba.InsertVertex();
auto likes = dba.EdgeType("edge_type");
EXPECT_TRUE(dba.InsertEdge(v1, v1, likes).is_cycle());
EXPECT_TRUE(dba.InsertEdge(v2, v2, likes).is_cycle());
EXPECT_FALSE(dba.InsertEdge(v1, v2, likes).is_cycle());
EXPECT_FALSE(dba.InsertEdge(v2, v1, likes).is_cycle());
}
TEST(RecordAccessor, VertexEdgeConnections) {
database::GraphDb db;
auto dba = db.Access();
auto v1 = dba.InsertVertex();
auto v2 = dba.InsertVertex();
auto edge = dba.InsertEdge(v1, v2, dba.EdgeType("likes"));
dba.AdvanceCommand();
EXPECT_EQ(edge.from(), v1);
EXPECT_NE(edge.from(), v2);
EXPECT_EQ(edge.to(), v2);
EXPECT_NE(edge.to(), v1);
EXPECT_EQ(v1.in_degree(), 0);
EXPECT_EQ(v1.out_degree(), 1);
EXPECT_EQ(v2.in_degree(), 1);
EXPECT_EQ(v2.out_degree(), 0);
for (auto e : v1.out()) EXPECT_EQ(edge, e);
for (auto e : v2.in()) EXPECT_EQ(edge, e);
}
#define TEST_EDGE_ITERABLE(iterable, ...) \
{ \
std::vector<EdgeAccessor> edge_accessors; \
auto expected_vec = std::vector<EdgeAccessor>(__VA_ARGS__); \
for (const auto &ea : iterable) edge_accessors.emplace_back(ea); \
ASSERT_EQ(edge_accessors.size(), expected_vec.size()); \
EXPECT_TRUE(std::is_permutation( \
edge_accessors.begin(), edge_accessors.end(), expected_vec.begin())); \
}
TEST(RecordAccessor, VertexEdgeConnectionsWithExistingVertex) {
database::GraphDb db;
auto dba = db.Access();
auto v1 = dba.InsertVertex();
auto v2 = dba.InsertVertex();
auto v3 = dba.InsertVertex();
auto edge_type = dba.EdgeType("edge type");
auto e12 = dba.InsertEdge(v1, v2, edge_type);
auto e22 = dba.InsertEdge(v2, v2, edge_type);
auto e23a = dba.InsertEdge(v2, v3, edge_type);
auto e23b = dba.InsertEdge(v2, v3, edge_type);
auto e32 = dba.InsertEdge(v3, v2, edge_type);
dba.AdvanceCommand();
TEST_EDGE_ITERABLE(v1.out(v1));
TEST_EDGE_ITERABLE(v1.out(v2), {e12});
TEST_EDGE_ITERABLE(v1.out(v3));
TEST_EDGE_ITERABLE(v2.out(v1));
TEST_EDGE_ITERABLE(v2.out(v2), {e22});
TEST_EDGE_ITERABLE(v2.out(v3), {e23a, e23b});
TEST_EDGE_ITERABLE(v3.out(v1));
TEST_EDGE_ITERABLE(v3.out(v2), {e32});
TEST_EDGE_ITERABLE(v3.out(v3));
TEST_EDGE_ITERABLE(v1.in(v1));
TEST_EDGE_ITERABLE(v1.in(v2));
TEST_EDGE_ITERABLE(v1.in(v3));
TEST_EDGE_ITERABLE(v2.in(v1), {e12});
TEST_EDGE_ITERABLE(v2.in(v2), {e22});
TEST_EDGE_ITERABLE(v2.in(v3), {e32});
TEST_EDGE_ITERABLE(v3.in(v1));
TEST_EDGE_ITERABLE(v3.in(v2), {e23a, e23b});
TEST_EDGE_ITERABLE(v3.in(v3));
}
TEST(RecordAccessor, VertexEdgeConnectionsWithEdgeType) {
database::GraphDb db;
auto dba = db.Access();
auto v1 = dba.InsertVertex();
auto v2 = dba.InsertVertex();
auto a = dba.EdgeType("a");
auto b = dba.EdgeType("b");
auto c = dba.EdgeType("c");
auto ea = dba.InsertEdge(v1, v2, a);
auto eb_1 = dba.InsertEdge(v2, v1, b);
auto eb_2 = dba.InsertEdge(v2, v1, b);
auto ec = dba.InsertEdge(v1, v2, c);
dba.AdvanceCommand();
TEST_EDGE_ITERABLE(v1.in(), {eb_1, eb_2});
TEST_EDGE_ITERABLE(v2.in(), {ea, ec});
std::vector<storage::EdgeType> edges_a{a};
std::vector<storage::EdgeType> edges_b{b};
std::vector<storage::EdgeType> edges_ac{a, c};
TEST_EDGE_ITERABLE(v1.in(&edges_a));
TEST_EDGE_ITERABLE(v1.in(&edges_b), {eb_1, eb_2});
TEST_EDGE_ITERABLE(v1.out(&edges_a), {ea});
TEST_EDGE_ITERABLE(v1.out(&edges_b));
TEST_EDGE_ITERABLE(v1.out(&edges_ac), {ea, ec});
TEST_EDGE_ITERABLE(v2.in(&edges_a), {ea});
TEST_EDGE_ITERABLE(v2.in(&edges_b));
TEST_EDGE_ITERABLE(v2.out(&edges_a));
TEST_EDGE_ITERABLE(v2.out(&edges_b), {eb_1, eb_2});
}
#undef TEST_EDGE_ITERABLE

View File

@ -1,47 +0,0 @@
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include <vector>
#include "data_structures/concurrent/skiplist.hpp"
TEST(SkipList, Access) {
SkipList<int> input;
{
auto accessor = input.access();
accessor.insert(1);
accessor.insert(2);
accessor.insert(3);
}
auto accessor = input.access();
std::vector<int> results;
for (auto it = accessor.begin(); it != accessor.end(); ++it)
results.push_back(*it);
EXPECT_THAT(results, testing::ElementsAre(1, 2, 3));
}
TEST(SkipList, ConstAccess) {
SkipList<int> input;
{
auto accessor = input.access();
accessor.insert(1);
accessor.insert(2);
accessor.insert(3);
}
const SkipList<int> &skiplist = input;
auto accessor = skiplist.access();
std::vector<int> results;
for (auto it = accessor.begin(); it != accessor.end(); ++it)
results.push_back(*it);
EXPECT_THAT(results, testing::ElementsAre(1, 2, 3));
}
int main(int argc, char **argv) {
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}

View File

@ -1,129 +0,0 @@
#include <chrono>
#include <memory>
#include <thread>
#include <gflags/gflags.h>
#include <gtest/gtest.h>
#include "data_structures/concurrent/skiplist_gc.hpp"
/**
* FakeItem class which increments a variable in the destructor.
* Used to keep track of the number of destroyed elements in GC.
*/
class FakeItem {
public:
FakeItem(std::atomic<int> &count, int value) : count(count), value(value) {}
~FakeItem() { count.fetch_add(1); }
bool operator<(const FakeItem &item) const {
return this->value < item.value;
}
bool operator>(const FakeItem &item) const {
return this->value > item.value;
}
static void destroy(FakeItem *item) { delete item; }
private:
std::atomic<int> &count;
int value;
};
DECLARE_int32(skiplist_gc_interval);
TEST(SkipListGC, CreateNewAccessors) {
FLAGS_skiplist_gc_interval = -1;
SkipListGC<FakeItem> gc;
auto &accessor1 = gc.CreateNewAccessor();
auto &accessor2 = gc.CreateNewAccessor();
auto &accessor3 = gc.CreateNewAccessor();
EXPECT_EQ(accessor1.id_, 1);
EXPECT_EQ(accessor2.id_, 2);
EXPECT_EQ(accessor3.id_, 3);
accessor1.alive_ = false;
accessor2.alive_ = false;
accessor3.alive_ = false;
}
TEST(SkipListGC, DeleteItem) {
FLAGS_skiplist_gc_interval = -1;
SkipListGC<FakeItem> gc;
auto &accessor1 = gc.CreateNewAccessor();
std::atomic<int> count{0};
auto item1 = new FakeItem(count, 1);
gc.Collect(item1);
// Kill the accessor
accessor1.alive_ = false;
gc.GarbageCollect();
EXPECT_EQ(count, 1);
}
TEST(SkipListGC, DontDeleteItem) {
FLAGS_skiplist_gc_interval = -1;
SkipListGC<FakeItem> gc;
auto &accessor1 = gc.CreateNewAccessor();
auto &accessor2 = gc.CreateNewAccessor();
std::atomic<int> count{0};
auto item1 = new FakeItem(count, 1);
gc.Collect(item1);
// Kill the accessor
accessor2.alive_ = false;
// Nothing deleted because accessor1 is blocking.
gc.GarbageCollect();
EXPECT_EQ(count, 0);
// Accessor 1 is not blocking anymore.
accessor1.alive_ = false;
gc.GarbageCollect();
EXPECT_EQ(count, 1);
}
TEST(SkipListGC, Destructor) {
FLAGS_skiplist_gc_interval = -1;
std::atomic<int> count{0};
auto item1 = new FakeItem(count, 1);
{
SkipListGC<FakeItem> gc;
gc.Collect(item1);
EXPECT_EQ(count, 0);
}
EXPECT_EQ(count, 1);
}
TEST(SkipListGC, MultipleDeletes) {
FLAGS_skiplist_gc_interval = -1;
SkipListGC<FakeItem> gc;
std::atomic<int> count{0};
auto &accessor1 = gc.CreateNewAccessor();
auto item1 = new FakeItem(count, 1);
gc.Collect(item1);
auto &accessor2 = gc.CreateNewAccessor();
auto item2 = new FakeItem(count, 1);
gc.Collect(item2);
auto &accessor3 = gc.CreateNewAccessor();
auto item3 = new FakeItem(count, 1);
gc.Collect(item3);
accessor1.alive_ = false;
accessor2.alive_ = false;
gc.GarbageCollect();
EXPECT_EQ(count, 2);
accessor3.alive_ = false;
gc.GarbageCollect();
EXPECT_EQ(count, 3);
}
int main(int argc, char **argv) {
google::InitGoogleLogging(argv[0]);
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}

View File

@ -1,110 +0,0 @@
#include <algorithm>
#include <memory>
#include <vector>
#include "glog/logging.h"
#include "gtest/gtest.h"
#include "data_structures/concurrent/skiplist.hpp"
/* The following tests validate the SkipList::position_and_count estimation
 * functionality. That function trades speed for accuracy through its
 * parameters. The tests here exercise both the absolutely-accurate
 * parameterization and the default one, which should be a near-optimal
 * parameterization. The default-parameter tests are therefore stochastic and
 * validate generally acceptable behavior in the vast majority of cases. The
 * probability of a test failing due to this stochasticity is extremely
 * small, but isn't zero.
*/
auto SkiplistRange(int count) {
auto sl = std::make_unique<SkipList<int>>();
auto access = sl->access();
for (int i = 0; i < count; i++) access.insert(i);
return sl;
}
auto Median(std::vector<int> &elements) {
auto elem_size = elements.size();
DCHECK(elem_size > 0) << "Provide some elements to get median!";
std::sort(elements.begin(), elements.end());
if (elem_size % 2)
return elements[elem_size / 2];
else
return (elements[elem_size / 2 - 1] + elements[elem_size / 2]) / 2;
}
auto Less(int granularity) {
return [granularity](const int &a, const int &b) {
return a / granularity < b / granularity;
};
}
auto Equal(int granularity) {
return [granularity](const int &a, const int &b) {
return a / granularity == b / granularity;
};
}
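// A minimal usage sketch added for clarity (not part of the original test
// suite): it shows what position_and_count returns. On a skiplist holding
// 0..9999, asking for element 42 with granularity 1 and the fully accurate
// parameters (the same ones used by EXPECT_ABS_POS_COUNT below) should yield
// position 42 and a count of 1.
TEST(SkiplistPosAndCount, UsageSketch) {
  auto sl = SkiplistRange(10000);
  auto access = sl->access();
  auto position_and_count =
      access.position_and_count(42, Less(1), Equal(1), 1000, 0);
  EXPECT_EQ(position_and_count.first, 42);   // estimated position of 42
  EXPECT_EQ(position_and_count.second, 1);   // number of equal elements
}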
#define EXPECT_ABS_POS_COUNT(granularity, position, expected_position, \
expected_count) \
{ \
auto sl = SkiplistRange(10000); \
auto position_and_count = sl->access().position_and_count( \
position, Less(granularity), Equal(granularity), 1000, 0); \
EXPECT_EQ(position_and_count.first, expected_position); \
EXPECT_EQ(position_and_count.second, expected_count); \
}
TEST(SkiplistPosAndCount, AbsoluteAccuracy) {
EXPECT_ABS_POS_COUNT(1, 42, 42, 1);
EXPECT_ABS_POS_COUNT(3, 42, 42, 3);
EXPECT_ABS_POS_COUNT(10, 42, 40, 10);
}
#define EXPECT_POS_COUNT(skiplist_size, position, expected_count, \
position_error_margin, count_error_margin) \
{ \
std::vector<int> pos_errors; \
std::vector<int> count_errors; \
\
for (int i = 0; i < 30; i++) { \
auto sl = SkiplistRange(skiplist_size); \
auto position_count = sl->access().position_and_count(position); \
pos_errors.push_back(std::abs((long)position_count.first - position)); \
count_errors.push_back( \
std::abs((long)position_count.second - expected_count)); \
} \
EXPECT_LE(Median(pos_errors), position_error_margin); \
EXPECT_LE(Median(count_errors), count_error_margin); \
}
TEST(SkiplistPosAndCount, DefaultSpeedAndAccuracy) {
EXPECT_POS_COUNT(5000, 42, 1, 20, 3);
EXPECT_POS_COUNT(5000, 2500, 1, 100, 3);
EXPECT_POS_COUNT(5000, 4500, 1, 200, 3);
// For an item greater than all list elements the returned
// estimations are always absolutely accurate.
EXPECT_POS_COUNT(5000, 5000, 0, 0, 0);
}
#define EXPECT_FOR_OUT_OF_RANGE(skiplist_size, value) \
{ \
auto sl = SkiplistRange(skiplist_size); \
auto position_and_count = sl->access().position_and_count(value); \
EXPECT_EQ(position_and_count.first, value < 0 ? 0 : skiplist_size); \
EXPECT_EQ(position_and_count.second, 0); \
}
TEST(SkiplistPosAndCount, EdgeValues) {
// small list
EXPECT_FOR_OUT_OF_RANGE(100, -20);
EXPECT_FOR_OUT_OF_RANGE(100, -1);
EXPECT_FOR_OUT_OF_RANGE(100, 100);
EXPECT_FOR_OUT_OF_RANGE(100, 120);
// big list
EXPECT_FOR_OUT_OF_RANGE(100000, -20);
EXPECT_FOR_OUT_OF_RANGE(100000, -1);
EXPECT_FOR_OUT_OF_RANGE(100000, 100000);
EXPECT_FOR_OUT_OF_RANGE(100000, 100300);
}

View File

@ -1,414 +0,0 @@
/**
@date: 2017-01-2017
@authors: Sandi Fatic
@brief
These tests exercise the functionality of the reverse() function in a
single-threaded scenario. For concurrent tests, look in the concurrent
testing folder.
@todo
Concurrent tests are missing for now.
*/
#include <algorithm>
#include "gtest/gtest.h"
#include "data_structures/concurrent/skiplist.hpp"
#include "utils/random/random_generator.hpp"
using utils::random::NumberGenerator;
using IntegerGenerator = NumberGenerator<std::uniform_int_distribution<int>,
std::default_random_engine, int>;
/*
Tests Skiplist rbegin() and rend() iterators on a sequential dataset.
*/
TEST(SkipListReverseIteratorTest, SequentialIteratorsBeginToEnd) {
SkipList<int> skiplist;
auto accessor = skiplist.access();
int number_of_elements = 1024 * 16;
for (int i = 0; i < number_of_elements; i++) accessor.insert(std::move(i));
auto rbegin = accessor.rbegin();
auto rend = accessor.rend();
while (rbegin != rend) {
ASSERT_EQ(number_of_elements - 1, *rbegin);
rbegin++;
number_of_elements--;
}
}
/*
Tests Skiplist rbegin() and rend() iterators on a random dataset.
*/
TEST(SkipListReverseIteratorTest, RandomIteratorsBeginToEnd) {
SkipList<int> skiplist;
auto accessor = skiplist.access();
int number_of_elements = 1024 * 16;
IntegerGenerator generator(0, 1000000000);
std::set<int> elems_set =
utils::random::generate_set(generator, number_of_elements);
int end = elems_set.size();
std::vector<int> elems_descending(end);
for (auto el : elems_set) {
end--;
accessor.insert(std::move(el));
elems_descending[end] = el;
}
auto rbegin = accessor.rbegin();
auto rend = accessor.rend();
while (rbegin != rend) {
ASSERT_EQ(elems_descending[end], *rbegin);
end++;
rbegin++;
}
}
/*
Tests Skiplist reverse() when element exists. The skiplist uses a sequential
dataset and the element provided is in range of the dataset. The
reverse function should return an std::pair<iterator_to_element_before, true>.
*/
TEST(SkipListReverseIteratorTest, SequentialIteratorsElementExists) {
SkipList<int> skiplist;
auto accessor = skiplist.access();
int number_of_elements = 1024 * 16;
for (int i = 0; i < number_of_elements; i++) accessor.insert(std::move(i));
int element = 1024 * 8;
auto reverse_pair = accessor.reverse(element);
ASSERT_EQ(reverse_pair.second, true);
auto rbegin = reverse_pair.first;
auto rend = accessor.rend();
while (rbegin != rend) {
ASSERT_EQ(element - 1, *rbegin);
rbegin++;
element--;
}
}
/*
Tests Skiplist reverse() when element exists. The skiplist uses a random
dataset and the element provided exists in the random dataset. The reverse
function should return an std::pair<iterator_to_element_before, true>.
*/
TEST(SkipListReverseIteratorTest, RandomIteratorsElementExists) {
SkipList<int> skiplist;
auto accessor = skiplist.access();
int number_of_elements = 1024 * 16;
IntegerGenerator generator(0, 1000000000);
std::set<int> elems_set =
utils::random::generate_set(generator, number_of_elements);
int end = elems_set.size();
std::vector<int> elems_descending(end);
for (auto el : elems_set) {
end--;
accessor.insert(std::move(el));
elems_descending[end] = el;
}
int middle = end / 2;
auto reverse_pair = accessor.reverse(elems_descending[middle]);
ASSERT_EQ(reverse_pair.second, true);
auto rbegin = reverse_pair.first;
auto rend = accessor.rend();
while (rbegin != rend) {
ASSERT_EQ(elems_descending[middle + 1], *rbegin);
middle++;
rbegin++;
}
}
/*
Tests Skiplist reverse() when element exists and the element is the first one
in the skiplist. The skiplist uses a sequential dataset. The reverse function
should return an std::pair<rend, true>.
*/
TEST(SkipListReverseIteratorTest, SequentialIteratorsMinimumElement) {
SkipList<int> skiplist;
auto accessor = skiplist.access();
int number_of_elements = 1024 * 16;
for (int i = 0; i < number_of_elements; i++) accessor.insert(std::move(i));
auto reverse_pair = accessor.reverse(0);
ASSERT_EQ(reverse_pair.second, true);
auto rbegin = reverse_pair.first;
auto rend = accessor.rend();
ASSERT_EQ(rbegin, rend);
}
/*
Tests Skiplist reverse() when element exists and the element is the first one
in the skiplist. The skiplist uses a random dataset. The reverse function
should return an std::pair<rend, true>.
*/
TEST(SkipListReverseIteratorTest, RandomIteratorsMinimumElement) {
SkipList<int> skiplist;
auto accessor = skiplist.access();
int number_of_elements = 1024 * 16;
IntegerGenerator generator(0, 1000000000);
std::set<int> elems_set =
utils::random::generate_set(generator, number_of_elements);
auto min_el = std::min_element(elems_set.begin(), elems_set.end());
for (auto el : elems_set) accessor.insert(std::move(el));
auto reverse_pair = accessor.reverse(*min_el);
ASSERT_EQ(reverse_pair.second, true);
auto rbegin = reverse_pair.first;
auto rend = accessor.rend();
ASSERT_EQ(rbegin, rend);
}
/*
Tests Skiplist reverse() when element exists and the element is the last one
in the skiplist. The skiplist uses a sequential dataset. The reverse function
should return an std::pair<iterator_to_smaller_element, true>.
*/
TEST(SkipListReverseIteratorTest, SequentialIteratorsMaximumElement) {
SkipList<int> skiplist;
auto accessor = skiplist.access();
int number_of_elements = 1024 * 16;
for (int i = 0; i < number_of_elements; i++) accessor.insert(std::move(i));
auto reverse_pair = accessor.reverse(number_of_elements - 1);
ASSERT_EQ(reverse_pair.second, true);
auto rbegin = reverse_pair.first;
auto rbegin_real = accessor.rbegin();
rbegin_real++;
ASSERT_EQ(rbegin, rbegin_real);
}
/*
Tests Skiplist reverse() when element exists and the element is the last one
in the skiplist. The skiplist uses a random dataset. The reverse function
should return an std::pair<iterator_to_smaller_element, true>.
*/
TEST(SkipListReverseIteratorTest, RandomIteratorsMaximumElement) {
SkipList<int> skiplist;
auto accessor = skiplist.access();
int number_of_elements = 1024 * 16;
IntegerGenerator generator(0, 1000000000);
std::set<int> elems_set =
utils::random::generate_set(generator, number_of_elements);
auto max_el = std::max_element(elems_set.begin(), elems_set.end());
for (auto el : elems_set) accessor.insert(std::move(el));
auto reverse_pair = accessor.reverse(*max_el);
ASSERT_EQ(reverse_pair.second, true);
auto rbegin = reverse_pair.first;
auto rbegin_real = accessor.rbegin();
rbegin_real++;
ASSERT_EQ(rbegin, rbegin_real);
}
/*
Tests Skiplist reverse() when the element is out of bounds. The skiplist uses a
sequential dataset and the element provided is bigger than the last element
in the skiplist. The reverse function should return an std::pair<rend, false>.
*/
TEST(SkipListReverseIteratorTest, SequentialIteratorsElementBiggerThanLast) {
SkipList<int> skiplist;
auto accessor = skiplist.access();
int number_of_elements = 1024 * 16;
for (int i = 0; i < number_of_elements; i++) accessor.insert(std::move(i));
auto reverse_pair = accessor.reverse(number_of_elements + 1);
ASSERT_EQ(reverse_pair.second, false);
auto rbegin = reverse_pair.first;
auto rend = accessor.rend();
ASSERT_EQ(rend, rbegin);
}
/*
Tests Skiplist reverse() when the element is out of bounds. The skiplist uses a
random dataset and the element provided is bigger than the last element in the
skiplist. The reverse function should return an std::pair<rend, false>.
*/
TEST(SkipListReverseIteratorTest, RandomIteratorsElementBiggerThanLast) {
SkipList<int> skiplist;
auto accessor = skiplist.access();
int number_of_elements = 1024 * 16;
IntegerGenerator generator(0, 1000000000);
std::set<int> elems_set =
utils::random::generate_set(generator, number_of_elements);
auto max_el = std::max_element(elems_set.begin(), elems_set.end());
for (auto el : elems_set) accessor.insert(std::move(el));
auto reverse_pair = accessor.reverse(*max_el + 1);
ASSERT_EQ(reverse_pair.second, false);
auto rbegin = reverse_pair.first;
auto rend = accessor.rend();
ASSERT_EQ(rend, rbegin);
}
/*
Tests Skiplist reverse() when the element is out of bounds.
The skiplist uses a sequential dataset and the element provided is lower
than the first element in the skiplist. The reverse function should return an
std::pair<rend, false>.
*/
TEST(SkipListReverseIteratorTest, SequentialIteratorsElementLowerThanFirst) {
SkipList<int> skiplist;
auto accessor = skiplist.access();
int number_of_elements = 1024 * 16;
for (int i = 0; i < number_of_elements; i++) accessor.insert(std::move(i));
auto reverse_pair = accessor.reverse(-1);
ASSERT_EQ(reverse_pair.second, false);
auto rbegin = reverse_pair.first;
auto rend = accessor.rend();
ASSERT_EQ(rend, rbegin);
}
/*
Tests Skiplist reverse() when the element is out of bounds. The skiplist uses a
random dataset and the element provided is lower than the first element in
the skiplist. The reverse function should return an std::pair<rend, false>.
*/
TEST(SkipListReverseIteratorTest, RandomIteratorsElementLowerThanFirst) {
SkipList<int> skiplist;
auto accessor = skiplist.access();
int number_of_elements = 1024 * 16;
IntegerGenerator generator(0, 1000000000);
std::set<int> elems_set =
utils::random::generate_set(generator, number_of_elements);
auto min_el = std::min_element(elems_set.begin(), elems_set.end());
for (auto el : elems_set) accessor.insert(std::move(el));
auto reverse_pair = accessor.reverse(*min_el - 1);
ASSERT_EQ(reverse_pair.second, false);
auto rbegin = reverse_pair.first;
auto rend = accessor.rend();
ASSERT_EQ(rend, rbegin);
}
/*
Tests Skiplist ReverseIterator when concurrently inserting an element while
iterating. The inserted element should also be traversed.
*/
TEST(SkipListReverseIteratorTest, InsertWhileIteratingTest) {
SkipList<int> skiplist;
auto accessor = skiplist.access();
int number_of_elements = 1024 * 16;
for (int i = 1; i < number_of_elements; i += 2) accessor.insert(std::move(i));
auto rbegin = accessor.rbegin();
auto rend = accessor.rend();
for (int i = 0; i < number_of_elements; i += 2) accessor.insert(std::move(i));
int element = number_of_elements - 1;
while (rbegin != rend) {
ASSERT_EQ(element, *rbegin);
rbegin++;
element--;
}
}
/*
Tests Skiplist ReverseIterator when concurrently deleting elements while
iterating. A deleted element shouldn't be traversed, except if it is deleted
while the iterator is pointing at it.
*/
TEST(SkipListReverseIteratorTest, DeleteWhileIteratingTest) {
SkipList<int> skiplist;
auto accessor = skiplist.access();
int number_of_elements = 1024 * 16;
for (int i = 0; i < number_of_elements; i++) accessor.insert(std::move(i));
auto rbegin = accessor.rbegin();
auto rend = accessor.rend();
int element = number_of_elements - 2;
// check element which will be deleted
rbegin++;
ASSERT_EQ(element, *rbegin);
// delete elements
for (int i = 0; i < number_of_elements; i += 2) accessor.remove(i);
// check if still points to the same after delete
ASSERT_EQ(element, *rbegin);
rbegin++;
// check all deleted elements after
while (rbegin != rend && element > 0) {
ASSERT_EQ(element - 1, *rbegin);
rbegin++;
element -= 2;
}
}
int main(int argc, char** argv) {
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}

View File

@ -1,42 +0,0 @@
#include <algorithm>
#include <iterator>
#include <vector>
#include <gtest/gtest.h>
#include "data_structures/concurrent/skiplist.hpp"
#include "storage/common/index.hpp"
template <class TIterable>
int Count(TIterable &collection) {
int ret = 0;
for (__attribute__((unused)) auto it : collection) ret += 1;
return ret;
}
TEST(SkipListSuffix, EmptyRange) {
SkipList<int> V;
auto access = V.access();
auto r1 = database::index::SkipListSuffix<typename SkipList<int>::Iterator,
int, SkipList<int>>(
access.begin(), std::move(access));
EXPECT_EQ(Count(r1), 0);
}
TEST(SkipListSuffix, NonEmptyRange) {
SkipList<int> V;
auto access = V.access();
access.insert(1);
access.insert(5);
access.insert(3);
auto r1 = database::index::SkipListSuffix<typename SkipList<int>::Iterator,
int, SkipList<int>>(
access.begin(), std::move(access));
EXPECT_EQ(Count(r1), 3);
auto iter = r1.begin();
EXPECT_EQ(*iter, 1);
++iter;
EXPECT_EQ(*iter, 3);
++iter;
EXPECT_EQ(*iter, 5);
}

View File

@ -1,208 +0,0 @@
#include <gtest/gtest.h>
#include "database/single_node/graph_db.hpp"
#include "database/single_node/graph_db_accessor.hpp"
#include "durability/single_node/state_delta.hpp"
TEST(StateDelta, CreateVertex) {
database::GraphDb db;
storage::GidGenerator generator;
auto gid0 = generator.Next();
{
auto dba = db.Access();
auto delta =
database::StateDelta::CreateVertex(dba.transaction_id(), gid0);
delta.Apply(dba);
dba.Commit();
}
{
auto dba = db.Access();
auto vertex = dba.FindVertexOptional(gid0, false);
EXPECT_TRUE(vertex);
EXPECT_EQ(vertex->CypherId(), 0);
}
}
TEST(StateDelta, RemoveVertex) {
database::GraphDb db;
storage::GidGenerator generator;
auto gid0 = generator.Next();
{
auto dba = db.Access();
dba.InsertVertex(gid0);
dba.Commit();
}
{
auto dba = db.Access();
auto delta =
database::StateDelta::RemoveVertex(dba.transaction_id(), gid0, true);
delta.Apply(dba);
dba.Commit();
}
{
auto dba = db.Access();
auto vertex = dba.FindVertexOptional(gid0, false);
EXPECT_FALSE(vertex);
}
}
TEST(StateDelta, CreateEdge) {
database::GraphDb db;
storage::GidGenerator generator;
auto gid0 = generator.Next();
auto gid1 = generator.Next();
auto gid2 = generator.Next();
{
auto dba = db.Access();
dba.InsertVertex(gid0);
dba.InsertVertex(gid1);
dba.Commit();
}
{
auto dba = db.Access();
auto delta =
database::StateDelta::CreateEdge(dba.transaction_id(), gid2, gid0,
gid1, dba.EdgeType("edge"), "edge");
delta.Apply(dba);
dba.Commit();
}
{
auto dba = db.Access();
auto edge = dba.FindEdgeOptional(gid2, false);
EXPECT_TRUE(edge);
}
}
TEST(StateDelta, RemoveEdge) {
database::GraphDb db;
storage::GidGenerator generator;
auto gid0 = generator.Next();
auto gid1 = generator.Next();
auto gid2 = generator.Next();
{
auto dba = db.Access();
auto v0 = dba.InsertVertex(gid0);
auto v1 = dba.InsertVertex(gid1);
dba.InsertEdge(v0, v1, dba.EdgeType("edge"), gid2);
dba.Commit();
}
{
auto dba = db.Access();
auto delta = database::StateDelta::RemoveEdge(dba.transaction_id(), gid2);
delta.Apply(dba);
dba.Commit();
}
{
auto dba = db.Access();
auto edge = dba.FindEdgeOptional(gid2, false);
EXPECT_FALSE(edge);
}
}
TEST(StateDelta, AddLabel) {
database::GraphDb db;
storage::GidGenerator generator;
auto gid0 = generator.Next();
{
auto dba = db.Access();
dba.InsertVertex(gid0);
dba.Commit();
}
{
auto dba = db.Access();
auto delta = database::StateDelta::AddLabel(dba.transaction_id(), gid0,
dba.Label("label"), "label");
delta.Apply(dba);
dba.Commit();
}
{
auto dba = db.Access();
auto vertex = dba.FindVertexOptional(gid0, false);
EXPECT_TRUE(vertex);
auto labels = vertex->labels();
EXPECT_EQ(labels.size(), 1);
EXPECT_EQ(labels[0], dba.Label("label"));
}
}
TEST(StateDelta, RemoveLabel) {
database::GraphDb db;
storage::GidGenerator generator;
auto gid0 = generator.Next();
{
auto dba = db.Access();
auto vertex = dba.InsertVertex(gid0);
vertex.add_label(dba.Label("label"));
dba.Commit();
}
{
auto dba = db.Access();
auto delta = database::StateDelta::RemoveLabel(
dba.transaction_id(), gid0, dba.Label("label"), "label");
delta.Apply(dba);
dba.Commit();
}
{
auto dba = db.Access();
auto vertex = dba.FindVertexOptional(gid0, false);
EXPECT_TRUE(vertex);
auto labels = vertex->labels();
EXPECT_EQ(labels.size(), 0);
}
}
TEST(StateDelta, SetPropertyVertex) {
database::GraphDb db;
storage::GidGenerator generator;
auto gid0 = generator.Next();
{
auto dba = db.Access();
dba.InsertVertex(gid0);
dba.Commit();
}
{
auto dba = db.Access();
auto delta = database::StateDelta::PropsSetVertex(
dba.transaction_id(), gid0, dba.Property("property"), "property",
PropertyValue(2212));
delta.Apply(dba);
dba.Commit();
}
{
auto dba = db.Access();
auto vertex = dba.FindVertexOptional(gid0, false);
EXPECT_TRUE(vertex);
auto prop = vertex->PropsAt(dba.Property("property"));
EXPECT_EQ(prop.ValueInt(), 2212);
}
}
TEST(StateDelta, SetPropertyEdge) {
database::GraphDb db;
storage::GidGenerator generator;
auto gid0 = generator.Next();
auto gid1 = generator.Next();
auto gid2 = generator.Next();
{
auto dba = db.Access();
auto v0 = dba.InsertVertex(gid0);
auto v1 = dba.InsertVertex(gid1);
dba.InsertEdge(v0, v1, dba.EdgeType("edge"), gid2);
dba.Commit();
}
{
auto dba = db.Access();
auto delta = database::StateDelta::PropsSetEdge(
dba.transaction_id(), gid2, dba.Property("property"), "property",
PropertyValue(2212));
delta.Apply(dba);
dba.Commit();
}
{
auto dba = db.Access();
auto edge = dba.FindEdgeOptional(gid2, false);
EXPECT_TRUE(edge);
auto prop = edge->PropsAt(dba.Property("property"));
EXPECT_EQ(prop.ValueInt(), 2212);
}
}

View File

@ -1,64 +0,0 @@
#include <gmock/gmock.h>
#include <vector>
#include "data_structures/bitset/static_bitset.hpp"
#include "gtest/gtest-spi.h"
#include "gtest/gtest.h"
using testing::UnorderedElementsAreArray;
TEST(StaticBitset, Intersection) {
const int n = 50;
Bitset<int64_t> bitset(n);
Bitset<int64_t> bitset2(n);
std::vector<int> V;
std::vector<int> V2;
for (int i = 0; i < n / 2; ++i) {
const int pos = rand() % n;
bitset.Set(pos);
V.push_back(pos);
}
for (int i = 0; i < n / 2; ++i) {
const int pos = rand() % n;
bitset2.Set(pos);
V2.push_back(pos);
}
Bitset<int64_t> intersected = bitset.Intersect(bitset);
sort(V.begin(), V.end());
V.resize(unique(V.begin(), V.end()) - V.begin());
EXPECT_THAT(V, UnorderedElementsAreArray(intersected.Ones()));
sort(V2.begin(), V2.end());
V2.resize(unique(V2.begin(), V2.end()) - V2.begin());
std::vector<int> V3;
set_intersection(V.begin(), V.end(), V2.begin(), V2.end(), back_inserter(V3));
Bitset<int64_t> intersected_two = bitset.Intersect(bitset2);
EXPECT_THAT(V3, UnorderedElementsAreArray(intersected_two.Ones()));
}
TEST(StaticBitset, BasicFunctionality) {
const int n = 50;
Bitset<int64_t> bitset(n);
std::vector<int> V;
for (int i = 0; i < n / 2; ++i) {
const int pos = rand() % n;
bitset.Set(pos);
V.push_back(pos);
}
sort(V.begin(), V.end());
V.resize(unique(V.begin(), V.end()) - V.begin());
EXPECT_THAT(V, UnorderedElementsAreArray(bitset.Ones()));
}
TEST(StaticBitset, SetAndReadBit) {
const int n = 50;
Bitset<char> bitset(n);
bitset.Set(4);
EXPECT_EQ(bitset.At(4), true);
EXPECT_EQ(bitset.At(3), false);
}
int main(int argc, char **argv) {
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}

View File

@ -1,71 +0,0 @@
#include <glog/logging.h>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "database/single_node/graph_db.hpp"
#include "database/single_node/graph_db_accessor.hpp"
class StatTest : public ::testing::Test {
public:
database::GraphDb db_;
};
#define COMPARE(stat, vc, ec, ad) \
EXPECT_EQ(stat.vertex_count, vc); \
EXPECT_EQ(stat.edge_count, ec); \
EXPECT_EQ(stat.avg_degree, ad);
TEST_F(StatTest, CountTest1) {
auto &stat = db_.GetStat();
COMPARE(stat, 0, 0, 0);
auto dba = db_.Access();
dba.InsertVertex();
dba.InsertVertex();
dba.InsertVertex();
COMPARE(stat, 0, 0, 0);
db_.RefreshStat();
COMPARE(stat, 3, 0, 0);
}
TEST_F(StatTest, CountTest2) {
auto &stat = db_.GetStat();
COMPARE(stat, 0, 0, 0);
auto dba = db_.Access();
auto type = dba.EdgeType("edge");
auto v1 = dba.InsertVertex();
auto v2 = dba.InsertVertex();
auto v3 = dba.InsertVertex();
auto v4 = dba.InsertVertex();
dba.InsertEdge(v1, v2, type);
dba.InsertEdge(v2, v2, type);
dba.InsertEdge(v3, v2, type);
dba.InsertEdge(v4, v2, type);
dba.InsertEdge(v1, v3, type);
COMPARE(stat, 0, 0, 0);
db_.RefreshStat();
COMPARE(stat, 4, 5, 2.5);
dba.Commit();
auto dba1 = db_.Access();
auto v22 = dba1.FindVertex(v2.gid(), true);
dba1.DetachRemoveVertex(v22);
db_.RefreshStat();
COMPARE(stat, 4, 5, 2.5);
dba1.Commit();
db_.CollectGarbage();
db_.RefreshStat();
COMPARE(stat, 3, 1, 2.0 / 3);
}
int main(int argc, char **argv) {
::testing::InitGoogleTest(&argc, argv);
google::InitGoogleLogging(argv[0]);
return RUN_ALL_TESTS();
}

View File

@ -1,153 +0,0 @@
#include "gtest/gtest.h"
#include <optional>
#include <thread>
#include <vector>
#include "data_structures/concurrent/skiplist.hpp"
#include "transactions/single_node/engine.hpp"
#include "transactions/transaction.hpp"
using namespace tx;
TEST(Engine, GcSnapshot) {
Engine engine;
ASSERT_EQ(engine.GlobalGcSnapshot(), Snapshot({1}));
std::vector<Transaction *> transactions;
// create transactions and check the GC snapshot
for (int i = 0; i < 5; ++i) {
transactions.push_back(engine.Begin());
EXPECT_EQ(engine.GlobalGcSnapshot(), Snapshot({1}));
}
// commit transactions in the middle, expect
// the GcSnapshot did not change
engine.Commit(*transactions[1]);
EXPECT_EQ(engine.GlobalGcSnapshot(), Snapshot({1}));
engine.Commit(*transactions[2]);
EXPECT_EQ(engine.GlobalGcSnapshot(), Snapshot({1}));
// have the first three transactions committed
engine.Commit(*transactions[0]);
EXPECT_EQ(engine.GlobalGcSnapshot(), Snapshot({1, 2, 3, 4}));
// commit all
engine.Commit(*transactions[3]);
engine.Commit(*transactions[4]);
EXPECT_EQ(engine.GlobalGcSnapshot(), Snapshot({6}));
}
TEST(Engine, Advance) {
Engine engine;
auto t0 = engine.Begin();
auto t1 = engine.Begin();
EXPECT_EQ(t0->cid(), 1);
engine.Advance(t0->id_);
EXPECT_EQ(t0->cid(), 2);
engine.Advance(t0->id_);
EXPECT_EQ(t0->cid(), 3);
EXPECT_EQ(t1->cid(), 1);
}
TEST(Engine, ConcurrentBegin) {
Engine engine;
std::vector<std::thread> threads;
SkipList<TransactionId> tx_ids;
for (int i = 0; i < 10; ++i) {
threads.emplace_back([&engine, accessor = tx_ids.access()]() mutable {
for (int j = 0; j < 100; ++j) {
auto t = engine.Begin();
accessor.insert(t->id_);
}
});
}
for (auto &t : threads) t.join();
EXPECT_EQ(tx_ids.access().size(), 1000);
}
TEST(Engine, RunningTransaction) {
Engine engine;
auto t0 = engine.Begin();
auto t1 = engine.Begin();
EXPECT_EQ(t0, engine.RunningTransaction(t0->id_));
EXPECT_NE(t1, engine.RunningTransaction(t0->id_));
EXPECT_EQ(t1, engine.RunningTransaction(t1->id_));
}
TEST(Engine, EnsureTxIdGreater) {
Engine engine;
ASSERT_LE(engine.Begin()->id_, 40);
engine.EnsureNextIdGreater(42);
EXPECT_EQ(engine.Begin()->id_, 43);
}
TEST(Engine, BlockingTransaction) {
Engine engine;
std::vector<std::thread> threads;
std::atomic<bool> finished{false};
std::atomic<bool> blocking_started{false};
std::atomic<bool> blocking_finished{false};
std::atomic<int> tx_counter{0};
for (int i = 0; i < 10; ++i) {
threads.emplace_back([&engine, &tx_counter, &finished]() mutable {
auto t = engine.Begin();
tx_counter++;
while (!finished.load()) {
std::this_thread::sleep_for(std::chrono::microseconds(100));
}
engine.Commit(*t);
});
}
// Wait for all transactions to start.
do {
std::this_thread::sleep_for(std::chrono::microseconds(100));
} while (tx_counter.load() < 10);
threads.emplace_back([&engine, &blocking_started, &blocking_finished]() {
// This should block until other transactions end.
blocking_started.store(true);
auto t = engine.BeginBlocking(std::nullopt);
engine.Commit(*t);
blocking_finished.store(true);
});
EXPECT_FALSE(finished.load());
EXPECT_FALSE(blocking_finished.load());
EXPECT_EQ(tx_counter.load(), 10);
// Make sure the blocking transaction thread kicked off.
do {
std::this_thread::sleep_for(std::chrono::microseconds(100));
} while (!blocking_started.load());
// Make sure we can't start any new transaction
EXPECT_THROW(engine.Begin(), TransactionEngineError);
EXPECT_THROW(engine.BeginBlocking(std::nullopt), TransactionEngineError);
// Release regular transactions. This will cause the blocking transaction to
// end also.
finished.store(true);
for (auto &t : threads) {
if (t.joinable()) {
t.join();
}
}
EXPECT_TRUE(blocking_finished.load());
// Make sure we can start transactions now.
{
auto t = engine.Begin();
EXPECT_NE(t, nullptr);
engine.Commit(*t);
}
{
auto t = engine.BeginBlocking(std::nullopt);
EXPECT_NE(t, nullptr);
engine.Commit(*t);
}
}

View File

@ -1,298 +0,0 @@
#include <gtest/gtest.h>
#include "database/single_node/graph_db.hpp"
#include "database/single_node/graph_db_accessor.hpp"
#include "storage/common/constraints/unique_constraints.hpp"
using storage::constraints::ConstraintEntry;
using storage::constraints::UniqueConstraints;
class UniqueConstraintsTest : public ::testing::Test {
protected:
void SetUp() override {
auto dba = db_.Access();
label_ = dba.Label("label");
property1_ = dba.Property("property1");
property2_ = dba.Property("property2");
property3_ = dba.Property("property3");
dba.BuildUniqueConstraint(label_, {property1_, property2_, property3_});
dba.Commit();
}
database::GraphDb db_;
storage::Label label_;
storage::Property property1_;
storage::Property property2_;
storage::Property property3_;
PropertyValue value1_{"value1"};
PropertyValue value2_{"value2"};
PropertyValue value3_{"value3"};
UniqueConstraints constraints_;
};
// NOLINTNEXTLINE(hicpp-special-member-functions)
TEST_F(UniqueConstraintsTest, BuildDrop) {
auto constraint =
ConstraintEntry{label_, {property1_, property2_, property3_}};
constraints_.AddConstraint(constraint);
EXPECT_TRUE(
constraints_.Exists(label_, {property2_, property1_, property3_}));
EXPECT_TRUE(
constraints_.Exists(label_, {property1_, property2_, property3_}));
EXPECT_FALSE(constraints_.Exists(label_, {property2_, property3_}));
constraints_.RemoveConstraint(constraint);
EXPECT_FALSE(
constraints_.Exists(label_, {property2_, property1_, property3_}));
EXPECT_FALSE(
constraints_.Exists(label_, {property1_, property2_, property3_}));
}
// NOLINTNEXTLINE(hicpp-special-member-functions)
TEST_F(UniqueConstraintsTest, BuildWithViolation) {
auto dba = db_.Access();
dba.DeleteUniqueConstraint(label_, {property1_, property2_, property3_});
dba.Commit();
auto dba1 = db_.Access();
auto v1 = dba1.InsertVertex();
v1.add_label(label_);
v1.PropsSet(property1_, value1_);
v1.PropsSet(property2_, value2_);
v1.PropsSet(property3_, value3_);
auto v2 = dba1.InsertVertex();
v2.add_label(label_);
v2.PropsSet(property1_, value1_);
v2.PropsSet(property3_, value3_);
auto v3 = dba1.InsertVertex();
v3.add_label(label_);
v3.PropsSet(property3_, value3_);
v3.PropsSet(property1_, value1_);
v3.PropsSet(property2_, value2_);
dba1.Commit();
auto dba2 = db_.Access();
EXPECT_THROW(
dba2.BuildUniqueConstraint(label_, {property1_, property2_, property3_}),
database::ConstraintViolationException);
dba2.Commit();
}
// NOLINTNEXTLINE(hicpp-special-member-functions)
TEST_F(UniqueConstraintsTest, InsertInsert) {
{
auto dba = db_.Access();
auto v = dba.InsertVertex();
v.add_label(label_);
v.PropsSet(property2_, value2_);
v.PropsSet(property1_, value1_);
v.PropsSet(property3_, value3_);
dba.Commit();
}
{
auto dba = db_.Access();
auto v = dba.InsertVertex();
v.add_label(label_);
v.PropsSet(property3_, value3_);
v.PropsSet(property2_, value2_);
EXPECT_THROW(v.PropsSet(property1_, value1_),
database::ConstraintViolationException);
dba.Commit();
}
}
// NOLINTNEXTLINE(hicpp-special-member-functions)
TEST_F(UniqueConstraintsTest, InsertInsertDiffValues) {
{
auto dba = db_.Access();
auto v = dba.InsertVertex();
v.add_label(label_);
v.PropsSet(property2_, value2_);
v.PropsSet(property1_, value1_);
v.PropsSet(property3_, value3_);
dba.Commit();
}
{
auto dba = db_.Access();
auto v = dba.InsertVertex();
PropertyValue other3("Some other value 3");
v.PropsSet(property3_, other3);
v.add_label(label_);
PropertyValue other2("Some other value 2");
v.PropsSet(property2_, other2);
PropertyValue other1("Some other value 1");
v.PropsSet(property1_, other1);
dba.Commit();
}
}
// NOLINTNEXTLINE(hicpp-special-member-functions)
TEST_F(UniqueConstraintsTest, InsertAbortInsert) {
{
auto dba = db_.Access();
auto v = dba.InsertVertex();
v.PropsSet(property1_, value1_);
v.add_label(label_);
v.PropsSet(property2_, value2_);
v.PropsSet(property3_, value3_);
dba.Abort();
}
{
auto dba = db_.Access();
auto v = dba.InsertVertex();
v.PropsSet(property2_, value2_);
v.PropsSet(property1_, value1_);
v.PropsSet(property3_, value3_);
v.add_label(label_);
dba.Commit();
}
}
// NOLINTNEXTLINE(hicpp-special-member-functions)
TEST_F(UniqueConstraintsTest, InsertRemoveAbortInsert) {
auto gid = storage::Gid::FromInt(0);
{
auto dba = db_.Access();
auto v = dba.InsertVertex();
v.PropsSet(property2_, value2_);
v.PropsSet(property1_, value1_);
v.PropsSet(property3_, value3_);
v.add_label(label_);
dba.Commit();
}
{
auto dba = db_.Access();
auto v = dba.FindVertex(gid, false);
v.PropsErase(property2_);
dba.Abort();
}
{
auto dba = db_.Access();
auto v = dba.InsertVertex();
v.PropsSet(property1_, value1_);
v.add_label(label_);
v.PropsSet(property2_, value2_);
EXPECT_THROW(v.PropsSet(property3_, value3_),
database::ConstraintViolationException);
}
}
// NOLINTNEXTLINE(hicpp-special-member-functions)
TEST_F(UniqueConstraintsTest, InsertInsertSameTransaction) {
{
auto dba = db_.Access();
auto v1 = dba.InsertVertex();
auto v2 = dba.InsertVertex();
v1.add_label(label_);
v2.add_label(label_);
v1.PropsSet(property1_, value1_);
v1.PropsSet(property2_, value2_);
v2.PropsSet(property2_, value2_);
v2.PropsSet(property3_, value3_);
v2.PropsSet(property1_, value1_);
EXPECT_THROW(v1.PropsSet(property3_, value3_),
database::ConstraintViolationException);
dba.Commit();
}
}
// NOLINTNEXTLINE(hicpp-special-member-functions)
TEST_F(UniqueConstraintsTest, InsertInsertReversed) {
auto dba1 = db_.Access();
auto dba2 = db_.Access();
auto v1 = dba1.InsertVertex();
auto v2 = dba2.InsertVertex();
v1.add_label(label_);
v2.add_label(label_);
v1.PropsSet(property1_, value1_);
v1.PropsSet(property2_, value2_);
v2.PropsSet(property2_, value2_);
v2.PropsSet(property3_, value3_);
v2.PropsSet(property1_, value1_);
EXPECT_THROW(v1.PropsSet(property3_, value3_), mvcc::SerializationError);
}
// NOLINTNEXTLINE(hicpp-special-member-functions)
TEST_F(UniqueConstraintsTest, InsertRemoveInsert) {
auto gid = storage::Gid::FromInt(0);
{
auto dba = db_.Access();
auto v = dba.InsertVertex();
v.PropsSet(property2_, value2_);
v.PropsSet(property1_, value1_);
v.PropsSet(property3_, value3_);
v.add_label(label_);
dba.Commit();
}
{
auto dba = db_.Access();
auto v = dba.FindVertex(gid, false);
v.PropsErase(property2_);
dba.Commit();
}
{
auto dba = db_.Access();
auto v = dba.InsertVertex();
v.PropsSet(property1_, value1_);
v.add_label(label_);
v.PropsSet(property2_, value2_);
v.PropsSet(property3_, value3_);
dba.Commit();
}
}
// NOLINTNEXTLINE(hicpp-special-member-functions)
TEST_F(UniqueConstraintsTest, InsertRemoveInsertSameTransaction) {
auto dba = db_.Access();
auto v = dba.InsertVertex();
v.PropsSet(property2_, value2_);
v.PropsSet(property1_, value1_);
v.PropsSet(property3_, value3_);
v.add_label(label_);
v.PropsErase(property2_);
v.PropsSet(property2_, value2_);
dba.Commit();
}
// NOLINTNEXTLINE(hicpp-special-member-functions)
TEST_F(UniqueConstraintsTest, InsertDropInsert) {
{
auto dba = db_.Access();
auto v = dba.InsertVertex();
v.PropsSet(property1_, value1_);
v.add_label(label_);
v.PropsSet(property2_, value2_);
v.PropsSet(property3_, value3_);
dba.Commit();
}
{
auto dba = db_.Access();
dba.DeleteUniqueConstraint(label_, {property2_, property3_, property1_});
dba.Commit();
}
{
auto dba = db_.Access();
auto v = dba.InsertVertex();
v.PropsSet(property2_, value2_);
v.PropsSet(property1_, value1_);
v.add_label(label_);
v.PropsSet(property3_, value3_);
dba.Commit();
}
}
int main(int argc, char **argv) {
::testing::InitGoogleTest(&argc, argv);
google::InitGoogleLogging(argv[0]);
return RUN_ALL_TESTS();
}

View File

@ -9,5 +9,3 @@ disallow_in_source_build()
set(CMAKE_INSTALL_DEFAULT_COMPONENT_NAME "tools")
add_subdirectory(src)
enable_testing()
add_subdirectory(tests)

View File

@ -1,7 +1,3 @@
# CSV Import Tool Target
add_executable(mg_import_csv mg_import_csv/main.cpp)
target_link_libraries(mg_import_csv mg-single-node kvstore_dummy_lib)
# Generate a version.hpp file
set(VERSION_STRING ${memgraph_VERSION})
configure_file(../../src/version.hpp.in version.hpp @ONLY)
@ -23,18 +19,12 @@ target_link_libraries(mg_dump fmt gflags glog mgclient pthread)
# Strip the executable in release build.
string(TOLOWER ${CMAKE_BUILD_TYPE} lower_build_type)
if (lower_build_type STREQUAL "release")
add_custom_command(TARGET mg_import_csv POST_BUILD
COMMAND strip -s mg_import_csv
COMMENT "Stripping symbols and sections from mg_import_csv")
add_custom_command(TARGET mg_client POST_BUILD
COMMAND strip -s mg_client
COMMENT "Stripping symbols and sections from mg_client")
endif()
# TODO (mferencevic): Currently the `mg_import_csv` tool is tailored to the old
# storage and doesn't work with storage-v2.
# install(TARGETS mg_import_csv RUNTIME DESTINATION bin)
install(TARGETS mg_client RUNTIME DESTINATION bin)
# Target for building all the tool executables.
add_custom_target(tools DEPENDS mg_import_csv mg_client mg_dump)
add_custom_target(tools DEPENDS mg_client mg_dump)

View File

@ -1,525 +0,0 @@
#include <algorithm>
#include <cstdio>
#include <filesystem>
#include <fstream>
#include <optional>
#include <unordered_map>
#include <gflags/gflags.h>
#include <glog/logging.h>
#include <cppitertools/chain.hpp>
#include "communication/bolt/v1/encoder/base_encoder.hpp"
#include "config.hpp"
#include "durability/hashed_file_writer.hpp"
#include "durability/single_node/paths.hpp"
#include "durability/single_node/snapshooter.hpp"
#include "durability/single_node/version.hpp"
#include "utils/cast.hpp"
#include "utils/string.hpp"
#include "utils/timer.hpp"
// Snapshot layout is described in durability/version.hpp
static_assert(durability::kVersion == 11,
"Wrong snapshot version, please update!");
bool ValidateNotEmpty(const char *flagname, const std::string &value) {
if (utils::Trim(value).empty()) {
printf("The argument '%s' is required\n", flagname);
return false;
}
return true;
}
bool ValidateNotNewline(const char *flagname, const std::string &value) {
auto has_no_newline = value.find('\n') == std::string::npos;
if (!has_no_newline) {
printf("The argument '%s' cannot contain newline character\n", flagname);
}
return has_no_newline;
}
bool ValidateNoWhitespace(const char *flagname, const std::string &value) {
auto trimmed = utils::Trim(value);
if (trimmed.empty() && !value.empty()) {
printf("The argument '%s' cannot be only whitespace\n", flagname);
return false;
} else if (!trimmed.empty()) {
for (auto c : trimmed) {
if (std::isspace(c)) {
printf("The argument '%s' cannot contain whitespace\n", flagname);
return false;
}
}
}
return true;
}
DEFINE_string(out, "",
"Destination for the created snapshot file. Without it, snapshot "
"is written inside the expected snapshots directory of Memgraph "
"installation.");
DEFINE_bool(overwrite, false, "Overwrite the output file if it exists");
DEFINE_string(array_delimiter, ";",
"Delimiter between elements of array values, default is ';'");
DEFINE_string(csv_delimiter, ",",
"Delimiter between each field in the CSV, default is ','");
DEFINE_string(quote, "\"",
"Quotation character, default is '\"'. Cannot contain '\n'");
DEFINE_validator(quote, &ValidateNotNewline);
DEFINE_bool(skip_duplicate_nodes, false,
"Skip duplicate nodes or raise an error (default)");
// Arguments `--nodes` and `--relationships` can be input multiple times and are
// handled with custom parsing.
DEFINE_string(nodes, "", "CSV file containing graph nodes (vertices)");
DEFINE_validator(nodes, &ValidateNotEmpty);
DEFINE_string(node_label, "",
"Specify additional label for nodes. To add multiple labels, "
"repeat the flag multiple times");
DEFINE_validator(node_label, &ValidateNoWhitespace);
DEFINE_string(relationships, "",
"CSV file containing graph relationships (edges)");
DEFINE_string(relationship_type, "",
"Overwrite the relationship type from csv with the given value");
DEFINE_validator(relationship_type, &ValidateNoWhitespace);
auto ParseRepeatedFlag(const std::string &flagname, int argc, char *argv[]) {
std::vector<std::string> values;
for (int i = 1; i < argc; ++i) {
std::string flag(argv[i]);
int matched_flag_dashes = 0;
if (utils::StartsWith(flag, "--" + flagname))
matched_flag_dashes = 2;
else if (utils::StartsWith(flag, "-" + flagname))
matched_flag_dashes = 1;
// Get the value if we matched the flag.
if (matched_flag_dashes != 0) {
std::string value;
auto maybe_value = flag.substr(flagname.size() + matched_flag_dashes);
if (maybe_value.empty() && i + 1 < argc)
value = argv[++i];
else if (!maybe_value.empty() && maybe_value.front() == '=')
value = maybe_value.substr(1);
CHECK(!value.empty()) << "The argument '" << flagname << "' is required";
values.push_back(value);
}
}
return values;
}
// A field describing the CSV column.
struct Field {
// Name of the field.
std::string name;
// Type of the values under this field.
std::string type;
};
// A node ID from CSV format.
struct NodeId {
std::string id;
// Group/space of IDs. ID must be unique in a single group.
std::string id_space;
};
bool operator==(const NodeId &a, const NodeId &b) {
return a.id == b.id && a.id_space == b.id_space;
}
auto &operator<<(std::ostream &stream, const NodeId &node_id) {
return stream << fmt::format("{}({})", node_id.id, node_id.id_space);
}
namespace std {
template <>
struct hash<NodeId> {
size_t operator()(const NodeId &node_id) const {
size_t id_hash = std::hash<std::string>{}(node_id.id);
size_t id_space_hash = std::hash<std::string>{}(node_id.id_space);
return id_hash ^ (id_space_hash << 1);
}
};
} // namespace std
class MemgraphNodeIdMap {
public:
std::optional<storage::Gid> Get(const NodeId &node_id) const {
auto found_it = node_id_to_mg_.find(node_id);
if (found_it == node_id_to_mg_.end()) return std::nullopt;
return found_it->second;
}
storage::Gid Insert(const NodeId &node_id) {
auto gid = generator_.Next();
node_id_to_mg_[node_id] = gid;
return gid;
}
private:
storage::GidGenerator generator_;
std::unordered_map<NodeId, storage::Gid> node_id_to_mg_;
};
std::vector<std::string> ReadRow(std::istream &stream) {
std::vector<std::string> row;
bool quoting = false;
std::vector<char> column;
std::string line;
auto check_quote = [&line](int curr_idx) {
return curr_idx + FLAGS_quote.size() <= line.size() &&
line.compare(curr_idx, FLAGS_quote.size(), FLAGS_quote) == 0;
};
do {
std::getline(stream, line);
auto line_size = line.size();
for (auto i = 0; i < line_size; ++i) {
auto c = line[i];
if (quoting) {
if (check_quote(i)) {
quoting = false;
i += FLAGS_quote.size() - 1;
} else {
column.push_back(c);
}
} else if (check_quote(i)) {
// Hopefully, escaping isn't needed
quoting = true;
i += FLAGS_quote.size() - 1;
} else if (c == FLAGS_csv_delimiter.front()) {
row.emplace_back(column.begin(), column.end());
column.clear();
// Handle special case when delimiter is the last
// character in line. This means that another
// empty column needs to be added.
if (i == line_size - 1) {
row.emplace_back("");
}
} else {
column.push_back(c);
}
}
} while (quoting);
if (!column.empty()) row.emplace_back(column.begin(), column.end());
return row;
}
std::vector<Field> ReadHeader(std::istream &stream) {
auto row = ReadRow(stream);
std::vector<Field> fields;
fields.reserve(row.size());
for (const auto &value : row) {
auto name_and_type = utils::Split(value, ":");
CHECK(name_and_type.size() == 1U || name_and_type.size() == 2U)
<< fmt::format(
"\nExpected a name and optionally a type, got '{}'.\nDid you "
"specify a correct CSV delimiter?",
value);
auto name = name_and_type[0];
// When type is missing, default is string.
std::string type("string");
if (name_and_type.size() == 2U)
type = utils::ToLowerCase(utils::Trim(name_and_type[1]));
fields.push_back(Field{name, type});
}
return fields;
}
communication::bolt::Value StringToValue(const std::string &str,
const std::string &type) {
// Empty string signifies Null.
if (str.empty()) return communication::bolt::Value();
auto convert = [](const auto &str,
const auto &type) -> communication::bolt::Value {
if (type == "int" || type == "long" || type == "byte" || type == "short") {
std::istringstream ss(str);
int64_t val;
ss >> val;
return val;
} else if (type == "float" || type == "double") {
return utils::ParseDouble(str);
} else if (type == "boolean") {
return utils::ToLowerCase(str) == "true";
} else if (type == "char" || type == "string") {
return str;
}
LOG(FATAL) << "Unexpected type: " << type;
return communication::bolt::Value();
};
// Type *not* ending with '[]', signifies regular value.
if (!utils::EndsWith(type, "[]")) return convert(str, type);
// Otherwise, we have an array type.
auto elem_type = type.substr(0, type.size() - 2);
auto elems = utils::Split(str, FLAGS_array_delimiter);
std::vector<communication::bolt::Value> array;
array.reserve(elems.size());
for (const auto &elem : elems) {
array.push_back(convert(std::string(utils::Trim(elem)), elem_type));
}
return array;
}
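// Hypothetical illustration (not from the original tool): a quick sanity
// sketch of how StringToValue is expected to behave with the default flags
// (array delimiter ';'). Wrapped in an unused helper so it stays valid C++ at
// file scope.
[[maybe_unused]] static void StringToValueExamples() {
  auto scalar = StringToValue("42", "int");       // bolt integer 42
  auto array = StringToValue("1;2;3", "long[]");  // bolt list [1, 2, 3]
  auto null_value = StringToValue("", "string");  // empty cell signifies Null
  (void)scalar;
  (void)array;
  (void)null_value;
}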
std::string GetIdSpace(const std::string &type) {
auto start = type.find("(");
if (start == std::string::npos) return "";
// Strip the enclosing parentheses, e.g. "id(COMMENT_ID)" -> "COMMENT_ID".
return type.substr(start + 1, type.size() - start - 2);
}
void WriteNodeRow(
communication::bolt::BaseEncoder<HashedFileWriter> *encoder,
const std::vector<Field> &fields, const std::vector<std::string> &row,
const std::vector<std::string> &additional_labels,
MemgraphNodeIdMap &node_id_map) {
std::optional<storage::Gid> id;
std::vector<std::string> labels;
std::map<std::string, communication::bolt::Value> properties;
for (int i = 0; i < row.size(); ++i) {
const auto &field = fields[i];
std::string value(utils::Trim(row[i]));
if (utils::StartsWith(field.type, "id")) {
CHECK(!id) << "Only one node ID must be specified";
NodeId node_id{value, GetIdSpace(field.type)};
if (node_id_map.Get(node_id)) {
if (FLAGS_skip_duplicate_nodes) {
LOG(WARNING) << fmt::format("Skipping duplicate node with id '{}'",
node_id);
return;
} else {
LOG(FATAL) << fmt::format("Node with id '{}' already exists",
node_id);
}
}
id = node_id_map.Insert(node_id);
properties["id"] = node_id.id;
} else if (field.type == "label") {
for (const auto &label : utils::Split(value, FLAGS_array_delimiter)) {
labels.emplace_back(utils::Trim(label));
}
} else if (field.type != "ignore") {
properties[field.name] = StringToValue(value, field.type);
}
}
labels.insert(labels.end(), additional_labels.begin(),
additional_labels.end());
CHECK(id) << "Node ID must be specified";
encoder->WriteVertex(
{communication::bolt::Id::FromUint(id->AsUint()), labels, properties});
}
auto PassNodes(
communication::bolt::BaseEncoder<HashedFileWriter> *encoder,
const std::string &nodes_path, MemgraphNodeIdMap &node_id_map,
const std::vector<std::string> &additional_labels) {
int64_t node_count = 0;
std::ifstream nodes_file(nodes_path);
CHECK(nodes_file) << fmt::format("Unable to open '{}'", nodes_path);
auto fields = ReadHeader(nodes_file);
auto row = ReadRow(nodes_file);
while (!row.empty()) {
CHECK_EQ(row.size(), fields.size())
<< "Expected as many values as there are header fields";
WriteNodeRow(encoder, fields, row, additional_labels, node_id_map);
// Increase count and move to next row.
node_count += 1;
row = ReadRow(nodes_file);
}
return node_count;
}
void WriteRelationshipsRow(
communication::bolt::BaseEncoder<HashedFileWriter> *encoder,
const std::vector<Field> &fields, const std::vector<std::string> &row,
const MemgraphNodeIdMap &node_id_map, storage::Gid relationship_id) {
std::optional<storage::Gid> start_id;
std::optional<storage::Gid> end_id;
std::optional<std::string> relationship_type;
std::map<std::string, communication::bolt::Value> properties;
for (int i = 0; i < row.size(); ++i) {
const auto &field = fields[i];
std::string value(utils::Trim(row[i]));
if (utils::StartsWith(field.type, "start_id")) {
CHECK(!start_id) << "Only one node ID must be specified";
NodeId node_id{value, GetIdSpace(field.type)};
start_id = node_id_map.Get(node_id);
if (!start_id)
LOG(FATAL) << fmt::format("Node with id '{}' does not exist", node_id);
} else if (utils::StartsWith(field.type, "end_id")) {
CHECK(!end_id) << "Only one node ID must be specified";
NodeId node_id{value, GetIdSpace(field.type)};
end_id = node_id_map.Get(node_id);
if (!end_id)
LOG(FATAL) << fmt::format("Node with id '{}' does not exist", node_id);
} else if (field.type == "type") {
CHECK(!relationship_type)
<< "Only one relationship TYPE must be specified";
relationship_type = value;
} else if (field.type != "ignore") {
properties[field.name] = StringToValue(value, field.type);
}
}
auto rel_type = utils::Trim(FLAGS_relationship_type);
if (!rel_type.empty()) {
relationship_type = FLAGS_relationship_type;
}
CHECK(start_id) << "START_ID must be set";
CHECK(end_id) << "END_ID must be set";
CHECK(relationship_type) << "Relationship TYPE must be set";
auto bolt_id = communication::bolt::Id::FromUint(relationship_id.AsUint());
auto bolt_start_id = communication::bolt::Id::FromUint(start_id->AsUint());
auto bolt_end_id = communication::bolt::Id::FromUint(end_id->AsUint());
encoder->WriteEdge(
{bolt_id, bolt_start_id, bolt_end_id, *relationship_type, properties});
}
int PassRelationships(
communication::bolt::BaseEncoder<HashedFileWriter> *encoder,
const std::string &relationships_path, const MemgraphNodeIdMap &node_id_map,
storage::GidGenerator &relationship_id_generator) {
std::ifstream relationships_file(relationships_path);
CHECK(relationships_file)
<< fmt::format("Unable to open '{}'", relationships_path);
auto fields = ReadHeader(relationships_file);
auto row = ReadRow(relationships_file);
int64_t relationships = 0;
while (!row.empty()) {
CHECK_EQ(row.size(), fields.size())
<< "Expected as many values as there are header fields";
WriteRelationshipsRow(encoder, fields, row, node_id_map,
relationship_id_generator.Next());
++relationships;
row = ReadRow(relationships_file);
}
return relationships;
}
void Convert(const std::vector<std::string> &nodes,
const std::vector<std::string> &additional_labels,
const std::vector<std::string> &relationships,
const std::string &output_path) {
try {
HashedFileWriter buffer(output_path);
communication::bolt::BaseEncoder<HashedFileWriter> encoder(buffer);
int64_t node_count = 0;
int64_t edge_count = 0;
storage::GidGenerator relationship_id_generator;
MemgraphNodeIdMap node_id_map;
// Snapshot file has the following contents in order:
// 1) Magic number.
// 2) Transaction ID of the snapshooter. When generated set to 0.
// 3) Transactional snapshot of the snapshooter. When the snapshot is
// generated it's an empty list.
// 4) List of label+property index.
// 5) All nodes, sequentially, but not encoded as a list.
// 6) All relationships, sequentially, but not encoded as a list.
// 7) Summary with node count, relationship count and hash digest.
encoder.WriteRAW(durability::kSnapshotMagic.data(),
durability::kSnapshotMagic.size());
encoder.WriteValue(durability::kVersion);
encoder.WriteInt(0); // Id of transaction that is snapshooting.
encoder.WriteList({}); // Transactional snapshot.
encoder.WriteList({}); // Label + property indexes.
encoder.WriteList({}); // Unique constraints
// PassNodes streams vertices to the encoder.
for (const auto &nodes_file : nodes) {
node_count +=
PassNodes(&encoder, nodes_file, node_id_map, additional_labels);
}
// PassEdges streams edges to the encoder.
for (const auto &relationships_file : relationships) {
edge_count += PassRelationships(&encoder, relationships_file, node_id_map,
relationship_id_generator);
}
buffer.WriteValue(node_count);
buffer.WriteValue(edge_count);
buffer.WriteValue(buffer.hash());
} catch (const std::ios_base::failure &) {
// Only HashedFileWriter sets the underlying fstream to throw.
LOG(FATAL) << fmt::format("Unable to write to '{}'", output_path);
}
}
static const char *usage =
"[OPTION]... [--out=SNAPSHOT_FILE] [--nodes=CSV_FILE]... "
"[--relationships=CSV_FILE]...\n"
"Create a Memgraph recovery snapshot file from CSV.\n";
// Used only to get the value from memgraph's configuration files.
DECLARE_string(durability_directory);
std::string GetOutputPath() {
// If we have the 'out' flag, use that.
if (!utils::Trim(FLAGS_out).empty()) return FLAGS_out;
// Without the 'out', fall back to reading the memgraph configuration for
// durability_directory. Hopefully, memgraph configuration doesn't contain
// other flags which are defined in this file.
LoadConfig();
// Without durability_directory, we have to require 'out' flag.
std::string durability_dir(utils::Trim(FLAGS_durability_directory));
if (durability_dir.empty())
LOG(FATAL) << "Unable to determine snapshot output location. Please, "
"provide the 'out' flag";
try {
auto snapshot_dir = durability_dir + "/snapshots";
if (!std::filesystem::exists(snapshot_dir) &&
!std::filesystem::create_directories(snapshot_dir)) {
LOG(FATAL) << fmt::format("Cannot create snapshot directory '{}'",
snapshot_dir);
}
} catch (const std::filesystem::filesystem_error &error) {
LOG(FATAL) << error.what();
}
// TODO: Remove this stupid hack which deletes WAL files just to make snapshot
// recovery work. Newest snapshot without accompanying WAL files should be
// detected in memgraph and correctly recovered (or error reported).
try {
auto wal_dir = durability_dir + "/wal";
if (std::filesystem::exists(wal_dir)) {
for ([[gnu::unused]] const auto &wal_file :
std::filesystem::directory_iterator(wal_dir)) {
if (!FLAGS_overwrite) {
LOG(FATAL) << "Durability directory isn't empty. Pass --overwrite to "
"remove the old recovery data";
}
break;
}
LOG(WARNING) << "Removing old recovery data!";
std::filesystem::remove_all(wal_dir);
}
} catch (const std::filesystem::filesystem_error &error) {
LOG(FATAL) << error.what();
}
return std::string(
durability::MakeSnapshotPath(durability_dir, 0));
}
int main(int argc, char *argv[]) {
gflags::SetUsageMessage(usage);
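// Repeated flags ('nodes', 'node-label', 'relationships') are collected
// manually before gflags parsing, since gflags keeps only the last value of
// a flag given multiple times.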
auto nodes = ParseRepeatedFlag("nodes", argc, argv);
auto additional_labels = ParseRepeatedFlag("node-label", argc, argv);
auto relationships = ParseRepeatedFlag("relationships", argc, argv);
gflags::ParseCommandLineFlags(&argc, &argv, true);
google::InitGoogleLogging(argv[0]);
std::string output_path(GetOutputPath());
if (std::filesystem::exists(output_path) && !FLAGS_overwrite) {
LOG(FATAL) << fmt::format(
"File exists: '{}'. Pass --overwrite if you want to overwrite.",
output_path);
}
auto iter_all_inputs = iter::chain(nodes, relationships);
std::vector<std::string> all_inputs(iter_all_inputs.begin(),
iter_all_inputs.end());
LOG(INFO) << fmt::format("Converting {} to '{}'",
utils::Join(all_inputs, ", "), output_path);
utils::Timer conversion_timer;
Convert(nodes, additional_labels, relationships, output_path);
double conversion_sec = conversion_timer.Elapsed().count();
LOG(INFO) << fmt::format("Created '{}' in {:.2f} seconds", output_path,
conversion_sec);
return 0;
}

View File

@ -1,18 +0,0 @@
include_directories(SYSTEM ${GTEST_INCLUDE_DIR})
add_executable(mg_recovery_check mg_recovery_check.cpp)
target_link_libraries(mg_recovery_check mg-single-node gtest gtest_main kvstore_dummy_lib)
# Copy CSV data to CMake build dir
configure_file(csv/comment_nodes.csv csv/comment_nodes.csv COPYONLY)
configure_file(csv/comment_nodes_2.csv csv/comment_nodes_2.csv COPYONLY)
configure_file(csv/forum_nodes.csv csv/forum_nodes.csv COPYONLY)
configure_file(csv/relationships_0.csv csv/relationships_0.csv COPYONLY)
configure_file(csv/relationships_1.csv csv/relationships_1.csv COPYONLY)
# Copy the actual runner to CMake build dir
configure_file(test_mg_import_csv test_mg_import_csv COPYONLY)
add_test(NAME test_mg_import_csv
COMMAND test_mg_import_csv
--mg-import-csv ../src/mg_import_csv
--mg-recovery-check ./mg_recovery_check)

View File

@ -1,39 +0,0 @@
#!/usr/bin/env python3
import json
import os
import re
import subprocess
# paths
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
TESTS_DIR_REL = os.path.join("..", "..", "build_debug", "tools", "tests")
TESTS_DIR = os.path.normpath(os.path.join(SCRIPT_DIR, TESTS_DIR_REL))
# ctest tests
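# 'ctest -N' lists the registered tests without running them.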
ctest_output = subprocess.run(["ctest", "-N"], cwd=TESTS_DIR, check=True,
stdout=subprocess.PIPE).stdout.decode("utf-8")
runs = []
# Build an Apollo run description for every test registered with ctest.
for row in ctest_output.split("\n"):
# Filter rows only containing tests.
if not re.match(r"^\s*Test\s+#", row): continue
name = row.split(":")[1].strip()
path = os.path.join(TESTS_DIR_REL, name)
dirname, basename = os.path.split(path)
files = [basename, "CTestTestfile.cmake"]
# extra files for specific tests
if name == "test_mg_import_csv":
files.extend(["csv", "mg_recovery_check", "../src/mg_import_csv"])
runs.append({
"name": "tools__" + name,
"cd": dirname,
"commands": "ctest --output-on-failure -R \"^{}$\"".format(name),
"infiles": files,
})
print(json.dumps(runs, indent=4, sort_keys=True))

View File

@ -1,7 +0,0 @@
id:ID(COMMENT_ID)|country|browser:string|content:string|:LABEL
0|Croatia|Chrome|yes|Message;Comment
1|ÖUnited
KingdomÖ|Chrome|thanks|Message;Comment
2|Germany||LOL|Message;Comment
3|France|Firefox|I see|Message;Comment
4|Italy|Internet Explorer|fine|Message;Comment

View File

@ -1,6 +0,0 @@
id:ID(COMMENT_ID)|country:string|browser:string|content:string|:LABEL
0|Croatia|Chrome|yes|Message;Comment
1|"United Kingdom"|Chrome|thanks|Message;Comment
2|Germany||LOL|Message;Comment
3|France|Firefox|I see|Message;Comment
4|Italy|Internet Explorer|fine|Message;Comment

View File

@ -1,6 +0,0 @@
id:ID(FORUM_ID)|title:string|:LABEL|emptyColumn
0|General|Forum|
1|Support|Forum|
2|Music|Forum|
3|Film|Forum|
4|Programming|Forum|

View File

@ -1,4 +0,0 @@
:START_ID(COMMENT_ID)|:END_ID(FORUM_ID)|:TYPE
0|0|POSTED_ON
1|1|POSTED_ON
2|2|POSTED_ON

View File

@ -1,3 +0,0 @@
:START_ID(COMMENT_ID)|:END_ID(FORUM_ID)|:TYPE
3|3|POSTED_ON
4|4|POSTED_ON

View File

@ -1,104 +0,0 @@
#include <optional>
#include <string>
#include <gflags/gflags.h>
#include <gtest/gtest.h>
#include "database/single_node/graph_db.hpp"
#include "database/single_node/graph_db_accessor.hpp"
#include "durability/single_node/recovery.hpp"
#include "query/typed_value.hpp"
static const char *usage =
"--durability-dir DURABILITY_DIR\n"
"Check that Memgraph can recover the snapshot. This tool should be "
"invoked through 'test_mg_import' wrapper, so as to check that 'mg_import' "
"tools work correctly.\n";
DEFINE_string(durability_dir, "", "Path to the durability directory");
class RecoveryTest : public ::testing::Test {
protected:
void SetUp() override {
std::string durability_dir(FLAGS_durability_dir);
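// Recover the snapshot produced by mg_import_csv, then replay any WAL and
// rebuild indexes.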
durability::RecoveryData recovery_data;
durability::RecoverOnlySnapshot(durability_dir, &db_, &recovery_data,
std::nullopt);
durability::RecoveryTransactions recovery_transactions(&db_);
durability::RecoverWal(durability_dir, &db_, &recovery_data,
&recovery_transactions);
durability::RecoverIndexes(&db_, recovery_data.indexes);
}
database::GraphDb db_;
};
TEST_F(RecoveryTest, TestVerticesRecovered) {
auto dba = db_.Access();
EXPECT_EQ(dba.VerticesCount(), 10);
EXPECT_EQ(dba.VerticesCount(dba.Label("Comment")), 5);
for (const auto &vertex : dba.Vertices(dba.Label("Comment"), false)) {
EXPECT_TRUE(vertex.has_label(dba.Label("Message")));
}
EXPECT_EQ(dba.VerticesCount(dba.Label("Forum")), 5);
}
TEST_F(RecoveryTest, TestPropertyNull) {
auto dba = db_.Access();
bool found = false;
for (const auto &vertex : dba.Vertices(dba.Label("Comment"), false)) {
auto id_prop = query::TypedValue(vertex.PropsAt(dba.Property("id")));
auto browser = query::TypedValue(vertex.PropsAt(dba.Property("browser")));
if (id_prop.IsString() && id_prop.ValueString() == "2") {
EXPECT_FALSE(found);
found = true;
EXPECT_TRUE(browser.IsNull());
} else {
EXPECT_FALSE(browser.IsNull());
}
}
ASSERT_TRUE(found);
}
TEST_F(RecoveryTest, TestEdgesRecovered) {
auto dba = db_.Access();
EXPECT_EQ(dba.EdgesCount(), 5);
for (const auto &edge : dba.Edges(false)) {
EXPECT_TRUE(edge.EdgeType() == dba.EdgeType("POSTED_ON"));
}
}
TEST_F(RecoveryTest, TestQuote) {
auto dba = db_.Access();
for (const auto &vertex : dba.Vertices(dba.Label("Comment"), false)) {
auto id_prop = query::TypedValue(vertex.PropsAt(dba.Property("id")));
auto country = query::TypedValue(vertex.PropsAt(dba.Property("country")));
if (id_prop.IsString() && id_prop.ValueString() == "1") {
EXPECT_TRUE(country.IsString());
EXPECT_EQ(country.ValueString(), "United Kingdom");
}
}
}
TEST_F(RecoveryTest, TestNodeLabelFlag) {
auto dba = db_.Access();
for (const auto &vertex : dba.Vertices(false)) {
EXPECT_TRUE(vertex.has_label(dba.Label("First")));
EXPECT_TRUE(vertex.has_label(dba.Label("Second")));
}
}
TEST_F(RecoveryTest, TestRelationshipType) {
auto dba = db_.Access();
EXPECT_EQ(dba.EdgesCount(), 5);
for (const auto &edge : dba.Edges(false)) {
EXPECT_TRUE(edge.EdgeType() == dba.EdgeType("TYPE"));
}
}
int main(int argc, char *argv[]) {
::testing::InitGoogleTest(&argc, argv);
gflags::SetUsageMessage(usage);
gflags::ParseCommandLineFlags(&argc, &argv, true);
return RUN_ALL_TESTS();
}

View File

@ -1,65 +0,0 @@
#!/usr/bin/env python3
'''Run mg_import_csv and test that recovery works with mg_recovery_check.'''
import argparse
import subprocess
import os
import tempfile
_SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
def parse_args():
argp = argparse.ArgumentParser(description=__doc__)
argp.add_argument('--mg-import-csv', required=True,
help='Path to mg_import_csv executable.')
argp.add_argument('--mg-recovery-check', required=True,
help='Path to mg_recovery_check executable.')
return argp.parse_args()
def main():
args = parse_args()
comment_nodes = os.path.join(_SCRIPT_DIR, 'csv', 'comment_nodes.csv')
comment_nodes_2 = os.path.join(_SCRIPT_DIR, 'csv', 'comment_nodes_2.csv')
forum_nodes = os.path.join(_SCRIPT_DIR, 'csv', 'forum_nodes.csv')
relationships_0 = os.path.join(_SCRIPT_DIR, 'csv', 'relationships_0.csv')
relationships_1 = os.path.join(_SCRIPT_DIR, 'csv', 'relationships_1.csv')
with tempfile.TemporaryDirectory(suffix='-durability',
dir=_SCRIPT_DIR) as durability_dir:
snapshot_dir = os.path.join(durability_dir, 'snapshots')
os.makedirs(snapshot_dir, exist_ok=True)
out_snapshot = os.path.join(snapshot_dir, 'snapshot')
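# First import: two node files with two extra labels, a pipe delimiter and a
# multi-character quote.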
mg_import_csv = [args.mg_import_csv, '--nodes', comment_nodes,
'--nodes={}'.format(forum_nodes),
'--node-label', 'First', '--node-label', 'Second',
'--relationships={}'.format(relationships_0),
'--relationships', relationships_1,
'--out', out_snapshot, '--csv-delimiter=|',
'--quote', 'Ö', # test multi-char quote
'--array-delimiter=;']
subprocess.check_call(mg_import_csv)
mg_recovery_check = [args.mg_recovery_check,
'--durability-dir', durability_dir,
'--gtest_filter=*RecoveryTest*:-*RelationshipType*'] # noqa
subprocess.check_call(mg_recovery_check)
# New snapshot to test relationship type flag and single-char quote
out_snapshot = os.path.join(snapshot_dir, 'snapshot2')
mg_import_csv = [args.mg_import_csv, '--nodes', comment_nodes_2,
'--nodes={}'.format(forum_nodes),
'--relationships={}'.format(relationships_0),
'--relationships', relationships_1,
'--relationship-type', "TYPE",
'--out', out_snapshot, '--csv-delimiter=|',
'--quote', '\"', '--array-delimiter=;']
subprocess.check_call(mg_import_csv)
mg_recovery_check = [args.mg_recovery_check,
'--durability-dir', durability_dir,
'--gtest_filter=*RelationshipType*:*Quote*']
subprocess.check_call(mg_recovery_check)
if __name__ == '__main__':
main()