Merge branch 'tests' into demo
commit ed919b615a
@ -4,6 +4,7 @@
#include <unordered_map>

#include "logging/default.hpp"
#include "storage/indexes/index_definition.hpp"
#include "transactions/transaction.hpp"

class SnapshotEncoder;
@ -42,9 +43,14 @@ private:
    void snapshot(DbTransaction const &dt, SnapshotEncoder &snap,
                  tx::TransactionRead const &old_trans);

    // Loads snapshot. True if success
    bool snapshot_load(DbAccessor &t, SnapshotDecoder &snap);
    // Loads snapshot. True if success. Returns indexes which were in snapshot.
    std::vector<IndexDefinition> snapshot_load(DbAccessor &t,
                                               SnapshotDecoder &snap);

    // Adds indexes. Should be called outside transactions.
    void add_indexes(std::vector<IndexDefinition> &v);

    // Will return a different name on every call.
    std::string snapshot_file(std::time_t const &now, const char *type);

    std::string snapshot_commit_file();

@ -20,6 +20,61 @@ public:

    // Adds index defined in given definition. Returns true if successful.
    bool add_index(IndexDefinition id);
    //
    // // Returns index from location.
    // template <class TG, class K>
    // Option<IndexHolder<TG, K>> get_index(IndexLocation loc)
    // {
    //     size_t code = loc.location_code();
    //
    //     switch (code) {
    //     case 0: // Illegal location
    //         return Option<IndexHolder<TG, K>>();
    //
    //     case 1:
    //         switch (loc.side) {
    //         case EdgeSide: {
    //             return make_option(
    //                 db.graph.edges
    //                     .property_family_find_or_create(loc.property_name.get())
    //                     .index);
    //         }
    //         case VertexSide: {
    //             return make_option(
    //                 db.graph.vertices
    //                     .property_family_find_or_create(loc.property_name.get())
    //                     .index);
    //         }
    //         default:
    //             throw new NonExhaustiveSwitch("Unknown side: " +
    //                                           std::to_string(loc.side));
    //         };
    //
    //     case 2: // Can't be removed
    //         return Option<IndexHolder<TG, K>>();
    //
    //     case 3: // Not yet implemented
    //         throw new NotYetImplemented("Getting index over label and "
    //                                     "property isn't yet implemented");
    //     case 4: // Can't be removed
    //         return Option<IndexHolder<TG, K>>();
    //
    //     case 5: // Not yet implemented
    //         throw new NotYetImplemented("Getting index over edge_type and "
    //                                     "property isn't yet implemented");
    //     case 6: // Not yet implemented
    //         throw new NotYetImplemented("Getting index over edge_type and "
    //                                     "label isn't yet implemented");
    //     case 7: // Not yet implemented
    //         throw new NotYetImplemented("Getting index over label, edge_type "
    //                                     "and property isn't yet implemented");
    //     default:
    //         throw new NonExhaustiveSwitch("Unknown index location code: " +
    //                                       std::to_string(code));
    //     }
    // }

    // Removes index from given location. Returns true if successful or if no
    // index was present. False if index location is illegal.
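
The commented-out get_index above dispatches on loc.location_code() over the values 0 through 7, and the case comments (property only, label and property, edge_type and property, and so on) read as a three-bit mask over the parts of an index location. The following is only a sketch of that reading; the bit assignment is inferred from the case comments, not taken from index_definition.hpp.

#include <cstddef>
#include <cstdio>

// Assumed bit layout (inferred, not repository code):
// bit 0 = property, bit 1 = label, bit 2 = edge_type.
constexpr std::size_t kProperty = 1 << 0;
constexpr std::size_t kLabel = 1 << 1;
constexpr std::size_t kEdgeType = 1 << 2;

constexpr std::size_t location_code(bool has_property, bool has_label,
                                    bool has_edge_type)
{
    return (has_property ? kProperty : 0) | (has_label ? kLabel : 0) |
           (has_edge_type ? kEdgeType : 0);
}

int main()
{
    std::printf("property only        -> %zu\n", location_code(true, false, false)); // 1
    std::printf("label + property     -> %zu\n", location_code(true, true, false));  // 3
    std::printf("edge_type + property -> %zu\n", location_code(true, false, true));  // 5
    std::printf("all three            -> %zu\n", location_code(true, true, true));   // 7
    return 0;
}
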
@ -98,7 +153,7 @@ public:
            f);
    }

    // Updates property indexes for given TypeGroup TG and IU index update
    // Updates property indexes for given TypeGroup TG and IU index_update
    template <class TG, class IU>
    bool update_property_indexes(IU &iu, const tx::Transaction &t)
    {
@ -27,6 +27,11 @@ void add_scores(Db &db, double max_value, std::string const &property_name)
    t.commit();
}

// Tool to add double properties to all vertices.
// Accepts flags for csv import.
// -db name    , will create a database with that name.
// -pn name    , will name the property with that name, default: name=score.
// -max number , will set the range of the property to [0, max], default: max=1
int main(int argc, char **argv)
{
    logging::init_async();
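
The flags above describe the tool's command line. A hypothetical invocation would look like the following; the binary name depends on the build target and is assumed here, only the flags come from the comments in the source:

    ./add_scores -db my_db -pn score -max 1.0
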
@ -156,22 +156,18 @@ bool SnapshotEngine::import()
                                 std::fstream::binary);
            SnapshotDecoder decoder(snapshot_file);

            if (snapshot_load(t, decoder)) {
                if (t.commit()) {
                    logger.info("Successfully imported snapshot \"{}\"",
                                snapshots.back());
                    success = true;
                    break;
            auto indexes = snapshot_load(t, decoder);
            if (t.commit()) {
                logger.info("Successfully imported snapshot \"{}\"",
                            snapshots.back());
                add_indexes(indexes);
                success = true;
                break;

                } else {
                    logger.info("Unsuccessfully tried to import snapshot "
                                "\"{}\" because indexes were not successfully "
                                "updated",
                                snapshots.back());
                }
            } else {
                t.abort();
                logger.info("Unsuccessfully tried to import snapshot \"{}\"",
                logger.info("Unsuccessfully tried to import snapshot "
                            "\"{}\" because indexes were not successfully "
                            "updated",
                            snapshots.back());
            }

@ -241,7 +237,8 @@ void SnapshotEngine::snapshot(DbTransaction const &dt, SnapshotEncoder &snap,
    snap.end();
}

bool SnapshotEngine::snapshot_load(DbAccessor &t, SnapshotDecoder &snap)
std::vector<IndexDefinition>
SnapshotEngine::snapshot_load(DbAccessor &t, SnapshotDecoder &snap)
{
    std::unordered_map<uint64_t, VertexAccessor> vertices;

@ -268,23 +265,37 @@ bool SnapshotEngine::snapshot_load(DbAccessor &t, SnapshotDecoder &snap)

    // Load indexes
    snap.start_indexes();
    std::vector<IndexDefinition> indexes;
    while (!snap.end()) {
        // This will add index.
        indexes.push_back(snap.load_index());
    }

    return indexes;
}

void SnapshotEngine::add_indexes(std::vector<IndexDefinition> &v)
{
    logger.info("Adding: {} indexes", v.size());
    for (auto id : v) {
        // TODO: It is alright for now to ignore if add_index returns false. I am
        // not even sure if false should stop snapshot loading.
        if (!db.indexes().add_index(snap.load_index())) {
        if (!db.indexes().add_index(id)) {
            logger.warn("Failed to add index, but still continuing with "
                        "loading snapshot");
        }
    }

    return true;
}

std::string SnapshotEngine::snapshot_file(std::time_t const &now,
                                          const char *type)
{
    return snapshot_db_dir() + "/" + std::to_string(now) + "_" + type;
    auto now_nano = std::chrono::time_point_cast<std::chrono::nanoseconds>(
                        std::chrono::high_resolution_clock::now())
                        .time_since_epoch()
                        .count() %
                    (1000 * 1000 * 1000);
    return snapshot_db_dir() + "/" + std::to_string(now) + "_" +
           std::to_string(now_nano) + "_" + type;
}

std::string SnapshotEngine::snapshot_commit_file()

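
The last hunk changes snapshot file naming: besides the second-resolution std::time_t stamp, the name now carries the nanosecond remainder of the current time, so two snapshots taken within the same second no longer collide. A stand-alone restatement of the scheme follows; the directory prefix and the "full" type string are assumptions for illustration, not values from the repository.

#include <chrono>
#include <ctime>
#include <iostream>
#include <string>

// Same computation as the new SnapshotEngine::snapshot_file above, outside
// the class: seconds timestamp plus the sub-second nanosecond remainder.
std::string snapshot_name(std::time_t now, const char *type)
{
    auto now_nano = std::chrono::time_point_cast<std::chrono::nanoseconds>(
                        std::chrono::high_resolution_clock::now())
                        .time_since_epoch()
                        .count() %
                    (1000 * 1000 * 1000);
    return std::string("snapshots/") + std::to_string(now) + "_" +
           std::to_string(now_nano) + "_" + type;
}

int main()
{
    std::time_t now = std::time(nullptr);
    // Two calls within the same second differ in the nanosecond component.
    std::cout << snapshot_name(now, "full") << "\n"
              << snapshot_name(now, "full") << "\n";
    return 0;
}
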
@ -13,7 +13,7 @@ bool Indexes::add_index(IndexDefinition id)
    // Creates transaction and during its creation adds index into its
    // place. Also creates a finish closure which will add necessary elements
    // into index.
    DbTransaction t(db, db.tx_engine.begin([&, id](auto &t) mutable {
    DbTransaction t(db, db.tx_engine.begin([&](auto &t) mutable {
        size_t code = id.loc.location_code();

        switch (code) {

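
The only change in this hunk is the lambda capture of the finish closure: [&, id] stored a private copy of id, while [&] makes the closure refer to the surrounding id. A minimal illustration of the difference (not repository code); whether the by-reference capture is safe here depends on when tx_engine runs the closure, which this diff does not show.

#include <cassert>

int main()
{
    int id = 1;
    auto by_value = [&, id] { return id; }; // snapshot of id taken at creation
    auto by_ref = [&] { return id; };       // always reads the current id
    id = 2;
    assert(by_value() == 1);
    assert(by_ref() == 2);
    return 0;
}
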
@ -89,6 +89,15 @@ target_link_libraries(snapshot ${yaml_static_lib})
add_test(NAME snapshot COMMAND snapshot)
set_property(TARGET snapshot PROPERTY CXX_STANDARD 14)

# test index validity
add_executable(index integration/index.cpp)
target_link_libraries(index memgraph)
target_link_libraries(index Threads::Threads)
target_link_libraries(index ${fmt_static_lib})
target_link_libraries(index ${yaml_static_lib})
add_test(NAME index COMMAND index)
set_property(TARGET index PROPERTY CXX_STANDARD 14)

# test query engine
add_executable(integration_query_engine integration/query_engine.cpp)
target_link_libraries(integration_query_engine Threads::Threads)

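
Since the new integration binary is registered with add_test, it can be run through CTest from the build directory once built, for example with ctest -R index --output-on-failure (the -R pattern matches the test name registered above).
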
@ -13,12 +13,16 @@
#include "data_structures/concurrent/concurrent_multiset.hpp"
#include "data_structures/concurrent/concurrent_set.hpp"
#include "data_structures/concurrent/skiplist.hpp"
#include "data_structures/concurrent/concurrent_list.hpp"
#include "data_structures/static_array.hpp"
#include "utils/assert.hpp"
#include "logging/default.hpp"
#include "logging/streams/stdout.hpp"
#include "utils/sysinfo/memory.hpp"

// Sets max number of threads that will be used in concurrent tests.
constexpr int max_no_threads = 8;

using std::cout;
using std::endl;
using map_t = ConcurrentMap<int, int>;
@ -64,6 +68,26 @@ void check_present_same(typename S::Accessor &acc,

// Checks if reported size and traversed size are equal to given size.
template <typename S>
void check_size_list(S &acc, long long size)
{
    // check size

    permanent_assert(acc.size() == size, "Size should be " << size
                                             << ", but size is "
                                             << acc.size());

    // check count

    size_t iterator_counter = 0;

    for (auto elem : acc) {
        ++iterator_counter;
    }
    permanent_assert(iterator_counter == size, "Iterator count should be "
                                                   << size << ", but size is "
                                                   << iterator_counter);
}
template <typename S>
void check_size(typename S::Accessor &acc, long long size)
{
    // check size
@ -83,6 +107,7 @@ void check_size(typename S::Accessor &acc, long long size)
                                                   << size << ", but size is "
                                                   << iterator_counter);
}

// Checks if order in list is maintained. It expects a map.
template <typename S>
void check_order(typename S::Accessor &acc)

tests/concurrent/conncurent_list.cpp (new file, 77 lines)
@ -0,0 +1,77 @@
#include "common.h"

constexpr size_t THREADS_NO = std::min(max_no_threads, 8);
constexpr size_t key_range = 1e2;
constexpr size_t op_per_thread = 1e5;
// Depending on the value there is a possibility of numerical overflow.
constexpr size_t max_number = 10;
constexpr size_t no_find_per_change = 2;
constexpr size_t no_insert_for_one_delete = 1;

// This test simulates the behavior of transactions.
// Each thread makes a series of finds interleaved with methods which change
// the list. The exact ratio of finds per change and inserts per delete can be
// regulated with no_find_per_change and no_insert_for_one_delete.
int main()
{
    init_log();
    memory_check(THREADS_NO, [] {
        ConcurrentList<std::pair<int, int>> list;

        auto futures = run<std::pair<long long, long long>>(
            THREADS_NO, [&](auto index) mutable {
                auto rand = rand_gen(key_range);
                auto rand_change = rand_gen_bool(no_find_per_change);
                auto rand_delete = rand_gen_bool(no_insert_for_one_delete);
                long long sum = 0;
                long long count = 0;

                for (int i = 0; i < op_per_thread; i++) {
                    auto num = rand();
                    auto data = num % max_number;
                    if (rand_change()) {
                        if (rand_delete()) {
                            for (auto it = list.begin(); it != list.end();
                                 it++) {
                                if (it->first == num) {
                                    if (it.remove()) {
                                        sum -= data;
                                        count--;
                                    }
                                    break;
                                }
                            }
                        } else {
                            list.begin().push(std::make_pair(num, data));
                            sum += data;
                            count++;
                        }
                    } else {
                        for (auto &v : list) {
                            if (v.first == num) {
                                permanent_assert(v.second == data,
                                                 "Data is invalid");
                                break;
                            }
                        }
                    }
                }

                return std::pair<long long, long long>(sum, count);
            });

        auto it = list.begin();
        long long sums = 0;
        long long counters = 0;
        for (auto &data : collect(futures)) {
            sums += data.second.first;
            counters += data.second.second;
        }

        for (auto &e : list) {
            sums -= e.second;
        }
        permanent_assert(sums == 0, "Same values aren't present");
        check_size_list<ConcurrentList<std::pair<int, int>>>(list, counters);
    });
}
@ -1,6 +1,6 @@
#include "common.h"

#define THREADS_NO 8
constexpr size_t THREADS_NO = std::min(max_no_threads, 8);
constexpr size_t op_per_thread = 1e5;
constexpr size_t bit_part_len = 2;
constexpr size_t no_slots = 1e4;

@ -1,6 +1,6 @@
#include "common.h"

#define THREADS_NO 4
constexpr size_t THREADS_NO = std::min(max_no_threads, 4);
constexpr size_t op_per_thread = 1e5;
constexpr size_t up_border_bit_set_pow2 = 3;
constexpr size_t key_range =

@ -1,6 +1,6 @@
#include "common.h"

#define THREADS_NO 8
constexpr size_t THREADS_NO = std::min(max_no_threads, 8);
constexpr size_t op_per_thread = 1e5;
constexpr size_t key_range = op_per_thread * THREADS_NO * 3;

@ -1,6 +1,6 @@
#include "common.h"

#define THREADS_NO 4
constexpr size_t THREADS_NO = std::min(max_no_threads, 4);
constexpr size_t op_per_thread = 1e5;
constexpr size_t up_border_bit_set_pow2 = 3;
constexpr size_t key_range =

@ -1,6 +1,6 @@
#include "common.h"

#define THREADS_NO 8
constexpr size_t THREADS_NO = std::min(max_no_threads, 8);

constexpr size_t elems_per_thread = 100000;
constexpr size_t key_range = elems_per_thread * THREADS_NO * 2;

@ -1,6 +1,6 @@
#include "common.h"

#define THREADS_NO 8
constexpr size_t THREADS_NO = std::min(max_no_threads, 8);
constexpr size_t elems_per_thread = 100000;
constexpr size_t key_range = elems_per_thread * THREADS_NO * 2;

@ -1,6 +1,6 @@
#include "common.h"

#define THREADS_NO 8
constexpr size_t THREADS_NO = std::min(max_no_threads, 8);
constexpr size_t elems_per_thread = 1e5;

int main()

@ -1,6 +1,6 @@
#include "common.h"

#define THREADS_NO 8
constexpr size_t THREADS_NO = std::min(max_no_threads, 8);

constexpr size_t elements = 2e6;

@ -1,6 +1,6 @@
#include "common.h"

#define THREADS_NO 1
constexpr size_t THREADS_NO = std::min(max_no_threads, 1);
constexpr size_t elems_per_thread = 16e5;

// Known memory leak at 1,600,000 elements.

@ -1,6 +1,6 @@
#include "common.h"

#define THREADS_NO 8
constexpr size_t THREADS_NO = std::min(max_no_threads, 8);
constexpr size_t key_range = 1e4;
constexpr size_t op_per_thread = 1e5;
// Depending on the value there is a possibility of numerical overflow.

@ -1,6 +1,6 @@
#include "common.h"

#define THREADS_NO 8
constexpr size_t THREADS_NO = std::min(max_no_threads, 8);
constexpr size_t key_range = 1e4;
constexpr size_t op_per_thread = 1e5;
// Depending on the value there is a possibility of numerical overflow.

@ -1,6 +1,6 @@
#include "common.h"

#define THREADS_NO 4
constexpr size_t THREADS_NO = std::min(max_no_threads, 4);
constexpr size_t key_range = 1e4;
constexpr size_t op_per_thread = 1e5;
// Depending on the value there is a possibility of numerical overflow.

@ -1,6 +1,6 @@
#include "common.h"

#define THREADS_NO 8
constexpr size_t THREADS_NO = std::min(max_no_threads, 8);
constexpr size_t key_range = 1e4;
constexpr size_t op_per_thread = 1e5;
// Depending on the value there is a possibility of numerical overflow.

@ -1,6 +1,6 @@
#include "common.h"

#define THREADS_NO 8
constexpr size_t THREADS_NO = std::min(max_no_threads, 8);
constexpr size_t key_range = 1e4;
constexpr size_t op_per_thread = 1e5;
constexpr size_t no_insert_for_one_delete = 1;

@ -1,6 +1,6 @@
#include "common.h"

#define THREADS_NO 8
constexpr size_t THREADS_NO = std::min(max_no_threads, 8);
constexpr size_t op_per_thread = 1e5;
// Depending on the value there is a possibility of numerical overflow.
constexpr size_t max_number = 10;

@ -1,6 +1,6 @@
#include "common.h"

#define THREADS_NO 8
constexpr size_t THREADS_NO = std::min(max_no_threads, 8);
constexpr size_t key_range = 1e5;
constexpr size_t op_per_thread = 1e6;
constexpr size_t no_insert_for_one_delete = 1;

@ -1,6 +1,6 @@
#include "common.h"

#define THREADS_NO 8
constexpr size_t THREADS_NO = std::min(max_no_threads, 8);
constexpr size_t key_range = 1e4;
constexpr size_t op_per_thread = 1e5;
// Depending on the value there is a possibility of numerical overflow.

@ -1,6 +1,6 @@
#include "common.h"

#define THREADS_NO 8
constexpr size_t THREADS_NO = std::min(max_no_threads, 8);
constexpr size_t key_range = 1e4;
constexpr size_t op_per_thread = 1e5;
constexpr size_t no_insert_for_one_delete = 2;

@ -1,6 +1,6 @@
#include "common.h"

#define THREADS_NO 8
constexpr size_t THREADS_NO = std::min(max_no_threads, 8);
constexpr size_t key_range = 1e5;
constexpr size_t op_per_thread = 1e6;
// Depending on the value there is a possibility of numerical overflow.

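
Every hunk in this group makes the same one-line change: a per-file preprocessor define is replaced with a typed constant that is clamped by the max_no_threads cap introduced in common.h above. A self-contained sketch of that pattern (not repository code; max_no_threads is redeclared here only so the snippet compiles on its own):

#include <algorithm>
#include <cstddef>

// Stand-in for the constant defined in the tests' common.h.
constexpr int max_no_threads = 8;

// A typed, scoped constant that can never exceed the global cap, instead of
// an unchecked #define.
constexpr std::size_t THREADS_NO = std::min(max_no_threads, 4);

static_assert(THREADS_NO <= static_cast<std::size_t>(max_no_threads),
              "thread count is clamped to the global maximum");

int main() { return 0; }
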
tests/integration/index.cpp (new file, 240 lines)
@ -0,0 +1,240 @@
#include "query_engine/hardcode/queries.hpp"

#include <random>

#include "barrier/barrier.cpp"

#include "logging/default.hpp"
#include "logging/streams/stdout.hpp"
#include "query_engine/query_stripper.hpp"
#include "storage/indexes/indexes.hpp"
#include "utils/sysinfo/memory.hpp"

// Returns a uniform random size_t generator over the range [0, n).
auto rand_gen(size_t n)
{
    std::default_random_engine generator;
    std::uniform_int_distribution<size_t> distribution(0, n - 1);
    return std::bind(distribution, generator);
}

void run(size_t n, std::string &query, Db &db)
{
    auto stripper = make_query_stripper(TK_LONG, TK_FLOAT, TK_STR, TK_BOOL);
    auto qf = load_queries(barrier::trans(db));
    auto stripped = stripper.strip(query);
    std::cout << "Running query [" << stripped.hash << "] for " << n << " time."
              << std::endl;
    for (int i = 0; i < n; i++) {
        properties_t vec = stripped.arguments;
        assert(qf[stripped.hash](std::move(vec)));
    }
}

void add_edge(size_t n, Db &db)
{
    auto stripper = make_query_stripper(TK_LONG, TK_FLOAT, TK_STR, TK_BOOL);
    auto qf = load_queries(barrier::trans(db));
    std::string query = "MATCH (n1), (n2) WHERE ID(n1)=0 AND "
                        "ID(n2)=1 CREATE (n1)<-[r:IS {age: "
                        "25,weight: 70}]-(n2) RETURN r";

    auto stripped = stripper.strip(query);
    std::cout << "Running query [" << stripped.hash << "] for " << n
              << " time to add edge." << std::endl;

    std::vector<int64_t> vertices;
    for (auto &v : db.graph.vertices.access()) {
        vertices.push_back(v.second.id);
    }

    auto rand = rand_gen(vertices.size());
    for (int i = 0; i < n; i++) {
        properties_t vec = stripped.arguments;
        vec[0] = Property(Int64(vertices[rand()]), Flags::Int64);
        vec[1] = Property(Int64(vertices[rand()]), Flags::Int64);
        assert(qf[stripped.hash](std::move(vec)));
    }
}

void add_property(Db &db, StoredProperty<TypeGroupVertex> &prop)
{
    DbAccessor t(db);

    t.vertex_access().fill().for_all([&](auto va) { va.set(prop); });

    assert(t.commit());
}

void add_vertex_property_serial_int(Db &db, PropertyFamily<TypeGroupVertex> &f)
{
    DbAccessor t(db);

    auto key = f.get(Int64::type).family_key();

    size_t i = 0;
    t.vertex_access().fill().for_all([&](auto va) mutable {
        va.set(StoredProperty<TypeGroupVertex>(Int64(i), key));
        i++;
    });

    assert(t.commit());
}

void add_edge_property_serial_int(Db &db, PropertyFamily<TypeGroupEdge> &f)
{
    DbAccessor t(db);

    auto key = f.get(Int64::type).family_key();

    size_t i = 0;
    t.edge_access().fill().for_all([&](auto va) mutable {
        va.set(StoredProperty<TypeGroupEdge>(Int64(i), key));
        i++;
    });

    assert(t.commit());
}

template <class TG>
size_t size(Db &db, IndexHolder<TG, std::nullptr_t> &h)
{
    DbAccessor t(db);

    size_t count = 0;
    auto oin = h.get_read();
    if (oin.is_present()) {
        oin.get()->for_range(t).for_all([&](auto va) mutable { count++; });
    }

    t.commit();

    return count;
}

void assert_empty(Db &db)
{
    assert(db.graph.vertices.access().size() == 0);
    assert(db.graph.edges.access().size() == 0);
}

void clean_vertex(Db &db)
{
    DbTransaction t(db);
    t.clean_vertex_section();
    t.trans.commit();
}

void clean_edge(Db &db)
{
    DbTransaction t(db);
    t.clean_edge_section();
    t.trans.commit();
}

void clear_database(Db &db)
{
    std::string delete_all_vertices = "MATCH (n) DELETE n";
    std::string delete_all_edges = "MATCH ()-[r]-() DELETE r";

    run(1, delete_all_edges, db);
    run(1, delete_all_vertices, db);
    clean_vertex(db);
    clean_edge(db);
    assert_empty(db);
}

bool equal(Db &a, Db &b)
{
    {
        auto acc_a = a.graph.vertices.access();
        auto acc_b = b.graph.vertices.access();

        if (acc_a.size() != acc_b.size()) {
            return false;
        }

        auto it_a = acc_a.begin();
        auto it_b = acc_b.begin();

        for (auto i = acc_a.size(); i > 0; i--) {
            // TODO: compare
        }
    }

    {
        auto acc_a = a.graph.edges.access();
        auto acc_b = b.graph.edges.access();

        if (acc_a.size() != acc_b.size()) {
            return false;
        }

        auto it_a = acc_a.begin();
        auto it_b = acc_b.begin();

        for (auto i = acc_a.size(); i > 0; i--) {
            // TODO: compare
        }
    }

    return true;
}

int main(void)
{
    logging::init_async();
    logging::log->pipe(std::make_unique<Stdout>());

    size_t cvl_n = 1000;

    std::string create_vertex_label =
        "CREATE (n:LABEL {name: \"cleaner_test\"}) RETURN n";
    std::string create_vertex_other =
        "CREATE (n:OTHER {name: \"cleaner_test\"}) RETURN n";
    std::string delete_label_vertices = "MATCH (n:LABEL) DELETE n";
    std::string delete_all_vertices = "MATCH (n) DELETE n";

    IndexDefinition vertex_property_nonunique_unordered = {
        IndexLocation{VertexSide, Option<std::string>("prop"),
                      Option<std::string>(), Option<std::string>()},
        IndexType{false, None}};
    IndexDefinition edge_property_nonunique_unordered = {
        IndexLocation{EdgeSide, Option<std::string>("prop"),
                      Option<std::string>(), Option<std::string>()},
        IndexType{false, None}};
    IndexDefinition edge_property_unique_ordered = {
        IndexLocation{EdgeSide, Option<std::string>("prop"),
                      Option<std::string>(), Option<std::string>()},
        IndexType{true, Ascending}};
    IndexDefinition vertex_property_unique_ordered = {
        IndexLocation{VertexSide, Option<std::string>("prop"),
                      Option<std::string>(), Option<std::string>()},
        IndexType{true, Ascending}};

    // ******************************* TEST 1 ********************************//
    {
        std::cout << "TEST1" << std::endl;
        // add indexes
        // add vertices LABEL
        // add edges
        // add vertices property
        // assert index size.
        Db db("index", false);
        assert(db.indexes().add_index(vertex_property_nonunique_unordered));
        assert(db.indexes().add_index(edge_property_nonunique_unordered));
        run(cvl_n, create_vertex_label, db);
        add_edge(cvl_n, db);
        assert(cvl_n ==
               size(db, db.graph.vertices.property_family_find_or_create("prop")
                            .index));
        assert(
            cvl_n ==
            size(db,
                 db.graph.edges.property_family_find_or_create("prop").index));
    }

    // TODO: more tests

    return 0;
}
@ -7,6 +7,7 @@
#include "logging/default.hpp"
#include "logging/streams/stdout.hpp"
#include "query_engine/query_stripper.hpp"
#include "storage/indexes/indexes.hpp"
#include "utils/sysinfo/memory.hpp"

// Returns a uniform random size_t generator over the range [0, n).
@ -17,9 +18,10 @@ auto rand_gen(size_t n)
    return std::bind(distribution, generator);
}

template <class S, class Q>
void run(size_t n, std::string &query, S &stripper, Q &qf)
void run(size_t n, std::string &query, Db &db)
{
    auto stripper = make_query_stripper(TK_LONG, TK_FLOAT, TK_STR, TK_BOOL);
    auto qf = load_queries(barrier::trans(db));
    auto stripped = stripper.strip(query);
    std::cout << "Running query [" << stripped.hash << "] for " << n << " time."
              << std::endl;
@ -29,10 +31,10 @@ void run(size_t n, std::string &query, S &stripper, Q &qf)
    }
}

template <class S, class Q>
void add_edge(size_t n, Db &db, S &stripper, Q &qf)
void add_edge(size_t n, Db &db)
{

    auto stripper = make_query_stripper(TK_LONG, TK_FLOAT, TK_STR, TK_BOOL);
    auto qf = load_queries(barrier::trans(db));
    std::string query = "MATCH (n1), (n2) WHERE ID(n1)=0 AND "
                        "ID(n2)=1 CREATE (n1)<-[r:IS {age: "
                        "25,weight: 70}]-(n2) RETURN r";
@ -55,6 +57,45 @@ void add_edge(size_t n, Db &db, S &stripper, Q &qf)
    }
}

void add_property(Db &db, StoredProperty<TypeGroupVertex> &prop)
{
    DbAccessor t(db);

    t.vertex_access().fill().for_all([&](auto va) { va.set(prop); });

    assert(t.commit());
}

void add_property_different_int(Db &db, PropertyFamily<TypeGroupVertex> &f)
{
    DbAccessor t(db);

    auto key = f.get(Int64::type).family_key();

    size_t i = 0;
    t.vertex_access().fill().for_all([&](auto va) mutable {
        va.set(StoredProperty<TypeGroupVertex>(Int64(i), key));
        i++;
    });

    assert(t.commit());
}

size_t size(Db &db, IndexHolder<TypeGroupVertex, std::nullptr_t> &h)
{
    DbAccessor t(db);

    size_t count = 0;
    auto oin = h.get_read();
    if (oin.is_present()) {
        oin.get()->for_range(t).for_all([&](auto va) mutable { count++; });
    }

    t.commit();

    return count;
}

void assert_empty(Db &db)
{
    assert(db.graph.vertices.access().size() == 0);
@ -75,14 +116,13 @@ void clean_edge(Db &db)
    t.trans.commit();
}

template <class S, class Q>
void clear_database(Db &db, S &stripper, Q &qf)
void clear_database(Db &db)
{
    std::string delete_all_vertices = "MATCH (n) DELETE n";
    std::string delete_all_edges = "MATCH ()-[r]-() DELETE r";

    run(1, delete_all_edges, stripper, qf);
    run(1, delete_all_vertices, stripper, qf);
    run(1, delete_all_edges, db);
    run(1, delete_all_vertices, db);
    clean_vertex(db);
    clean_edge(db);
    assert_empty(db);
@ -132,12 +172,6 @@ int main(void)

    size_t cvl_n = 1000;

    Db db("snapshot");

    auto query_functions = load_queries(barrier::trans(db));

    auto stripper = make_query_stripper(TK_LONG, TK_FLOAT, TK_STR, TK_BOOL);

    std::string create_vertex_label =
        "CREATE (n:LABEL {name: \"cleaner_test\"}) RETURN n";
    std::string create_vertex_other =
@ -145,51 +179,93 @@
    std::string delete_label_vertices = "MATCH (n:LABEL) DELETE n";
    std::string delete_all_vertices = "MATCH (n) DELETE n";

    // ********************* MAKE SURE THAT DB IS EMPTY **********************//
    clear_database(db, stripper, query_functions);

    std::cout << "TEST1" << std::endl;
    // ******************************* TEST 1 ********************************//
    // make snapshot of empty db
    // add vertices
    // add edges
    // empty database
    // import snapshot
    // assert database empty

    db.snap_engine.make_snapshot();
    run(cvl_n, create_vertex_label, stripper, query_functions);
    add_edge(cvl_n, db, stripper, query_functions);
    clear_database(db, stripper, query_functions);
    db.snap_engine.import();
    assert_empty(db);

    std::cout << "TEST2" << std::endl;
    // ******************************* TEST 2 ********************************//
    // add vertices
    // add edges
    // make snapshot of db
    // empty database
    // import snapshot
    // create new db
    // compare database with new db

    run(cvl_n, create_vertex_label, stripper, query_functions);
    add_edge(cvl_n, db, stripper, query_functions);
    db.snap_engine.make_snapshot();
    clear_database(db, stripper, query_functions);
    db.snap_engine.import();
    {
        Db db2("snapshot");
        assert(equal(db, db2));
        std::cout << "TEST1" << std::endl;
        // make snapshot of empty db
        // add vertices
        // add edges
        // empty database
        // import snapshot
        // assert database empty
        Db db("snapshot", false);
        db.snap_engine.make_snapshot();
        run(cvl_n, create_vertex_label, db);
        add_edge(cvl_n, db);
        clear_database(db);
        db.snap_engine.import();
        assert_empty(db);
    }

    std::cout << "TEST3" << std::endl;
    // ******************************* TEST 3 ********************************//
    // compare database with different named database
    // ******************************* TEST 2 ********************************//
    {
        Db db2("not_snapshot");
        assert(!equal(db, db2));
        std::cout << "TEST2" << std::endl;
        // add vertices
        // add edges
        // make snapshot of db
        // empty database
        // import snapshot
        // create new db
        // compare database with new db
        Db db("snapshot", false);
        run(cvl_n, create_vertex_label, db);
        add_edge(cvl_n, db);
        db.snap_engine.make_snapshot();
        clear_database(db);
        db.snap_engine.import();
        {
            Db db2("snapshot");
            assert(equal(db, db2));
        }
    }

    // ******************************* TEST 3 ********************************//
    {
        std::cout << "TEST3" << std::endl;
        // add vertices
        // add edges
        // make snapshot of db
        // compare database with different named database
        Db db("snapshot", false);
        run(cvl_n, create_vertex_label, db);
        add_edge(cvl_n, db);
        db.snap_engine.make_snapshot();
        {
            Db db2("not_snapshot");
            assert(!equal(db, db2));
        }
    }

    // ******************************* TEST 4 ********************************//
    {
        std::cout << "TEST4" << std::endl;
        // add vertices LABEL
        // add properties
        // add vertices LABEL
        // add index on property
        // assert index contains vertices
        // make snapshot
        // create new db
        // assert index on LABEL in new db exists
        // assert index in new db contains vertices
        Db db("snapshot", false);
        run(cvl_n, create_vertex_label, db);
        auto &family = db.graph.vertices.property_family_find_or_create("prop");
        add_property_different_int(db, family);
        run(cvl_n, create_vertex_other, db);
        IndexDefinition idef = {
            IndexLocation{VertexSide, Option<std::string>("prop"),
                          Option<std::string>(), Option<std::string>()},
            IndexType{false, None}};
        assert(db.indexes().add_index(idef));
        assert(cvl_n == size(db, family.index));
        db.snap_engine.make_snapshot();
        {
            Db db2("snapshot");
            assert(cvl_n == size(db, db2.graph.vertices
                                         .property_family_find_or_create("prop")
                                         .index));
        }
    }

    // TODO: more tests
