sale 2017-01-11 09:18:10 +00:00
commit d475b79a58
100 changed files with 4750 additions and 1853 deletions

View File

@ -1,4 +1,4 @@
{
"project_id" : "memgraph",
"conduit_uri" : "https://memgraph.phacility.com"
"conduit_uri" : "https://phabricator.memgraph.io"
}

2436
Doxyfile Normal file

File diff suppressed because it is too large.

BIN
Doxylogo.png Normal file

Binary file not shown (new PNG image, 6.6 KiB).

View File

@ -118,7 +118,8 @@ FILE(COPY ${include_dir}/utils/char_str.hpp DESTINATION ${build_include_dir}/uti
FILE(COPY ${include_dir}/utils/void.hpp DESTINATION ${build_include_dir}/utils)
FILE(COPY ${include_dir}/utils/array_store.hpp DESTINATION ${build_include_dir}/utils)
FILE(COPY ${include_dir}/utils/bswap.hpp DESTINATION ${build_include_dir}/utils)
FILE(COPY ${include_dir}/utils/stacktrace.hpp DESTINATION ${build_include_dir}/utils)
FILE(COPY ${include_dir}/utils/stacktrace/stacktrace.hpp DESTINATION ${build_include_dir}/utils/stacktrace)
FILE(COPY ${include_dir}/utils/stacktrace/log.hpp DESTINATION ${build_include_dir}/utils/stacktrace)
FILE(COPY ${include_dir}/utils/auto_scope.hpp DESTINATION ${build_include_dir}/utils)
FILE(COPY ${include_dir}/utils/assert.hpp DESTINATION ${build_include_dir}/utils)
FILE(COPY ${include_dir}/utils/reference_wrapper.hpp DESTINATION ${build_include_dir}/utils)

2
docs/.gitignore vendored Normal file
View File

@ -0,0 +1,2 @@
html/
latex/

7
docs/README.md Normal file
View File

@ -0,0 +1,7 @@
# Memgraph Code Documentation
IMPORTANT: the documentation is auto-generated (run `doxygen Doxyfile` in the project root)
* HTML - open docs/html/index.html
* LaTeX - run make inside docs/latex

View File

@ -1 +0,0 @@
# TODO

View File

@ -7,8 +7,11 @@ namespace bolt
namespace config
{
static constexpr size_t N = 65535; /* chunk size */
static constexpr size_t C = N + 2; /* end mark */
/** chunk size */
static constexpr size_t N = 65535;
/** end mark */
static constexpr size_t C = N + 2;
}
}

View File

@ -5,17 +5,38 @@ namespace bolt
enum class PackType
{
Null, // denotes absence of a value
Boolean, // denotes a type with two possible values (t/f)
Integer, // 64-bit signed integral number
Float, // 64-bit floating point number
Bytes, // binary data
String, // unicode string
List, // collection of values
Map, // collection of zero or more key/value pairs
Struct, // zero or more packstream values
EndOfStream, // denotes stream value end
Reserved // reserved for future use
/** denotes absence of a value */
Null,
/** denotes a type with two possible values (t/f) */
Boolean,
/** 64-bit signed integral number */
Integer,
/** 64-bit floating point number */
Float,
/** binary data */
Bytes,
/** unicode string */
String,
/** collection of values */
List,
/** collection of zero or more key/value pairs */
Map,
/** zero or more packstream values */
Struct,
/** denotes stream value end */
EndOfStream,
/** reserved for future use */
Reserved
};
}

View File

@ -27,7 +27,7 @@ class BoltSerializer
public:
BoltSerializer(Stream &stream) : encoder(stream) {}
/* Serializes the vertex accessor into the packstream format
/** Serializes the vertex accessor into the packstream format
*
* struct[size = 3] Vertex [signature = 0x4E] {
* Integer node_id;
@ -64,7 +64,7 @@ public:
}
}
/* Serializes the vertex accessor into the packstream format
/** Serializes the vertex accessor into the packstream format
*
* struct[size = 5] Edge [signature = 0x52] {
* Integer edge_id;
@ -79,7 +79,7 @@ public:
void write_null() { encoder.write_null(); }
void write(const Null &v) { encoder.write_null(); }
void write(const Null &) { encoder.write_null(); }
void write(const Bool &prop) { encoder.write_bool(prop.value()); }

View File

@ -10,9 +10,10 @@
namespace bolt
{
// compiled queries have to use this class in order to return results
// query code should not know about bolt protocol
/**
* compiled queries have to use this class in order to return results
* query code should not know about bolt protocol
*/
template <class Socket>
class RecordStream
{

View File

@ -1,4 +1,6 @@
#pragma once
/* Memgraph Communication protocol
/* Memgraph communication protocol
* gate is the first name proposal for the protocol */
// TODO

View File

@ -1,3 +1,3 @@
#pragma once
/* HTTP & HTTPS implementation */
/* TODO: HTTP & HTTPS implementations */

View File

@ -1,67 +1,76 @@
#pragma once
#include <bitset>
#include <iostream>
#include <vector>
/*
Implementation of a generic Bloom Filter.
Read more about bloom filters here:
http://en.wikipedia.org/wiki/Bloom_filter
http://www.jasondavies.com/bloomfilter/
*/
// Type specifies the type of data stored
/**
* Implementation of a generic Bloom Filter.
* Read more about bloom filters here:
* http://en.wikipedia.org/wiki/Bloom_filter
* http://www.jasondavies.com/bloomfilter/
*
* Type specifies the type of data stored
*/
template <class Type, int BucketSize = 8>
class BloomFilter {
private:
using HashFunction = std::function<uint64_t(const Type&)>;
using CompresionFunction = std::function<int(uint64_t)>;
class BloomFilter
{
private:
using HashFunction = std::function<uint64_t(const Type &)>;
using CompresionFunction = std::function<int(uint64_t)>;
std::bitset<BucketSize> filter_;
std::vector<HashFunction> hashes_;
CompresionFunction compression_;
std::vector<int> buckets;
std::bitset<BucketSize> filter_;
std::vector<HashFunction> hashes_;
CompresionFunction compression_;
std::vector<int> buckets;
int default_compression(uint64_t hash) { return hash % BucketSize; }
int default_compression(uint64_t hash) { return hash % BucketSize; }
void get_buckets(const Type& data) {
for (int i = 0; i < hashes_.size(); i++)
buckets[i] = compression_(hashes_[i](data));
}
void print_buckets(std::vector<uint64_t>& buckets) {
for (int i = 0; i < buckets.size(); i++) {
std::cout << buckets[i] << " ";
void get_buckets(const Type &data)
{
for (int i = 0; i < hashes_.size(); i++)
buckets[i] = compression_(hashes_[i](data));
}
std::cout << std::endl;
}
public:
BloomFilter(std::vector<HashFunction> funcs,
CompresionFunction compression = {})
: hashes_(funcs) {
if (!compression)
compression_ = std::bind(&BloomFilter::default_compression, this,
std::placeholders::_1);
else
compression_ = compression;
void print_buckets(std::vector<uint64_t> &buckets)
{
for (int i = 0; i < buckets.size(); i++)
{
std::cout << buckets[i] << " ";
}
std::cout << std::endl;
}
buckets.resize(hashes_.size());
}
public:
BloomFilter(std::vector<HashFunction> funcs,
CompresionFunction compression = {})
: hashes_(funcs)
{
if (!compression)
compression_ = std::bind(&BloomFilter::default_compression, this,
std::placeholders::_1);
else
compression_ = compression;
bool contains(const Type& data) {
get_buckets(data);
bool contains_element = true;
buckets.resize(hashes_.size());
}
for (int i = 0; i < buckets.size(); i++)
contains_element &= filter_[buckets[i]];
bool contains(const Type &data)
{
get_buckets(data);
bool contains_element = true;
return contains_element;
}
for (int i = 0; i < buckets.size(); i++)
contains_element &= filter_[buckets[i]];
void insert(const Type& data) {
get_buckets(data);
return contains_element;
}
for (int i = 0; i < buckets.size(); i++) filter_[buckets[i]] = true;
}
void insert(const Type &data)
{
get_buckets(data);
for (int i = 0; i < buckets.size(); i++)
filter_[buckets[i]] = true;
}
};
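A minimal usage sketch for the filter above, pairing it with the two FNV string hashes that the benchmarks later in this commit use; the main() here is purely illustrative.

#include <functional>
#include <iostream>
#include <string>

#include "data_structures/bloom/bloom_filter.hpp"
#include "utils/hashing/fnv64.hpp"

int main()
{
    using StringHashFunction = std::function<uint64_t(const std::string &)>;

    // two independent hash functions drive the filter
    StringHashFunction hash1 = fnv64<std::string>;
    StringHashFunction hash2 = fnv1a64<std::string>;

    BloomFilter<std::string, 128> bloom({hash1, hash2});

    bloom.insert("memgraph");

    // an inserted element is always reported (no false negatives)
    std::cout << bloom.contains("memgraph") << std::endl;

    // a miss usually prints 0, but false positives are possible by design
    std::cout << bloom.contains("neo4j") << std::endl;

    return 0;
}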

View File

@ -70,7 +70,7 @@ private:
{
assert(list != nullptr);
// Increment number of iterators accessing list.
list->count++;
list->active_threads_no_++;
// Start from the begining of list.
reset();
}
@ -99,7 +99,7 @@ private:
// Fetch could be relaxed
// There exist possibility that no one will delete garbage at this
// time but it will be deleted at some other time.
if (list->count.fetch_sub(1) == 1 && // I am the last one accessing
if (list->active_threads_no_.fetch_sub(1) == 1 && // I am the last one accessing
head_rem != nullptr && // There is some garbage
cas<Node *>(list->removed, head_rem,
nullptr) // No new garbage was added.
@ -177,6 +177,8 @@ private:
store(node->next, next);
// Then try to set as head.
} while (!cas(list->head, next, node));
list->count_.fetch_add(1);
}
// True only if this call removed the element. Only reason for fail is
@ -200,6 +202,7 @@ private:
}
// Add to list of to be garbage collected.
store(curr->next_rem, swap(list->removed, curr));
list->count_.fetch_sub(1);
return true;
}
return false;
@ -321,10 +324,14 @@ public:
ConstIterator cend() { return ConstIterator(); }
std::size_t size() { return count.load(std::memory_order_consume); }
std::size_t active_threads_no() { return active_threads_no_.load(); }
std::size_t size() { return count_.load(); }
private:
std::atomic<std::size_t> count{0};
// TODO: use lazy GC or something else as a garbage collection strategy
// use the same principle as in skiplist
std::atomic<std::size_t> active_threads_no_{0};
std::atomic<std::size_t> count_{0};
std::atomic<Node *> head{nullptr};
std::atomic<Node *> removed{nullptr};
};

View File

@ -5,9 +5,12 @@
using std::pair;
// Multi thread safe map based on skiplist.
// K - type of key.
// T - type of data.
/**
* Multi thread safe map based on skiplist.
*
* @tparam K is a type of key.
* @tparam T is a type of data.
*/
template <typename K, typename T>
class ConcurrentMap
{
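The skiplist-backed accessor pattern for this map is exercised by the benchmarks further down in this commit; reduced to a minimal sketch:

#include <string>

#include "data_structures/concurrent/concurrent_map.hpp"

int main()
{
    ConcurrentMap<int, std::string> map;

    // each thread obtains its own accessor to work with the map
    auto accessor = map.access();

    accessor.insert(1, std::string("one"));

    if (accessor.contains(1))
        accessor.remove(1);

    return 0;
}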

View File

@ -5,9 +5,12 @@
using std::pair;
// Multi thread safe multi map based on skiplist.
// K - type of key.
// T - type of data.
/**
* Multi thread safe multi map based on skiplist.
*
* @tparam K is a type of key.
* @tparam T is a type of data.
*/
template <typename K, typename T>
class ConcurrentMultiMap
{

View File

@ -12,7 +12,7 @@
#include "data_structures/concurrent/skiplist_gc.hpp"
/* @brief Concurrent lock-based skiplist with fine grained locking
/** @brief Concurrent lock-based skiplist with fine grained locking
*
* From Wikipedia:
* "A skip list is a data structure that allows fast search within an
@ -97,11 +97,13 @@ template <class T, size_t H = 32, class lock_t = SpinLock>
class SkipList : private Lockable<lock_t>
{
public:
// computes the height for the new node from the interval [1...H]
// with p(k) = (1/2)^k for all k from the interval
/**
* computes the height for the new node from the interval [1...H]
* with p(k) = (1/2)^k for all k from the interval
*/
static thread_local FastBinomial<H> rnd;
/* @brief Wrapper class for flags used in the implementation
/** @brief Wrapper class for flags used in the implementation
*
* MARKED flag is used to logically delete a node.
* FULLY_LINKED is used to mark the node as fully inserted, i.e. linked
@ -224,12 +226,14 @@ public:
Placeholder<T> data;
// this creates an array of the size zero. we can't put any sensible
// value here since we don't know what size it will be untill the
// node is allocated. we could make it a Node** but then we would
// have two memory allocations, one for node and one for the forward
// list. this way we avoid expensive malloc/free calls and also cache
// thrashing when following a pointer on the heap
/**
* this creates an array of the size zero. we can't put any sensible
* value here since we don't know what size it will be until the
* node is allocated. we could make it a Node** but then we would
* have two memory allocations, one for node and one for the forward
* list. this way we avoid expensive malloc/free calls and also cache
* thrashing when following a pointer on the heap
*/
std::atomic<Node *> tower[0];
};
@ -441,6 +445,7 @@ public:
}
private:
// TODO: figure out why start is unused
static int update_path(SkipList *skiplist, int start, const K &item,
Node *preds[], Node *succs[])
{
@ -664,14 +669,18 @@ private:
return (node == nullptr) || item < node->value();
}
// Returns first occurence of item if there exists one.
/**
* Returns first occurence of item if there exists one.
*/
template <class K>
ConstIterator find(const K &item) const
{
return const_cast<SkipList *>(this)->find_node<ConstIterator, K>(item);
}
// Returns first occurence of item if there exists one.
/**
* Returns first occurence of item if there exists one.
*/
template <class K>
Iterator find(const K &item)
{
@ -689,7 +698,9 @@ private:
}
}
// Returns iterator on searched element or the first larger element.
/**
* Returns iterator on searched element or the first larger element.
*/
template <class It, class K>
It find_or_larger(const K &item)
{
@ -758,8 +769,11 @@ private:
return valid;
}
// Inserts non unique data into list.
// NOTE: Uses modified logic from insert method.
/**
* Inserts non unique data into list.
*
* NOTE: Uses modified logic from insert method.
*/
Iterator insert_non_unique(T &&data, Node *preds[], Node *succs[])
{
while (true) {
@ -823,9 +837,12 @@ private:
}
}
// Insert unique data
// F - type of funct which will create new node if needed. Recieves height
// of node.
/**
* Insert unique data
*
* F - type of functor which will create a new node if needed. Receives the
* height of the node.
*/
std::pair<Iterator, bool> insert(Node *preds[], Node *succs[], T &&data)
{
while (true) {
@ -857,8 +874,11 @@ private:
}
}
// Insert unique data
// NOTE: This is almost all duplicate code from insert.
/**
* Insert unique data
*
* NOTE: This is almost all duplicate code from insert.
*/
template <class K, class... Args>
std::pair<Iterator, bool> emplace(Node *preds[], Node *succs[], K &key,
Args &&... args)
@ -893,9 +913,11 @@ private:
}
}
// Inserts data to specified locked location.
/**
* Inserts data to specified locked location.
*/
Iterator insert_here(Node *new_node, Node *preds[], Node *succs[],
int height, guard_t guards[])
int height, guard_t guards[]) // TODO: guards unused
{
// Node::create(std::move(data), height)
// link the predecessors and successors, e.g.
@ -921,10 +943,12 @@ private:
!node->flags.is_marked();
}
// Remove item found with fp with arguments skiplist,preds and succs.
// fp has to fill preds and succs which reflect location of item or return
// -1 as in not found otherwise returns level on which the item was first
// found.
/**
 * Removes the item found by fp, which is called with arguments skiplist,
 * preds and succs. fp has to fill preds and succs so that they reflect the
 * location of the item; it returns -1 if the item is not found, otherwise
 * the level on which the item was first found.
 */
template <class K>
bool remove(const K &item, Node *preds[], Node *succs[],
int (*fp)(SkipList *, int, const K &, Node *[], Node *[]))
@ -966,7 +990,9 @@ private:
}
}
// number of elements
/**
* number of elements
*/
std::atomic<size_t> count{0};
Node *header;
SkiplistGC<Node> gc;
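The height comment above describes a geometric distribution, p(k) = (1/2)^k; a standalone sketch of that coin-flip sampling (illustrative only, not the FastBinomial used by the implementation):

#include <random>

// Sample a tower height in [1, H]: keep flipping a fair coin and grow the
// tower while it lands heads, so P(height = k) halves with every level.
template <int H = 32>
int sample_height(std::mt19937 &rng)
{
    std::bernoulli_distribution coin(0.5);
    int height = 1;
    while (height < H && coin(rng))
        ++height;
    return height;
}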

View File

@ -1,46 +0,0 @@
#pragma once
#include <list>
#include "threading/sync/lockable.hpp"
#include "threading/sync/spinlock.hpp"
template <typename value_type, typename lock_type = SpinLock>
class LinkedList : public Lockable<lock_type>
{
public:
std::size_t size() const
{
auto guard = this->acquire_unique();
return data.size();
}
void push_front(const value_type &value)
{
auto guard = this->acquire_unique();
data.push_front(value);
}
void push_front(value_type &&value)
{
auto guard = this->acquire_unique();
data.push_front(std::forward<value_type>(value));
}
void pop_front()
{
auto guard = this->acquire_unique();
data.pop_front();
}
// value_type& as return value
// would not be concurrent
value_type front()
{
auto guard = this->acquire_unique();
return data.front();
}
private:
std::list<value_type> data;
};

View File

@ -1,34 +0,0 @@
#pragma once
#include <unordered_map>
#include "threading/sync/lockable.hpp"
#include "threading/sync/spinlock.hpp"
namespace lockfree
{
template <class K, class V>
class HashMap : Lockable<SpinLock>
{
public:
V at(const K& key)
{
auto guard = acquire_unique();
return hashmap[key];
}
void put(const K& key, const K& value)
{
auto guard = acquire_unique();
hashmap[key] = value;
}
private:
std::unordered_map<K, V> hashmap;
};
}

View File

@ -10,44 +10,85 @@
class Indexes;
// Main class which represents Database concept in code.
// TODO: Maybe split this in another layer between Db and Dbms. Where the new
// layer would hold SnapshotEngine and his kind of concept objects. Some
// guidelines would be: retain objects which are necessary to implement querys
// in Db, the rest can be moved to the new layer.
/**
* Main class which represents Database concept in code.
*/
class Db
{
public:
using sptr = std::shared_ptr<Db>;
// import_snapshot will in constructor import latest snapshot into the db.
// NOTE: explicit is here to prevent compiler from evaluating const char *
// into a bool.
/**
* This constructor will create a database with the name "default"
*
* NOTE: explicit is here to prevent compiler from evaluating const char *
* into a bool.
*
* @param import_snapshot if true, the constructor imports the latest
* snapshot into the db.
*/
explicit Db(bool import_snapshot = true);
// import_snapshot will in constructor import latest snapshot into the db.
/**
* Construct database with a custom name.
*
* @param name database name
* @param import_snapshot if true, the constructor imports the latest
* snapshot into the db.
*/
Db(const char *name, bool import_snapshot = true);
// import_snapshot will in constructor import latest snapshot into the db.
/**
* Construct database with a custom name.
*
* @param name database name
* @param import_snapshot if true, the constructor imports the latest
* snapshot into the db.
*/
Db(const std::string &name, bool import_snapshot = true);
/**
* Database object can't be copied.
*/
Db(const Db &db) = delete;
private:
/** database name */
const std::string name_;
public:
/** transaction engine related to this database */
tx::Engine tx_engine;
/** graph related to this database */
Graph graph;
/** garbage collector related to this database*/
Garbage garbage = {tx_engine};
// This must be initialized after name.
/**
* snapshot engine related to this database
*
* \b IMPORTANT: has to be initialized after name
*/
SnapshotEngine snap_engine = {*this};
// Creates Indexes for this db.
/**
* Creates Indexes for this database.
*/
Indexes indexes();
// TODO: Indexes should be created only once, somewhere like Db or in a layer
// between Db and Dbms.
Indexes indexes();
/**
* Returns a name of the database.
*
* @return database name
*/
std::string const &name() const;
};
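A construction sketch for the class above; the include path is assumed (the diff does not show the file name) and building this requires the rest of the storage layer, so treat it as illustrative:

#include <iostream>

#include "database/db.hpp" // assumed path, not shown in this diff

int main()
{
    // named database; skip the snapshot import on startup
    Db db("example_db", false);

    std::cout << db.name() << std::endl; // prints "example_db"

    return 0;
}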

View File

@ -5,14 +5,28 @@
#include "storage/label/label_store.hpp"
#include "storage/vertices.hpp"
/**
* Graph storage. Contains vertices and edges, labels and edge types.
*/
class Graph
{
public:
Graph() {}
/**
* default constructor
*
* At the beginning the graph is empty.
*/
Graph() = default;
/** storage for all vertices related to this graph */
Vertices vertices;
/** storage for all edges related to this graph */
Edges edges;
/** storage for all labels */
LabelStore label_store;
/** storage for all edge types related to this graph */
EdgeTypeStore edge_type_store;
};

View File

@ -25,9 +25,12 @@
// permanent assert is always executed
#define permanent_assert(condition, message) \
if (!(condition)) { \
if (!(condition)) \
{ \
std::ostringstream s; \
s << message; \
std::cout << s.str() << std::endl; \
std::exit(EXIT_FAILURE); \
}
// assert_error_handler_(__FILE__, __LINE__, s.str().c_str());

View File

@ -2,33 +2,34 @@
#include <utility>
/* @brief Calls a cleanup function on scope exit
/**
* @brief Calls a cleanup function on scope exit
*
* consider this example:
* consider this example:
*
* void hard_worker()
* {
* resource.enable();
* do_stuff(); // throws exception
* resource.disable();
* }
* void hard_worker()
* {
* resource.enable();
* do_stuff(); // throws exception
* resource.disable();
* }
*
* if do_stuff throws an exception, resource.disable is never called
* and the app is left in an inconsistent state. ideally, you would like
* to call resource.disable regardless of the exception being thrown.
* OnScopeExit makes this possible and very convenient via an 'Auto' macro
* if do_stuff throws an exception, resource.disable is never called
* and the app is left in an inconsistent state. ideally, you would like
* to call resource.disable regardless of the exception being thrown.
* OnScopeExit makes this possible and very convenient via an 'Auto' macro
*
* void hard_worker()
* {
* resource.enable();
* Auto(resource.disable());
* do_stuff(); // throws exception
* }
* void hard_worker()
* {
* resource.enable();
* Auto(resource.disable());
* do_stuff(); // throws exception
* }
*
* now, resource.disable will be called every time it goes out of scope
* regardless of the exception
* now, resource.disable will be called every time it goes out of scope
* regardless of the exception
*
* @tparam F Lambda which holds a wrapper function around the cleanup code
* @tparam F Lambda which holds a wrapper function around the cleanup code
*/
template <class F>
class OnScopeExit
@ -55,3 +56,10 @@ private:
TOKEN_PASTE(auto_, counter)(TOKEN_PASTE(auto_func_, counter));
#define Auto(Destructor) Auto_INTERNAL(Destructor, __COUNTER__)
// -- example:
// Auto(f());
// -- is expended to:
// auto auto_func_1 = [&]() { f(); };
// OnScopeExit<decltype(auto_func_1)> auto_1(auto_func_1);
// -- f() is called at the end of a scope
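Putting the hard_worker example from the comment above into a compilable sketch (the Resource type is made up for illustration):

#include <stdexcept>

#include "utils/auto_scope.hpp"

struct Resource
{
    void enable() {}
    void disable() {}
};

Resource resource;

void do_stuff() { throw std::runtime_error("do_stuff failed"); }

void hard_worker()
{
    resource.enable();
    Auto(resource.disable()); // disable() still runs when do_stuff() throws
    do_stuff();
}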

View File

@ -4,39 +4,25 @@
#include <stdexcept>
#include "utils/auto_scope.hpp"
#include "utils/stacktrace.hpp"
#include "utils/stacktrace/stacktrace.hpp"
class BasicException : public std::exception {
public:
BasicException(const std::string &message, uint64_t stacktrace_size) noexcept
: message_(message),
stacktrace_size_(stacktrace_size) {
generate_stacktrace();
}
BasicException(const std::string &message) noexcept : message_(message),
stacktrace_size_(10) {
generate_stacktrace();
}
template <class... Args>
BasicException(const std::string &format, Args &&... args) noexcept
: BasicException(fmt::format(format, std::forward<Args>(args)...)) {}
const char *what() const noexcept override { return message_.c_str(); }
private:
std::string message_;
uint64_t stacktrace_size_;
void generate_stacktrace() {
#ifndef NDEBUG
Stacktrace stacktrace;
int size = std::min(stacktrace_size_, stacktrace.size());
for (int i = 0; i < size; i++) {
message_.append(fmt::format("\n at {} ({})", stacktrace[i].function,
stacktrace[i].location));
class BasicException : public std::exception
{
public:
BasicException(const std::string &message) noexcept : message_(message)
{
Stacktrace stacktrace;
message_.append(stacktrace.dump());
}
#endif
}
template <class... Args>
BasicException(const std::string &format, Args &&... args) noexcept
: BasicException(fmt::format(format, std::forward<Args>(args)...))
{
}
const char *what() const noexcept override { return message_.c_str(); }
private:
std::string message_;
};
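A short sketch of throwing and catching the exception above; the variadic constructor forwards its arguments to fmt::format, and the captured stack trace is appended to what(). The include path is assumed, the diff does not name the file:

#include <iostream>

#include "utils/exceptions/basic_exception.hpp" // assumed path

void connect(const char *host, int port)
{
    throw BasicException("cannot connect to {}:{}", host, port);
}

int main()
{
    try
    {
        connect("127.0.0.1", 7687);
    }
    catch (const BasicException &e)
    {
        // message followed by the captured stack trace
        std::cout << e.what() << std::endl;
    }

    return 0;
}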

View File

@ -5,6 +5,9 @@
#include "utils/auto_scope.hpp"
/* @brief Allocates blocks of block_size and stores
* pointers to the allocated blocks inside a vector.
*/
template <size_t block_size>
class BlockAllocator
{
@ -23,29 +26,45 @@ public:
BlockAllocator(size_t capacity = 0)
{
for (size_t i = 0; i < capacity; ++i)
blocks.emplace_back();
unused_.emplace_back();
}
~BlockAllocator()
{
for (auto b : blocks) {
free(b.data);
}
blocks.clear();
for (auto block : unused_)
free(block.data);
unused_.clear();
for (auto block : release_)
free(block.data);
release_.clear();
}
size_t unused_size() const
{
return unused_.size();
}
size_t release_size() const
{
return release_.size();
}
// Returns nullptr on no memory.
void *acquire()
{
if (blocks.size() == 0) blocks.emplace_back();
if (unused_.size() == 0) unused_.emplace_back();
auto ptr = blocks.back().data;
Auto(blocks.pop_back());
auto ptr = unused_.back().data;
Auto(unused_.pop_back());
return ptr;
}
void release(void *ptr) { blocks.emplace_back(ptr); }
void release(void *ptr) { release_.emplace_back(ptr); }
private:
std::vector<Block> blocks;
// TODO: try implement with just one vector
// but consecutive acquire release calls should work
// TODO: measure first!
std::vector<Block> unused_;
std::vector<Block> release_;
};
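A minimal sketch of the acquire/release cycle above, using the header path that the next hunk includes:

#include <cassert>

#include "utils/memory/block_allocator.hpp"

int main()
{
    // pre-allocate two blocks of 4096 bytes
    BlockAllocator<4096> allocator(2);
    assert(allocator.unused_size() == 2);

    void *block = allocator.acquire(); // taken from the unused pool
    allocator.release(block);          // returned blocks land in the release pool

    assert(allocator.unused_size() == 1);
    assert(allocator.release_size() == 1);

    return 0;
}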

View File

@ -3,6 +3,7 @@
#include <cmath>
#include "utils/exceptions/out_of_memory.hpp"
#include "utils/likely.hpp"
#include "utils/memory/block_allocator.hpp"
// http://en.cppreference.com/w/cpp/language/new

View File

@ -8,34 +8,40 @@
using Function = std::function<void()>;
enum class Signal : int {
Terminate = SIGTERM,
SegmentationFault = SIGSEGV,
Interupt = SIGINT,
Quit = SIGQUIT,
Abort = SIGABRT
// TODO: align bits so signals can be combined
// Signal::Terminate | Signal::Interupt
enum class Signal : int
{
Terminate = SIGTERM,
SegmentationFault = SIGSEGV,
Interupt = SIGINT,
Quit = SIGQUIT,
Abort = SIGABRT,
BusError = SIGBUS,
};
class SignalHandler {
private:
static std::map<int, std::function<void()>> handlers_;
class SignalHandler
{
private:
static std::map<int, std::function<void()>> handlers_;
static void handle(int signal) { handlers_[signal](); }
static void handle(int signal) { handlers_[signal](); }
public:
static void register_handler(Signal signal, Function func) {
int signal_number = static_cast<int>(signal);
handlers_[signal_number] = func;
std::signal(signal_number, SignalHandler::handle);
}
// TODO possible changes if signelton needed later
/*
static SignalHandler& instance() {
static SignalHandler instance;
return instance;
public:
static void register_handler(Signal signal, Function func)
{
int signal_number = static_cast<int>(signal);
handlers_[signal_number] = func;
std::signal(signal_number, SignalHandler::handle);
}
*/
// TODO: possible changes if a singleton is needed later
/*
static SignalHandler& instance() {
static SignalHandler instance;
return instance;
}
*/
};
std::map<int, std::function<void()>> SignalHandler::handlers_ = {};
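The registration pattern used by the server's main() later in this commit, reduced to a standalone sketch:

#include <cstdlib>

#include "utils/signals/handler.hpp"
#include "utils/stacktrace/log.hpp"

int main()
{
    // dump a stack trace and exit when the process receives SIGSEGV
    SignalHandler::register_handler(Signal::SegmentationFault, []() {
        log_stacktrace("SegmentationFault signal raised");
        std::exit(EXIT_FAILURE);
    });

    // ... run the application ...

    return 0;
}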

View File

@ -0,0 +1,11 @@
#pragma once
#include "logging/default.hpp"
#include "utils/stacktrace/stacktrace.hpp"
void log_stacktrace(const std::string& title)
{
Stacktrace stacktrace;
logging::info(title);
logging::info(stacktrace.dump());
}

View File

@ -1,10 +1,10 @@
#pragma once
#include <cxxabi.h>
#include <stdexcept>
#include <execinfo.h>
#include <fmt/format.h>
#include <stdexcept>
#include "utils/auto_scope.hpp"
class Stacktrace
@ -13,11 +13,13 @@ public:
class Line
{
public:
Line(const std::string& original) : original(original) {}
Line(const std::string &original) : original(original) {}
Line(const std::string& original, const std::string& function,
const std::string& location)
: original(original), function(function), location(location) {}
Line(const std::string &original, const std::string &function,
const std::string &location)
: original(original), function(function), location(location)
{
}
std::string original, function, location;
};
@ -26,17 +28,17 @@ public:
Stacktrace()
{
void* addresses[stacktrace_depth];
void *addresses[stacktrace_depth];
auto depth = backtrace(addresses, stacktrace_depth);
// will this leak if backtrace_symbols throws?
char** symbols = nullptr;
char **symbols = nullptr;
Auto(free(symbols));
symbols = backtrace_symbols(addresses, depth);
// skip the first one since it will be Stacktrace::Stacktrace()
for(int i = 1; i < depth; ++i)
for (int i = 1; i < depth; ++i)
lines.emplace_back(format(symbols[i]));
}
@ -48,54 +50,53 @@ public:
auto end() const { return lines.end(); }
auto cend() const { return lines.cend(); }
const Line& operator[](size_t idx) const
{
return lines[idx];
}
const Line &operator[](size_t idx) const { return lines[idx]; }
size_t size() const
{
return lines.size();
}
size_t size() const { return lines.size(); }
template <class Stream>
void dump(Stream& stream) {
stream << dump();
void dump(Stream &stream)
{
stream << dump();
}
std::string dump() {
std::string message;
for (int i = 0; i < size(); i++) {
message.append(fmt::format("at {} ({}) \n", lines[i].function,
lines[i].location));
}
return message;
std::string dump()
{
std::string message;
for (size_t i = 0; i < size(); i++)
{
message.append(fmt::format("at {} ({}) \n", lines[i].function,
lines[i].location));
}
return message;
}
private:
std::vector<Line> lines;
Line format(const std::string& original)
Line format(const std::string &original)
{
using namespace abi;
auto line = original;
auto begin = line.find('(');
auto end = line.find('+');
auto end = line.find('+');
if(begin == std::string::npos || end == std::string::npos)
if (begin == std::string::npos || end == std::string::npos)
return {original};
line[end] = '\0';
int s;
auto demangled = __cxa_demangle(line.data() + begin + 1, nullptr,
nullptr, &s);
auto demangled =
__cxa_demangle(line.data() + begin + 1, nullptr, nullptr, &s);
auto location = line.substr(0, begin);
auto function = demangled ? std::string(demangled)
: fmt::format("{}()", original.substr(begin + 1, end - begin - 1));
auto function =
demangled ? std::string(demangled)
: fmt::format("{}()", original.substr(begin + 1,
end - begin - 1));
return {original, function, location};
}
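A small sketch of capturing and printing a trace with the class above:

#include <iostream>

#include "utils/stacktrace/stacktrace.hpp"

void report_state()
{
    Stacktrace stacktrace;          // frames are captured at construction
    std::cout << stacktrace.dump(); // one "at function (location)" line per frame

    // or stream it directly
    stacktrace.dump(std::cerr);
}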

View File

@ -1,24 +1,67 @@
#pragma mark
#include "sys/types.h"
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include "sys/sysinfo.h"
#include "sys/types.h"
auto total_virtual_memory()
{
struct sysinfo mem_info;
sysinfo (&mem_info);
long long total_virtual_memory = mem_info.totalram;
total_virtual_memory += mem_info.totalswap;
total_virtual_memory *= mem_info.mem_unit;
return total_virtual_memory;
struct sysinfo mem_info;
sysinfo(&mem_info);
long long total_virtual_memory = mem_info.totalram;
total_virtual_memory += mem_info.totalswap;
total_virtual_memory *= mem_info.mem_unit;
return total_virtual_memory;
}
auto used_virtual_memory()
{
struct sysinfo mem_info;
sysinfo (&mem_info);
struct sysinfo mem_info;
sysinfo(&mem_info);
long long virtual_memory_used = mem_info.totalram - mem_info.freeram;
virtual_memory_used += mem_info.totalswap - mem_info.freeswap;
virtual_memory_used *= mem_info.mem_unit;
return virtual_memory_used;
}
// TODO: OS dependent
/**
* parses memory line from /proc/self/status
*/
auto parse_vm_size(char *line)
{
// This assumes that a digit will be found and the line ends in " kB".
auto i = std::strlen(line);
const char *p = line;
while (*p < '0' || *p > '9')
p++;
line[i - 3] = '\0';
return std::atoll(p);
}
/**
* returns VmSize in kB
*/
auto vm_size()
{
std::FILE *file = std::fopen("/proc/self/status", "r");
auto result = -1LL;
char line[128];
while (fgets(line, 128, file) != NULL)
{
if (strncmp(line, "VmSize:", 7) == 0)
{
result = parse_vm_size(line);
break;
}
}
fclose(file);
return result;
}
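A sketch that prints the three values exposed above; the helpers are Linux-only (sysinfo and /proc/self/status), and the include path of this header is not shown in the diff, so the functions are assumed to be visible here:

#include <iostream>

// total_virtual_memory(), used_virtual_memory() and vm_size() come from the
// header above; its include path is not shown in this diff.

int main()
{
    std::cout << "total virtual memory: " << total_virtual_memory() << " B\n";
    std::cout << "used virtual memory:  " << used_virtual_memory() << " B\n";
    std::cout << "VmSize:               " << vm_size() << " kB\n";

    return 0;
}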

View File

@ -1,28 +1,34 @@
#pragma once
#include "utils/auto_scope.hpp"
#include "utils/stacktrace.hpp"
#include "utils/stacktrace/stacktrace.hpp"
#include <execinfo.h>
#include <iostream>
// TODO: log to local file or remote database
void stacktrace(std::ostream& stream) noexcept {
Stacktrace stacktrace;
stacktrace.dump(stream);
void stacktrace(std::ostream &stream) noexcept
{
Stacktrace stacktrace;
stacktrace.dump(stream);
}
// TODO: log to local file or remote database
void terminate_handler(std::ostream& stream) noexcept {
if (auto exc = std::current_exception()) {
try {
std::rethrow_exception(exc);
} catch (std::exception& ex) {
stream << ex.what() << std::endl << std::endl;
stacktrace(stream);
void terminate_handler(std::ostream &stream) noexcept
{
if (auto exc = std::current_exception())
{
try
{
std::rethrow_exception(exc);
}
catch (std::exception &ex)
{
stream << ex.what() << std::endl << std::endl;
stacktrace(stream);
}
}
}
std::abort();
std::abort();
}
void terminate_handler() noexcept { terminate_handler(std::cout); }

View File

@ -0,0 +1,7 @@
#pragma once
#include <chrono>
using namespace std::chrono_literals;
using ms = std::chrono::milliseconds;

View File

@ -1,14 +1,12 @@
#pragma once
#include <chrono>
#include <iostream>
#include <ratio>
#include <utility>
#define time_now() std::chrono::high_resolution_clock::now()
#include "utils/time/time.hpp"
using ns = std::chrono::nanoseconds;
using ms = std::chrono::milliseconds;
#define time_now() std::chrono::high_resolution_clock::now()
template <typename DurationUnit = std::chrono::nanoseconds>
auto to_duration(const std::chrono::duration<long, std::nano> &delta)

View File

@ -8,13 +8,15 @@
#include "logging/default.hpp"
/** @class Timer
* @brief The timer contains counter and handler.
/**
* @class Timer
*
* With every clock interval the counter should be decresed for
* delta count. Delta count is one for now but it should be a variable in the
* near future. The handler is function that will be called when counter
* becomes zero or smaller than zero.
* @brief The timer contains counter and handler.
*
* With every clock interval the counter should be decreased by
* delta count. Delta count is one for now but it should be a variable in the
* near future. The handler is a function that will be called when the counter
* becomes zero or smaller than zero.
*/
struct Timer
{
@ -48,14 +50,16 @@ struct Timer
* the process method.
*/
/** @class TimerSet
* @brief Trivial timer container implementation.
/**
* @class TimerSet
*
* Internal data stucture for storage of timers is std::set. So, the
* related timer complexities are:
* insertion: O(log(n))
* deletion: O(log(n))
* process: O(n)
* @brief Trivial timer container implementation.
*
* The internal data structure for storing timers is std::set, so the
* related timer complexities are:
* insertion: O(log(n))
* deletion: O(log(n))
* process: O(n)
*/
class TimerSet
{
@ -70,6 +74,11 @@ public:
timers.erase(timer);
}
uint64_t size() const
{
return timers.size();
}
void process()
{
for (auto it = timers.begin(); it != timers.end(); ) {
@ -87,10 +96,17 @@ private:
std::set<std::shared_ptr<Timer>> timers;
};
/** @class TimerScheduler
* @brief TimerScheduler is a manager class and its responsibility is to
* take care of the time and call the timer_container process method in the
* appropriate time.
/**
* @class TimerScheduler
*
* @brief TimerScheduler is a manager class and its responsibility is to
* take care of the time and call the timer_container process method at the
* appropriate time.
*
* @tparam timer_container_type implements a strategy how the timers
* are processed
* @tparam delta_time_type type of a time distance between two events
* @tparam delta_time granularity between the two events, default value is 1
*/
template <
typename timer_container_type,
@ -99,19 +115,47 @@ template <
> class TimerScheduler
{
public:
/**
* Adds a timer.
*
* @param timer shared pointer to the timer object \ref Timer
*/
void add(Timer::sptr timer)
{
timer_container.add(timer);
}
/**
* Removes a timer.
*
* @param timer shared pointer to the timer object \ref Timer
*/
void remove(Timer::sptr timer)
{
timer_container.remove(timer);
}
/**
* Provides the number of pending timers. The exact number has to be
* provided by a timer_container.
*
* @return uint64_t the number of pending timers.
*/
uint64_t size() const
{
return timer_container.size();
}
/**
* Runs a separate thread whose responsibility is to run the process method
* at the appropriate time (every delta_time from the beginning of
* processing).
*/
void run()
{
is_running.store(true);
run_thread = std::thread([this]() {
while (is_running.load()) {
std::this_thread::sleep_for(delta_time_type(delta_time));
@ -121,11 +165,17 @@ public:
});
}
/**
* Stops the whole processing.
*/
void stop()
{
is_running.store(false);
}
/**
* Joins the processing thread.
*/
~TimerScheduler()
{
run_thread.join();
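The run()/stop() pair above is a plain polling loop; the same pattern as a standalone sketch, independent of the Timer types (illustrative only):

#include <atomic>
#include <chrono>
#include <iostream>
#include <thread>

int main()
{
    std::atomic<bool> is_running{true};

    // the worker wakes up every delta_time and would call process() here
    std::thread run_thread([&]() {
        while (is_running.load())
        {
            std::this_thread::sleep_for(std::chrono::seconds(1));
            std::cout << "tick: processing timers" << std::endl;
        }
    });

    std::this_thread::sleep_for(std::chrono::seconds(3));

    is_running.store(false); // stop()
    run_thread.join();       // done by the destructor above

    return 0;
}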

View File

@ -1 +0,0 @@
// TODO

View File

@ -1 +0,0 @@
// TODO

View File

@ -23,7 +23,8 @@ void clean_version_lists(A &&acc, Id oldest_active)
{
// TODO: Optimization, iterator with remove method.
bool succ = acc.remove(vlist.first);
assert(succ); // There is other cleaner here
// There is other cleaner here
runtime_assert(succ, "Remove has failed");
}
}
}
@ -56,7 +57,7 @@ void DbTransaction::clean_vertex_section()
bool DbTransaction::update_indexes()
{
logger.debug("index_updates: {}, instance: {}, transaction: {}",
logger.trace("index_updates: {}, instance: {}, transaction: {}",
index_updates.size(), static_cast<void *>(this), trans.id);
while (!index_updates.empty())
@ -107,7 +108,7 @@ void DbTransaction::to_update_index(typename TG::vlist_t *vlist,
typename TG::record_t *record)
{
index_updates.emplace_back(make_index_update(vlist, record));
logger.debug("update_index, updates_no: {}, instance: {}, transaction: {}",
logger.trace("update_index, updates_no: {}, instance: {}, transaction: {}",
index_updates.size(), static_cast<void *>(this), trans.id);
}

View File

@ -1,5 +1,5 @@
#include <signal.h>
#include <iostream>
#include <signal.h>
#include "communication/bolt/v1/server/server.hpp"
#include "communication/bolt/v1/server/worker.hpp"
@ -11,13 +11,14 @@
#include "utils/config/config.hpp"
#include "utils/signals/handler.hpp"
#include "utils/stacktrace.hpp"
#include "utils/terminate_handler.hpp"
#include "utils/stacktrace/log.hpp"
static bolt::Server<bolt::Worker>* serverptr;
static bolt::Server<bolt::Worker> *serverptr;
Logger logger;
<<<<<<< HEAD
static constexpr const char* interface = "0.0.0.0";
static constexpr const char* port = "7687";
@ -30,13 +31,21 @@ int main(int argc, char** argv) {
// TODO figure out what is the relationship between this and signals
// that are configured below
std::set_terminate(&terminate_handler);
=======
// TODO: load from configuration
static constexpr const char *interface = "0.0.0.0";
static constexpr const char *port = "7687";
>>>>>>> e303f666d2f1d4073bcea6b6e6697e0651ead879
// logger init
int main(void)
{
// logging init
#ifdef SYNC_LOGGER
logging::init_sync();
logging::init_sync();
#else
logging::init_async();
logging::init_async();
#endif
<<<<<<< HEAD
logging::log->pipe(std::make_unique<Stdout>());
// get Main logger
@ -88,4 +97,57 @@ int main(int argc, char** argv) {
logger.info("Shutting down...");
return EXIT_SUCCESS;
=======
logging::log->pipe(std::make_unique<Stdout>());
// logger init
logger = logging::log->logger("Main");
logger.info("{}", logging::log->type());
// unhandled exception handler
std::set_terminate(&terminate_handler);
// signal handling
SignalHandler::register_handler(Signal::SegmentationFault, []() {
log_stacktrace("SegmentationFault signal raised");
std::exit(EXIT_FAILURE);
});
SignalHandler::register_handler(Signal::Terminate, []() {
log_stacktrace("Terminate signal raised");
std::exit(EXIT_FAILURE);
});
SignalHandler::register_handler(Signal::Abort, []() {
log_stacktrace("Abort signal raised");
std::exit(EXIT_FAILURE);
});
// initialize socket
io::Socket socket;
try
{
socket = io::Socket::bind(interface, port);
}
catch (io::NetworkError e)
{
logger.error("Cannot bind to socket on {} at {}", interface, port);
logger.error("{}", e.what());
std::exit(EXIT_FAILURE);
}
socket.set_non_blocking();
socket.listen(1024);
logger.info("Listening on {} at {}", interface, port);
// initialize server
bolt::Server<bolt::Worker> server(std::move(socket));
serverptr = &server;
// server start with N threads
// TODO: N should be configurable
auto N = std::thread::hardware_concurrency();
logger.info("Starting {} workers", N);
server.start(N);
logger.info("Shutting down...");
return EXIT_SUCCESS;
>>>>>>> e303f666d2f1d4073bcea6b6e6697e0651ead879
}

View File

@ -2,6 +2,7 @@
#include <cassert>
#include "utils/assert.hpp"
#include "storage/vertex_record.hpp"
#include "storage/edge_type/edge_type.hpp"
@ -10,10 +11,12 @@ void EdgeAccessor::remove() const
RecordAccessor::remove();
auto from_va = from();
assert(from_va.fill());
auto from_va_is_full = from_va.fill();
runtime_assert(from_va_is_full, "From Vertex Accessor is empty");
auto to_va = to();
assert(to_va.fill());
auto to_va_is_full = to_va.fill();
permanent_assert(to_va_is_full, "To Vertex Accessor is empty");
from_va.update().record->data.out.remove(vlist);
to_va.update().record->data.in.remove(vlist);

View File

@ -6,6 +6,9 @@ enable_testing()
include_directories(${catch_source_dir}/include)
file(MAKE_DIRECTORY ${CMAKE_BINARY_DIR}/test_results/unit)
file(MAKE_DIRECTORY ${CMAKE_BINARY_DIR}/test_results/benchmark)
# copy test data
file(COPY ${CMAKE_SOURCE_DIR}/tests/data
DESTINATION ${CMAKE_BINARY_DIR}/tests)

View File

@ -38,6 +38,9 @@ foreach(test_cpp ${test_type_cpps})
target_link_libraries(${target_name} ${yaml_static_lib})
# register test
add_test(${target_name} ${exec_name})
set(output_path
${CMAKE_BINARY_DIR}/test_results/benchmark/${target_name}.json)
add_test(${target_name} ${exec_name}
--benchmark_out_format=json --benchmark_out=${output_path})
endforeach()

View File

@ -1,6 +1,8 @@
#include <random>
#include <thread>
#include "benchmark/benchmark_api.h"
#include "data_structures/bloom/bloom_filter.hpp"
#include "logging/default.hpp"
#include "logging/streams/stdout.hpp"
@ -8,52 +10,49 @@
#include "utils/hashing/fnv64.hpp"
#include "utils/random/generator.h"
#include "benchmark/benchmark_api.h"
using utils::random::StringGenerator;
using StringHashFunction = std::function<uint64_t(const std::string&)>;
using StringHashFunction = std::function<uint64_t(const std::string &)>;
template <class Type, int Size>
static void TestBloom(benchmark::State& state, BloomFilter<Type, Size>*
bloom, const std::vector<Type>& elements) {
while(state.KeepRunning()) {
for (int start = 0; start < state.range(0); start++)
if (start % 2) bloom->contains(elements[start]);
else bloom->insert(elements[start]);
}
state.SetComplexityN(state.range(0));
static void TestBloom(benchmark::State &state, BloomFilter<Type, Size> *bloom,
const std::vector<Type> &elements)
{
while (state.KeepRunning())
{
for (int start = 0; start < state.range(0); start++)
if (start % 2)
bloom->contains(elements[start]);
else
bloom->insert(elements[start]);
}
state.SetComplexityN(state.range(0));
}
auto BM_Bloom = [](benchmark::State& state, auto* bloom, const auto& elements) {
TestBloom(state, bloom, elements);
auto BM_Bloom = [](benchmark::State &state, auto *bloom, const auto &elements) {
TestBloom(state, bloom, elements);
};
void parse_args(int argc, char** argv) {}
int main(int argc, char **argv)
{
logging::init_async();
logging::log->pipe(std::make_unique<Stdout>());
int main(int argc, char** argv) {
logging::init_async();
logging::log->pipe(std::make_unique<Stdout>());
StringGenerator generator(4);
parse_args(argc, argv);
auto elements = utils::random::generate_vector(generator, 1 << 16);
StringGenerator generator(4);
auto elements = utils::random::generate_vector(generator, 1 << 16);
StringHashFunction hash1 = fnv64<std::string>;
StringHashFunction hash2 = fnv1a64<std::string>;
std::vector<StringHashFunction> funcs = {
hash1, hash2
};
StringHashFunction hash1 = fnv64<std::string>;
StringHashFunction hash2 = fnv1a64<std::string>;
std::vector<StringHashFunction> funcs = {hash1, hash2};
BloomFilter<std::string, 128> bloom(funcs);
BloomFilter<std::string, 128> bloom(funcs);
benchmark::RegisterBenchmark("SimpleBloomFilter Benchmark Test", BM_Bloom,
&bloom, elements)
->RangeMultiplier(2)
->Range(1, 1 << 16)
->Complexity(benchmark::oN);
benchmark::RegisterBenchmark("SimpleBloomFilter Benchmark Test", BM_Bloom,
&bloom, elements)
->RangeMultiplier(2)
->Range(1, 1 << 16)
->Complexity(benchmark::oN);
benchmark::Initialize(&argc, argv);
benchmark::RunSpecifiedBenchmarks();
benchmark::Initialize(&argc, argv);
benchmark::RunSpecifiedBenchmarks();
}

View File

@ -0,0 +1,193 @@
#include <random>
#include <thread>
#include "data_structures/bloom/bloom_filter.hpp"
#include "data_structures/concurrent/concurrent_bloom_map.hpp"
#include "logging/default.hpp"
#include "logging/streams/stdout.hpp"
#include "utils/command_line/arguments.hpp"
#include "utils/hashing/fnv64.hpp"
#include "utils/random/generator.h"
#include "benchmark/benchmark_api.h"
/*
ConcurrentMap Benchmark Test:
- tests time of Insertion, Contain and Delete operations
- benchmarking time per operation
- test run ConcurrentMap with the following keys and values:
- <int,int>
- <int, string>
- <string, int>
- <string, string>
*/
using utils::random::NumberGenerator;
using utils::random::PairGenerator;
using utils::random::StringGenerator;
using StringHashFunction = std::function<uint64_t(const std::string &)>;
using IntegerGenerator = NumberGenerator<std::uniform_int_distribution<int>,
std::default_random_engine, int>;
// Global arguments
int MAX_ELEMENTS = 1 << 18, MULTIPLIER = 2;
int THREADS, RANGE_START, RANGE_END, STRING_LENGTH;
/*
ConcurrentMap Insertion Benchmark Test
*/
template <class K, class V, class F>
static void InsertValue(benchmark::State &state,
ConcurrentBloomMap<K, V, F> *map,
const std::vector<std::pair<K, V>> &elements)
{
while (state.KeepRunning())
{
for (int start = 0; start < state.range(0); start++)
{
map->insert(elements[start].first, elements[start].second);
}
}
state.SetComplexityN(state.range(0));
}
/*
ConcurrentMap Contains Benchmark Test
*/
template <class K, class V, class F>
static void ContainsValue(benchmark::State &state,
ConcurrentBloomMap<K, V, F> *map,
const std::vector<std::pair<K, V>> elements)
{
while (state.KeepRunning())
{
for (int start = 0; start < state.range(0); start++)
{
map->contains(elements[start].first);
}
}
state.SetComplexityN(state.range(0));
}
auto BM_InsertValue = [](benchmark::State &state, auto *map, auto &elements) {
InsertValue(state, map, elements);
};
auto BM_ContainsValue = [](benchmark::State &state, auto *map, auto elements) {
ContainsValue(state, map, elements);
};
/*
Commandline Argument Parsing
Arguments:
* Integer Range Minimum
-start number
* Integer Range Maximum
- end number
* Number of threads
- threads number
* Random string length
-string-length number
*/
void parse_arguments(int argc, char **argv)
{
REGISTER_ARGS(argc, argv);
RANGE_START = GET_ARG("-start", "0").get_int();
RANGE_END = GET_ARG("-end", "1000000000").get_int();
THREADS = std::min(GET_ARG("-threads", "1").get_int(),
(int)std::thread::hardware_concurrency());
STRING_LENGTH =
ProgramArguments::instance().get_arg("-string-length", "128").get_int();
}
int main(int argc, char **argv)
{
logging::init_async();
logging::log->pipe(std::make_unique<Stdout>());
parse_arguments(argc, argv);
StringGenerator sg(STRING_LENGTH);
IntegerGenerator ig(RANGE_START, RANGE_END);
/*
Creates RandomGenerators, ConcurentMaps and Random Element Vectors for the
following use cases:
Map elements contain keys and value for:
<int, int>,
<int, string>
<string, int>
<string, string>
*/
// random generators for tests
PairGenerator<IntegerGenerator, IntegerGenerator> piig(&ig, &ig);
PairGenerator<StringGenerator, StringGenerator> pssg(&sg, &sg);
PairGenerator<StringGenerator, IntegerGenerator> psig(&sg, &ig);
PairGenerator<IntegerGenerator, StringGenerator> pisg(&ig, &sg);
StringHashFunction hash1 = fnv64<std::string>;
StringHashFunction hash2 = fnv1a64<std::string>;
std::vector<StringHashFunction> funcs = {hash1, hash2};
BloomFilter<std::string, 128> bloom_filter_(funcs);
// maps used for testing
// ConcurrentBloomMap<int, int> ii_map;
// ConcurrentBloomMap<int, std::string> is_map;
using Filter = BloomFilter<std::string, 128>;
ConcurrentBloomMap<std::string, int, Filter> si_map(bloom_filter_);
ConcurrentBloomMap<std::string, std::string, Filter> ss_map(bloom_filter_);
// random elements for testing
// auto ii_elems = utils::random::generate_vector(piig, MAX_ELEMENTS);
// auto is_elems = utils::random::generate_vector(pisg, MAX_ELEMENTS);
auto si_elems = utils::random::generate_vector(psig, MAX_ELEMENTS);
auto ss_elems = utils::random::generate_vector(pssg, MAX_ELEMENTS);
/* insertion Tests */
benchmark::RegisterBenchmark("InsertValue[String, Int]", BM_InsertValue,
&si_map, si_elems)
->RangeMultiplier(MULTIPLIER)
->Range(1, MAX_ELEMENTS)
->Complexity(benchmark::oN)
->Threads(THREADS);
benchmark::RegisterBenchmark("InsertValue[String, String]", BM_InsertValue,
&ss_map, ss_elems)
->RangeMultiplier(MULTIPLIER)
->Range(1, MAX_ELEMENTS)
->Complexity(benchmark::oN)
->Threads(THREADS);
// Contains Benchmark Tests
benchmark::RegisterBenchmark("ContainsValue[String, Int]", BM_ContainsValue,
&si_map, si_elems)
->RangeMultiplier(MULTIPLIER)
->Range(1, MAX_ELEMENTS)
->Complexity(benchmark::oN)
->Threads(THREADS);
benchmark::RegisterBenchmark("ContainsValue[String, String]",
BM_ContainsValue, &ss_map, ss_elems)
->RangeMultiplier(MULTIPLIER)
->Range(1, MAX_ELEMENTS)
->Complexity(benchmark::oN)
->Threads(THREADS);
benchmark::Initialize(&argc, argv);
benchmark::RunSpecifiedBenchmarks();
return 0;
}

View File

@ -1,186 +0,0 @@
#include <random>
#include <thread>
#include "data_structures/bloom/bloom_filter.hpp"
#include "data_structures/concurrent/concurrent_bloom_map.hpp"
#include "logging/default.hpp"
#include "logging/streams/stdout.hpp"
#include "utils/command_line/arguments.hpp"
#include "utils/hashing/fnv64.hpp"
#include "utils/random/generator.h"
#include "benchmark/benchmark_api.h"
/*
ConcurrentMap Benchmark Test:
- tests time of Insertion, Contain and Delete operations
- benchmarking time per operation
- test run ConcurrentMap with the following keys and values:
- <int,int>
- <int, string>
- <string, int>
- <string, string>
*/
using utils::random::NumberGenerator;
using utils::random::PairGenerator;
using utils::random::StringGenerator;
using StringHashFunction = std::function<uint64_t(const std::string&)>;
using IntegerGenerator = NumberGenerator<std::uniform_int_distribution<int>,
std::default_random_engine, int>;
// Global arguments
int MAX_ELEMENTS = 1 << 18, MULTIPLIER = 2;
int THREADS, RANGE_START, RANGE_END, STRING_LENGTH;
/*
ConcurrentMap Insertion Benchmark Test
*/
template <class K, class V, class F>
static void InsertValue(benchmark::State& state, ConcurrentBloomMap<K, V, F>* map,
const std::vector<std::pair<K, V>>& elements) {
while (state.KeepRunning()) {
for (int start = 0; start < state.range(0); start++) {
map->insert(elements[start].first, elements[start].second);
}
}
state.SetComplexityN(state.range(0));
}
/*
ConcurrentMap Contains Benchmark Test
*/
template <class K, class V, class F>
static void ContainsValue(benchmark::State& state, ConcurrentBloomMap<K, V, F>* map,
const std::vector<std::pair<K, V>> elements) {
while (state.KeepRunning()) {
for (int start = 0; start < state.range(0); start++) {
map->contains(elements[start].first);
}
}
state.SetComplexityN(state.range(0));
}
auto BM_InsertValue = [](benchmark::State& state, auto* map, auto& elements) {
InsertValue(state, map, elements);
};
auto BM_ContainsValue = [](benchmark::State& state, auto* map, auto elements) {
ContainsValue(state, map, elements);
};
/*
Commandline Argument Parsing
Arguments:
* Integer Range Minimum
-start number
* Integer Range Maximum
- end number
* Number of threads
- threads number
* Random String lenght
-string-length number
*/
void parse_arguments(int argc, char** argv) {
REGISTER_ARGS(argc, argv);
RANGE_START = GET_ARG("-start", "0").get_int();
RANGE_END = GET_ARG("-end", "1000000000").get_int();
THREADS = std::min(GET_ARG("-threads", "1").get_int(),
(int)std::thread::hardware_concurrency());
STRING_LENGTH =
ProgramArguments::instance().get_arg("-string-length", "128").get_int();
}
int main(int argc, char** argv) {
logging::init_async();
logging::log->pipe(std::make_unique<Stdout>());
parse_arguments(argc, argv);
StringGenerator sg(STRING_LENGTH);
IntegerGenerator ig(RANGE_START, RANGE_END);
/*
Creates RandomGenerators, ConcurentMaps and Random Element Vectors for the
following use cases:
Map elements contain keys and value for:
<int, int>,
<int, string>
<string, int>
<string, string>
*/
// random generators for tests
PairGenerator<IntegerGenerator, IntegerGenerator> piig(&ig, &ig);
PairGenerator<StringGenerator, StringGenerator> pssg(&sg, &sg);
PairGenerator<StringGenerator, IntegerGenerator> psig(&sg, &ig);
PairGenerator<IntegerGenerator, StringGenerator> pisg(&ig, &sg);
StringHashFunction hash1 = fnv64<std::string>;
StringHashFunction hash2 = fnv1a64<std::string>;
std::vector<StringHashFunction> funcs = {
hash1, hash2
};
BloomFilter<std::string, 128> bloom_filter_(funcs);
// maps used for testing
//ConcurrentBloomMap<int, int> ii_map;
//ConcurrentBloomMap<int, std::string> is_map;
using Filter = BloomFilter<std::string, 128>;
ConcurrentBloomMap<std::string, int, Filter > si_map(bloom_filter_);
ConcurrentBloomMap<std::string, std::string, Filter>
ss_map(bloom_filter_);
// random elements for testing
//auto ii_elems = utils::random::generate_vector(piig, MAX_ELEMENTS);
//auto is_elems = utils::random::generate_vector(pisg, MAX_ELEMENTS);
auto si_elems = utils::random::generate_vector(psig, MAX_ELEMENTS);
auto ss_elems = utils::random::generate_vector(pssg, MAX_ELEMENTS);
/* insertion Tests */
benchmark::RegisterBenchmark("InsertValue[String, Int]", BM_InsertValue,
&si_map, si_elems)
->RangeMultiplier(MULTIPLIER)
->Range(1, MAX_ELEMENTS)
->Complexity(benchmark::oN)
->Threads(THREADS);
benchmark::RegisterBenchmark("InsertValue[String, String]", BM_InsertValue,
&ss_map, ss_elems)
->RangeMultiplier(MULTIPLIER)
->Range(1, MAX_ELEMENTS)
->Complexity(benchmark::oN)
->Threads(THREADS);
// Contains Benchmark Tests
benchmark::RegisterBenchmark("ContainsValue[String, Int]", BM_ContainsValue,
&si_map, si_elems)
->RangeMultiplier(MULTIPLIER)
->Range(1, MAX_ELEMENTS)
->Complexity(benchmark::oN)
->Threads(THREADS);
benchmark::RegisterBenchmark("ContainsValue[String, String]",
BM_ContainsValue, &ss_map, ss_elems)
->RangeMultiplier(MULTIPLIER)
->Range(1, MAX_ELEMENTS)
->Complexity(benchmark::oN)
->Threads(THREADS);
benchmark::Initialize(&argc, argv);
benchmark::RunSpecifiedBenchmarks();
return 0;
}

View File

@ -1,254 +0,0 @@
#include <random>
#include <thread>
#include "data_structures/concurrent/concurrent_map.hpp"
#include "logging/default.hpp"
#include "logging/streams/stdout.hpp"
#include "utils/command_line/arguments.hpp"
#include "utils/random/generator.h"
#include "benchmark/benchmark_api.h"
/*
ConcurrentMap Benchmark Test:
- tests time of Insertion, Contain and Delete operations
- benchmarking time per operation
- test run ConcurrentMap with the following keys and values:
- <int,int>
- <int, string>
- <string, int>
- <string, string>
*/
using utils::random::NumberGenerator;
using utils::random::PairGenerator;
using utils::random::StringGenerator;
using IntegerGenerator = NumberGenerator<std::uniform_int_distribution<int>,
std::default_random_engine, int>;
// Global arguments
int MAX_ELEMENTS = 1 << 18, MULTIPLIER = 2;
int THREADS, RANGE_START, RANGE_END, STRING_LENGTH;
/*
ConcurrentMap Insertion Benchmark Test
*/
template <class K, class V>
static void InsertValue(benchmark::State& state, ConcurrentMap<K, V>* map,
const std::vector<std::pair<K, V>>& elements) {
while (state.KeepRunning()) {
auto accessor = map->access();
for (int start = 0; start < state.range(0); start++) {
accessor.insert(elements[start].first, elements[start].second);
}
}
state.SetComplexityN(state.range(0));
}
/*
ConcurrentMap Deletion Benchmark Test
*/
template <class K, class V>
static void DeleteValue(benchmark::State& state, ConcurrentMap<K, V>* map,
const std::vector<std::pair<K, V>> elements) {
while (state.KeepRunning()) {
auto accessor = map->access();
for (int start = 0; start < state.range(0); start++) {
accessor.remove(elements[start].first);
}
}
state.SetComplexityN(state.range(0));
}
/*
ConcurrentMap Contains Benchmark Test
*/
template <class K, class V>
static void ContainsValue(benchmark::State& state, ConcurrentMap<K, V>* map,
const std::vector<std::pair<K, V>> elements) {
while (state.KeepRunning()) {
auto accessor = map->access();
for (int start = 0; start < state.range(0); start++) {
accessor.contains(elements[start].first);
}
}
state.SetComplexityN(state.range(0));
}
auto BM_InsertValue = [](benchmark::State& state, auto* map, auto& elements) {
InsertValue(state, map, elements);
};
auto BM_DeleteValue = [](benchmark::State& state, auto* map, auto elements) {
DeleteValue(state, map, elements);
};
auto BM_ContainsValue = [](benchmark::State& state, auto* map, auto elements) {
ContainsValue(state, map, elements);
};
/*
Commandline Argument Parsing
Arguments:
* Integer Range Minimum
-start number
* Integer Range Maximum
- end number
* Number of threads
- threads number
* Random String lenght
-string-length number
*/
void parse_arguments(int argc, char** argv) {
REGISTER_ARGS(argc, argv);
RANGE_START = GET_ARG("-start", "0").get_int();
RANGE_END = GET_ARG("-end", "1000000000").get_int();
THREADS = std::min(GET_ARG("-threads", "1").get_int(),
(int)std::thread::hardware_concurrency());
STRING_LENGTH =
ProgramArguments::instance().get_arg("-string-length", "128").get_int();
}
int main(int argc, char** argv) {
logging::init_async();
logging::log->pipe(std::make_unique<Stdout>());
parse_arguments(argc, argv);
StringGenerator sg(STRING_LENGTH);
IntegerGenerator ig(RANGE_START, RANGE_END);
/*
Creates RandomGenerators, ConcurentMaps and Random Element Vectors for the
following use cases:
Map elements contain keys and value for:
<int, int>,
<int, string>
<string, int>
<string, string>
*/
// random generators for tests
PairGenerator<IntegerGenerator, IntegerGenerator> piig(&ig, &ig);
PairGenerator<StringGenerator, StringGenerator> pssg(&sg, &sg);
PairGenerator<StringGenerator, IntegerGenerator> psig(&sg, &ig);
PairGenerator<IntegerGenerator, StringGenerator> pisg(&ig, &sg);
// maps used for testing
ConcurrentMap<int, int> ii_map;
ConcurrentMap<int, std::string> is_map;
ConcurrentMap<std::string, int> si_map;
ConcurrentMap<std::string, std::string> ss_map;
// random elements for testing
auto ii_elems = utils::random::generate_vector(piig, MAX_ELEMENTS);
auto is_elems = utils::random::generate_vector(pisg, MAX_ELEMENTS);
auto si_elems = utils::random::generate_vector(psig, MAX_ELEMENTS);
auto ss_elems = utils::random::generate_vector(pssg, MAX_ELEMENTS);
/* Insertion Benchmark Tests */
benchmark::RegisterBenchmark("InsertValue[Int, Int]", BM_InsertValue, &ii_map,
ii_elems)
->RangeMultiplier(MULTIPLIER)
->Range(1, MAX_ELEMENTS)
->Complexity(benchmark::oN)
->Threads(THREADS);
benchmark::RegisterBenchmark("InsertValue[Int, String]", BM_InsertValue,
&is_map, is_elems)
->RangeMultiplier(MULTIPLIER)
->Range(1, MAX_ELEMENTS)
->Complexity(benchmark::oN)
->Threads(THREADS);
benchmark::RegisterBenchmark("InsertValue[String, Int]", BM_InsertValue,
&si_map, si_elems)
->RangeMultiplier(MULTIPLIER)
->Range(1, MAX_ELEMENTS)
->Complexity(benchmark::oN)
->Threads(THREADS);
benchmark::RegisterBenchmark("InsertValue[String, String]", BM_InsertValue,
&ss_map, ss_elems)
->RangeMultiplier(MULTIPLIER)
->Range(1, MAX_ELEMENTS)
->Complexity(benchmark::oN)
->Threads(THREADS);
// Contains Benchmark Tests
benchmark::RegisterBenchmark("ContainsValue[Int, Int]", BM_ContainsValue,
&ii_map, ii_elems)
->RangeMultiplier(MULTIPLIER)
->Range(1, MAX_ELEMENTS)
->Complexity(benchmark::oN)
->Threads(THREADS);
benchmark::RegisterBenchmark("ContainsValue[Int, String]", BM_ContainsValue,
&is_map, is_elems)
->RangeMultiplier(MULTIPLIER)
->Range(1, MAX_ELEMENTS)
->Complexity(benchmark::oN)
->Threads(THREADS);
benchmark::RegisterBenchmark("ContainsValue[String, Int]", BM_ContainsValue,
&si_map, si_elems)
->RangeMultiplier(MULTIPLIER)
->Range(1, MAX_ELEMENTS)
->Complexity(benchmark::oN)
->Threads(THREADS);
benchmark::RegisterBenchmark("ContainsValue[String, String]",
BM_ContainsValue, &ss_map, ss_elems)
->RangeMultiplier(MULTIPLIER)
->Range(1, MAX_ELEMENTS)
->Complexity(benchmark::oN)
->Threads(THREADS);
// Deletion Benchmark Tests
benchmark::RegisterBenchmark("DeleteValue[Int, Int]", BM_DeleteValue, &ii_map,
ii_elems)
->RangeMultiplier(MULTIPLIER)
->Range(1, MAX_ELEMENTS)
->Complexity(benchmark::oN)
->Threads(THREADS);
benchmark::RegisterBenchmark("DeleteValue[Int, String]", BM_DeleteValue,
&is_map, is_elems)
->RangeMultiplier(MULTIPLIER)
->Range(1, MAX_ELEMENTS)
->Complexity(benchmark::oN)
->Threads(THREADS);
benchmark::RegisterBenchmark("DeleteValue[String, Int]", BM_DeleteValue,
&si_map, si_elems)
->RangeMultiplier(MULTIPLIER)
->Range(1, MAX_ELEMENTS)
->Complexity(benchmark::oN)
->Threads(THREADS);
benchmark::RegisterBenchmark("DeleteValue[String, String]", BM_DeleteValue,
&ss_map, ss_elems)
->RangeMultiplier(MULTIPLIER)
->Range(1, MAX_ELEMENTS)
->Complexity(benchmark::oN)
->Threads(THREADS);
benchmark::Initialize(&argc, argv);
benchmark::RunSpecifiedBenchmarks();
return 0;
}

View File

@ -1,123 +0,0 @@
#include <random>
#include <thread>
#include "data_structures/concurrent/concurrent_map.hpp"
#include "logging/default.hpp"
#include "logging/streams/stdout.hpp"
#include "utils/command_line/arguments.hpp"
#include "utils/random/generator.h"
#include "benchmark/benchmark_api.h"
/*
ConcurrentMap Benchmark Test:
- tests time of Insertion, Deletion and Find
- benchmarks time for total execution with operation percentages
*/
using utils::random::NumberGenerator;
using utils::random::PairGenerator;
using IntegerGenerator = NumberGenerator<std::uniform_int_distribution<int>,
std::default_random_engine, int>;
// Global Arguments
int MAX_ELEMENTS = 1 << 20, MULTIPLIER = 2;
int THREADS, INSERT_PERC, DELETE_PERC, CONTAINS_PERC, RANGE_START, RANGE_END;
// ConcurrentMap Benchmark Test using percentages for Insert, Delete, Find
template <class K, class V>
static void Rape(benchmark::State& state, ConcurrentMap<int, int>* map,
const std::vector<std::pair<K, V>>& elements) {
int number_of_elements = state.range(0);
while (state.KeepRunning()) {
auto accessor = map->access();
for (int start = 0; start < state.range(0); start++) {
float current_percentage = (float)start / (float)number_of_elements * 100;
if (current_percentage < (float)INSERT_PERC) {
accessor.insert(elements[start].first, elements[start].second);
} else if (current_percentage < (float)CONTAINS_PERC + INSERT_PERC) {
accessor.contains(elements[start].first);
} else {
accessor.remove(elements[start].first);
}
}
}
state.SetComplexityN(state.range(0));
}
auto BM_Rape = [](benchmark::State& state, auto* map, auto& elements) {
Rape(state, map, elements);
};
/*
Commandline Arguments Parsing
Arguments:
* Insertion percentage (0-100)
-insert number(int)
* Deletion percentage (0-100)
-delete number(int)
* Find percentage (0-100)
-find number(int)
* Integer Range Minimum
-start number
* Integer Range Maximum
-end number
* Number of threads
-threads number
*/
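// Example invocation (a sketch; the binary name concurrent_map_mix is assumed,
// and the three percentages must sum to 100 or parse_arguments exits):
//   ./concurrent_map_mix -insert 50 -delete 20 -find 30 -threads 4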
void parse_arguments(int argc, char** argv) {
REGISTER_ARGS(argc, argv);
INSERT_PERC = GET_ARG("-insert", "50").get_int();
DELETE_PERC = GET_ARG("-delete", "20").get_int();
CONTAINS_PERC = GET_ARG("-find", "30").get_int();
if (INSERT_PERC + DELETE_PERC + CONTAINS_PERC != 100) {
std::cout << "Invalid percentage" << std::endl;
std::cout << "Percentage must sum to 100" << std::endl;
exit(-1);
}
RANGE_START = GET_ARG("-start", "0").get_int();
RANGE_END = GET_ARG("-end", "1000000000").get_int();
THREADS = std::min(GET_ARG("-threads", "1").get_int(),
(int)std::thread::hardware_concurrency());
}
int main(int argc, char** argv) {
logging::init_async();
logging::log->pipe(std::make_unique<Stdout>());
parse_arguments(argc, argv);
IntegerGenerator int_gen(RANGE_START, RANGE_END);
PairGenerator<IntegerGenerator, IntegerGenerator> pair_gen(&int_gen,
&int_gen);
ConcurrentMap<int, int> map;
auto elements = utils::random::generate_vector(pair_gen, MAX_ELEMENTS);
benchmark::RegisterBenchmark("Rape", BM_Rape, &map, elements)
->RangeMultiplier(MULTIPLIER)
->Range(1, MAX_ELEMENTS)
->Complexity(benchmark::oN)
->Threads(THREADS);
benchmark::Initialize(&argc, argv);
benchmark::RunSpecifiedBenchmarks();
return 0;
}

View File

@ -0,0 +1,265 @@
#include <random>
#include <thread>
#include "data_structures/concurrent/concurrent_map.hpp"
#include "logging/default.hpp"
#include "logging/streams/stdout.hpp"
#include "utils/command_line/arguments.hpp"
#include "utils/random/generator.h"
#include "benchmark/benchmark_api.h"
/*
ConcurrentMap Benchmark Test:
- tests time of Insertion, Contains and Delete operations
- benchmarking time per operation
- tests run on ConcurrentMap with the following key and value types:
- <int, int>
- <int, string>
- <string, int>
- <string, string>
*/
using utils::random::NumberGenerator;
using utils::random::PairGenerator;
using utils::random::StringGenerator;
using IntegerGenerator = NumberGenerator<std::uniform_int_distribution<int>,
std::default_random_engine, int>;
// Global arguments
int MAX_ELEMENTS = 1 << 18, MULTIPLIER = 2;
int THREADS, RANGE_START, RANGE_END, STRING_LENGTH;
/*
ConcurrentMap Insertion Benchmark Test
*/
template <class K, class V>
static void InsertValue(benchmark::State &state, ConcurrentMap<K, V> *map,
const std::vector<std::pair<K, V>> &elements)
{
while (state.KeepRunning())
{
auto accessor = map->access();
for (int start = 0; start < state.range(0); start++)
{
accessor.insert(elements[start].first, elements[start].second);
}
}
state.SetComplexityN(state.range(0));
}
/*
ConcurrentMap Deletion Benchmark Test
*/
template <class K, class V>
static void DeleteValue(benchmark::State &state, ConcurrentMap<K, V> *map,
const std::vector<std::pair<K, V>> elements)
{
while (state.KeepRunning())
{
auto accessor = map->access();
for (int start = 0; start < state.range(0); start++)
{
accessor.remove(elements[start].first);
}
}
state.SetComplexityN(state.range(0));
}
/*
ConcurrentMap Contains Benchmark Test
*/
template <class K, class V>
static void ContainsValue(benchmark::State &state, ConcurrentMap<K, V> *map,
const std::vector<std::pair<K, V>> elements)
{
while (state.KeepRunning())
{
auto accessor = map->access();
for (int start = 0; start < state.range(0); start++)
{
accessor.contains(elements[start].first);
}
}
state.SetComplexityN(state.range(0));
}
auto BM_InsertValue = [](benchmark::State &state, auto *map, auto &elements) {
InsertValue(state, map, elements);
};
auto BM_DeleteValue = [](benchmark::State &state, auto *map, auto elements) {
DeleteValue(state, map, elements);
};
auto BM_ContainsValue = [](benchmark::State &state, auto *map, auto elements) {
ContainsValue(state, map, elements);
};
/*
Commandline Argument Parsing
Arguments:
* Integer Range Minimum
-start number
* Integer Range Maximum
-end number
* Number of threads
-threads number
* Random String length
-string-length number
*/
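// Example invocation (a sketch; the binary name concurrent_map_benchmark is
// assumed, omitted flags fall back to the defaults in parse_arguments below):
//   ./concurrent_map_benchmark -threads 4 -start 0 -end 1000000 -string-length 64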
void parse_arguments(int argc, char **argv)
{
REGISTER_ARGS(argc, argv);
RANGE_START = GET_ARG("-start", "0").get_int();
RANGE_END = GET_ARG("-end", "1000000000").get_int();
THREADS = std::min(GET_ARG("-threads", "1").get_int(),
(int)std::thread::hardware_concurrency());
STRING_LENGTH =
ProgramArguments::instance().get_arg("-string-length", "128").get_int();
}
int main(int argc, char **argv)
{
logging::init_async();
logging::log->pipe(std::make_unique<Stdout>());
parse_arguments(argc, argv);
StringGenerator sg(STRING_LENGTH);
IntegerGenerator ig(RANGE_START, RANGE_END);
/*
Creates RandomGenerators, ConcurrentMaps and Random Element Vectors for the
following use cases:
Map elements contain keys and values for:
<int, int>,
<int, string>
<string, int>
<string, string>
*/
// random generators for tests
PairGenerator<IntegerGenerator, IntegerGenerator> piig(&ig, &ig);
PairGenerator<StringGenerator, StringGenerator> pssg(&sg, &sg);
PairGenerator<StringGenerator, IntegerGenerator> psig(&sg, &ig);
PairGenerator<IntegerGenerator, StringGenerator> pisg(&ig, &sg);
// maps used for testing
ConcurrentMap<int, int> ii_map;
ConcurrentMap<int, std::string> is_map;
ConcurrentMap<std::string, int> si_map;
ConcurrentMap<std::string, std::string> ss_map;
// random elements for testing
auto ii_elems = utils::random::generate_vector(piig, MAX_ELEMENTS);
auto is_elems = utils::random::generate_vector(pisg, MAX_ELEMENTS);
auto si_elems = utils::random::generate_vector(psig, MAX_ELEMENTS);
auto ss_elems = utils::random::generate_vector(pssg, MAX_ELEMENTS);
/* Insertion Benchmark Tests */
benchmark::RegisterBenchmark("InsertValue[Int, Int]", BM_InsertValue,
&ii_map, ii_elems)
->RangeMultiplier(MULTIPLIER)
->Range(1, MAX_ELEMENTS)
->Complexity(benchmark::oN)
->Threads(THREADS);
benchmark::RegisterBenchmark("InsertValue[Int, String]", BM_InsertValue,
&is_map, is_elems)
->RangeMultiplier(MULTIPLIER)
->Range(1, MAX_ELEMENTS)
->Complexity(benchmark::oN)
->Threads(THREADS);
benchmark::RegisterBenchmark("InsertValue[String, Int]", BM_InsertValue,
&si_map, si_elems)
->RangeMultiplier(MULTIPLIER)
->Range(1, MAX_ELEMENTS)
->Complexity(benchmark::oN)
->Threads(THREADS);
benchmark::RegisterBenchmark("InsertValue[String, String]", BM_InsertValue,
&ss_map, ss_elems)
->RangeMultiplier(MULTIPLIER)
->Range(1, MAX_ELEMENTS)
->Complexity(benchmark::oN)
->Threads(THREADS);
// Contains Benchmark Tests
benchmark::RegisterBenchmark("ContainsValue[Int, Int]", BM_ContainsValue,
&ii_map, ii_elems)
->RangeMultiplier(MULTIPLIER)
->Range(1, MAX_ELEMENTS)
->Complexity(benchmark::oN)
->Threads(THREADS);
benchmark::RegisterBenchmark("ContainsValue[Int, String]", BM_ContainsValue,
&is_map, is_elems)
->RangeMultiplier(MULTIPLIER)
->Range(1, MAX_ELEMENTS)
->Complexity(benchmark::oN)
->Threads(THREADS);
benchmark::RegisterBenchmark("ContainsValue[String, Int]", BM_ContainsValue,
&si_map, si_elems)
->RangeMultiplier(MULTIPLIER)
->Range(1, MAX_ELEMENTS)
->Complexity(benchmark::oN)
->Threads(THREADS);
benchmark::RegisterBenchmark("ContainsValue[String, String]",
BM_ContainsValue, &ss_map, ss_elems)
->RangeMultiplier(MULTIPLIER)
->Range(1, MAX_ELEMENTS)
->Complexity(benchmark::oN)
->Threads(THREADS);
// Deletion Benchmark Tests
benchmark::RegisterBenchmark("DeleteValue[Int, Int]", BM_DeleteValue,
&ii_map, ii_elems)
->RangeMultiplier(MULTIPLIER)
->Range(1, MAX_ELEMENTS)
->Complexity(benchmark::oN)
->Threads(THREADS);
benchmark::RegisterBenchmark("DeleteValue[Int, String]", BM_DeleteValue,
&is_map, is_elems)
->RangeMultiplier(MULTIPLIER)
->Range(1, MAX_ELEMENTS)
->Complexity(benchmark::oN)
->Threads(THREADS);
benchmark::RegisterBenchmark("DeleteValue[String, Int]", BM_DeleteValue,
&si_map, si_elems)
->RangeMultiplier(MULTIPLIER)
->Range(1, MAX_ELEMENTS)
->Complexity(benchmark::oN)
->Threads(THREADS);
benchmark::RegisterBenchmark("DeleteValue[String, String]", BM_DeleteValue,
&ss_map, ss_elems)
->RangeMultiplier(MULTIPLIER)
->Range(1, MAX_ELEMENTS)
->Complexity(benchmark::oN)
->Threads(THREADS);
benchmark::Initialize(&argc, argv);
benchmark::RunSpecifiedBenchmarks();
return 0;
}

View File

@ -0,0 +1,135 @@
#include <random>
#include <thread>
#include "data_structures/concurrent/concurrent_map.hpp"
#include "logging/default.hpp"
#include "logging/streams/stdout.hpp"
#include "utils/command_line/arguments.hpp"
#include "utils/random/generator.h"
#include "benchmark/benchmark_api.h"
/*
ConcurrentMap Benchmark Test:
- tests time of Insertion, Deletion and Find
- benchmarks time for total execution with operation percentages
*/
using utils::random::NumberGenerator;
using utils::random::PairGenerator;
using IntegerGenerator = NumberGenerator<std::uniform_int_distribution<int>,
std::default_random_engine, int>;
// Global Arguments
int MAX_ELEMENTS = 1 << 20, MULTIPLIER = 2;
int THREADS, INSERT_PERC, DELETE_PERC, CONTAINS_PERC, RANGE_START, RANGE_END;
// ConcurrentMap Benchmark Test using percentages for Insert, Delete, Find
template <class K, class V>
static void Rape(benchmark::State &state, ConcurrentMap<int, int> *map,
const std::vector<std::pair<K, V>> &elements)
{
int number_of_elements = state.range(0);
while (state.KeepRunning())
{
auto accessor = map->access();
for (int start = 0; start < state.range(0); start++)
{
float current_percentage =
(float)start / (float)number_of_elements * 100;
if (current_percentage < (float)INSERT_PERC)
{
accessor.insert(elements[start].first, elements[start].second);
}
else if (current_percentage < (float)CONTAINS_PERC + INSERT_PERC)
{
accessor.contains(elements[start].first);
}
else
{
accessor.remove(elements[start].first);
}
}
}
state.SetComplexityN(state.range(0));
}
auto BM_Rape = [](benchmark::State &state, auto *map, auto &elements) {
Rape(state, map, elements);
};
/*
Commandline Arguments Parsing
Arguments:
* Insertion percentage (0-100)
-insert number(int)
* Deletion percentage (0-100)
-delete number(int)
* Find percentage (0-100)
-find number(int)
* Integer Range Minimum
-start number
* Integer Range Maximum
-end number
* Number of threads
-threads number
*/
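// Example invocation (a sketch; the binary name concurrent_map_mix is assumed,
// and the three percentages must sum to 100 or parse_arguments exits):
//   ./concurrent_map_mix -insert 50 -delete 20 -find 30 -threads 4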
void parse_arguments(int argc, char **argv)
{
REGISTER_ARGS(argc, argv);
INSERT_PERC = GET_ARG("-insert", "50").get_int();
DELETE_PERC = GET_ARG("-delete", "20").get_int();
CONTAINS_PERC = GET_ARG("-find", "30").get_int();
if (INSERT_PERC + DELETE_PERC + CONTAINS_PERC != 100)
{
std::cout << "Invalid percentage" << std::endl;
std::cout << "Percentage must sum to 100" << std::endl;
exit(-1);
}
RANGE_START = GET_ARG("-start", "0").get_int();
RANGE_END = GET_ARG("-end", "1000000000").get_int();
THREADS = std::min(GET_ARG("-threads", "1").get_int(),
(int)std::thread::hardware_concurrency());
}
int main(int argc, char **argv)
{
logging::init_async();
logging::log->pipe(std::make_unique<Stdout>());
parse_arguments(argc, argv);
IntegerGenerator int_gen(RANGE_START, RANGE_END);
PairGenerator<IntegerGenerator, IntegerGenerator> pair_gen(&int_gen,
&int_gen);
ConcurrentMap<int, int> map;
auto elements = utils::random::generate_vector(pair_gen, MAX_ELEMENTS);
benchmark::RegisterBenchmark("Rape", BM_Rape, &map, elements)
->RangeMultiplier(MULTIPLIER)
->Range(1, MAX_ELEMENTS)
->Complexity(benchmark::oN)
->Threads(THREADS);
benchmark::Initialize(&argc, argv);
benchmark::RunSpecifiedBenchmarks();
return 0;
}

View File

@ -1,44 +1,47 @@
#include "logging/default.hpp"
#include "logging/streams/stdout.hpp"
#include "utils/time/timer.hpp"
#include "query/preprocesor.hpp"
#include "utils/time/timer.hpp"
#include "benchmark/benchmark_api.h"
#include "yaml-cpp/yaml.h"
auto BM_Strip = [](benchmark::State& state, auto& function, std::string query) {
while (state.KeepRunning()) {
for (int start = 0; start < state.range(0); start++) {
function(query);
auto BM_Strip = [](benchmark::State &state, auto &function, std::string query) {
while (state.KeepRunning())
{
for (int start = 0; start < state.range(0); start++)
{
function(query);
}
}
}
state.SetComplexityN(state.range(0));
state.SetComplexityN(state.range(0));
};
int main(int argc, char** argv) {
logging::init_async();
logging::log->pipe(std::make_unique<Stdout>());
int main(int argc, char **argv)
{
logging::init_async();
logging::log->pipe(std::make_unique<Stdout>());
YAML::Node dataset = YAML::LoadFile(
"../../tests/data/cypher_queries/stripper/query_dict.yaml");
YAML::Node dataset = YAML::LoadFile(
"../../tests/data/cypher_queries/stripper/query_dict.yaml");
QueryPreprocessor processor;
using std::placeholders::_1;
std::function<QueryStripped(const std::string& query)> preprocess =
std::bind(&QueryPreprocessor::preprocess, &processor, _1);
QueryPreprocessor processor;
using std::placeholders::_1;
std::function<QueryStripped(const std::string &query)> preprocess =
std::bind(&QueryPreprocessor::preprocess, &processor, _1);
auto tests = dataset["benchmark_queries"].as<std::vector<std::string>>();
for (auto& test : tests) {
auto* benchmark =
benchmark::RegisterBenchmark(test.c_str(), BM_Strip, preprocess, test)
->RangeMultiplier(2)
->Range(1, 8 << 10)
->Complexity(benchmark::oN);
;
}
auto tests = dataset["benchmark_queries"].as<std::vector<std::string>>();
for (auto &test : tests)
{
auto *benchmark = benchmark::RegisterBenchmark(test.c_str(), BM_Strip,
preprocess, test)
->RangeMultiplier(2)
->Range(1, 8 << 10)
->Complexity(benchmark::oN);
}
benchmark::Initialize(&argc, argv);
benchmark::RunSpecifiedBenchmarks();
benchmark::Initialize(&argc, argv);
benchmark::RunSpecifiedBenchmarks();
return 0;
return 0;
}

View File

@ -26,6 +26,8 @@ foreach(test_cpp ${test_type_cpps})
set_target_properties(${target_name} PROPERTIES OUTPUT_NAME ${exec_name})
# link libraries
# gtest
target_link_libraries(${target_name} gtest gtest_main)
# threads (cross-platform)
target_link_libraries(${target_name} Threads::Threads)
# memgraph lib

View File

@ -3,9 +3,6 @@
#include <iostream>
#include <random>
#include <thread>
#include "stdio.h"
#include "stdlib.h"
#include "string.h"
#include "data_structures/bitset/dynamic_bitset.hpp"
#include "data_structures/concurrent/concurrent_list.hpp"
@ -28,8 +25,8 @@ constexpr int max_no_threads = 8;
using std::cout;
using std::endl;
using map_t = ConcurrentMap<int, int>;
using set_t = ConcurrentSet<int>;
using map_t = ConcurrentMap<int, int>;
using set_t = ConcurrentSet<int>;
using multiset_t = ConcurrentMultiSet<int>;
using multimap_t = ConcurrentMultiMap<int, int>;
@ -55,7 +52,8 @@ template <typename S>
void check_present_same(typename S::Accessor &acc, size_t data,
std::vector<size_t> &owned)
{
for (auto num : owned) {
for (auto num : owned)
{
permanent_assert(acc.find(num)->second == data,
"My data is present and my");
}
@ -83,7 +81,8 @@ void check_size_list(S &acc, long long size)
size_t iterator_counter = 0;
for (auto elem : acc) {
for (auto elem : acc)
{
++iterator_counter;
}
permanent_assert(iterator_counter == size, "Iterator count should be "
@ -103,7 +102,8 @@ void check_size(typename S::Accessor &acc, long long size)
size_t iterator_counter = 0;
for (auto elem : acc) {
for (auto elem : acc)
{
++iterator_counter;
}
permanent_assert(iterator_counter == size, "Iterator count should be "
@ -115,9 +115,11 @@ void check_size(typename S::Accessor &acc, long long size)
template <typename S>
void check_order(typename S::Accessor &acc)
{
if (acc.begin() != acc.end()) {
if (acc.begin() != acc.end())
{
auto last = acc.begin()->first;
for (auto elem : acc) {
for (auto elem : acc)
{
if (!(last <= elem))
std::cout << "Order isn't maintained. Before was: " << last
<< " next is " << elem.first << "\n";
@ -128,7 +130,8 @@ void check_order(typename S::Accessor &acc)
void check_zero(size_t key_range, long array[], const char *str)
{
for (int i = 0; i < key_range; i++) {
for (int i = 0; i < key_range; i++)
{
permanent_assert(array[i] == 0,
str << " doesn't hold it's guarantees. It has "
<< array[i] << " extra elements.");
@ -137,7 +140,8 @@ void check_zero(size_t key_range, long array[], const char *str)
void check_set(DynamicBitset<> &db, std::vector<bool> &set)
{
for (int i = 0; i < set.size(); i++) {
for (int i = 0; i < set.size(); i++)
{
permanent_assert(!(set[i] ^ db.at(i)),
"Set constraints aren't fullfilled.");
}
@ -147,8 +151,9 @@ void check_set(DynamicBitset<> &db, std::vector<bool> &set)
void check_multi_iterator(multimap_t::Accessor &accessor, size_t key_range,
long set[])
{
for (int i = 0; i < key_range; i++) {
auto it = accessor.find(i);
for (int i = 0; i < key_range; i++)
{
auto it = accessor.find(i);
auto it_m = accessor.find_multi(i);
permanent_assert(
!(it_m != accessor.end(i) && it == accessor.end()),
@ -161,8 +166,10 @@ void check_multi_iterator(multimap_t::Accessor &accessor, size_t key_range,
"MultiIterator didn't found the same "
"first element. Set: "
<< set[i]);
if (set[i] > 0) {
for (int j = 0; j < set[i]; j++) {
if (set[i] > 0)
{
for (int j = 0; j < set[i]; j++)
{
permanent_assert(
it->second == it_m->second,
"MultiIterator and iterator aren't on the same "
@ -189,7 +196,8 @@ run(size_t threads_no, S &skiplist,
{
std::vector<std::future<std::pair<size_t, R>>> futures;
for (size_t thread_i = 0; thread_i < threads_no; ++thread_i) {
for (size_t thread_i = 0; thread_i < threads_no; ++thread_i)
{
std::packaged_task<std::pair<size_t, R>()> task(
[&skiplist, f, thread_i]() {
return std::pair<size_t, R>(thread_i,
@ -210,7 +218,8 @@ std::vector<std::future<std::pair<size_t, R>>> run(size_t threads_no,
{
std::vector<std::future<std::pair<size_t, R>>> futures;
for (size_t thread_i = 0; thread_i < threads_no; ++thread_i) {
for (size_t thread_i = 0; thread_i < threads_no; ++thread_i)
{
std::packaged_task<std::pair<size_t, R>()> task([f, thread_i]() {
return std::pair<size_t, R>(thread_i, f(thread_i));
}); // wrap the function
@ -225,7 +234,8 @@ template <class R>
auto collect(std::vector<std::future<R>> &collect)
{
std::vector<R> collection;
for (auto &fut : collect) {
for (auto &fut : collect)
{
collection.push_back(fut.get());
}
return collection;
@ -235,9 +245,11 @@ std::vector<bool> collect_set(
std::vector<std::future<std::pair<size_t, std::vector<bool>>>> &&futures)
{
std::vector<bool> set;
for (auto &data : collect(futures)) {
for (auto &data : collect(futures))
{
set.resize(data.second.size());
for (int i = 0; i < data.second.size(); i++) {
for (int i = 0; i < data.second.size(); i++)
{
set[i] = set[i] | data.second[i];
}
}
@ -251,58 +263,46 @@ auto insert_try(typename S::Accessor &acc, long long &downcount,
std::vector<K> &owned)
{
return [&](K key, D data) mutable {
if (acc.insert(key, data).second) {
if (acc.insert(key, data).second)
{
downcount--;
owned.push_back(key);
}
};
}
// Helper function.
int parseLine(char *line)
{
// This assumes that a digit will be found and the line ends in " Kb".
int i = strlen(line);
const char *p = line;
while (*p < '0' || *p > '9')
p++;
line[i - 3] = '\0';
i = atoi(p);
return i;
}
// Returns currently used memory in kB.
int currently_used_memory()
{ // Note: this value is in KB!
FILE *file = fopen("/proc/self/status", "r");
int result = -1;
char line[128];
while (fgets(line, 128, file) != NULL) {
if (strncmp(line, "VmSize:", 7) == 0) {
result = parseLine(line);
break;
}
}
fclose(file);
return result;
}
// Performs a memory check to determine whether memory usage before calling the
// given function is approximately equal to memory usage after the function.
// Memory usage is thread sensitive, so the number of threads spawned in the
// function is necessary.
void memory_check(size_t no_threads, std::function<void()> f)
{
long long start = currently_used_memory();
logging::info("Number of threads: {}", no_threads);
// TODO: replace vm_size with something more appropriate
// the past implementation was terribly wrong
// fix that ASAP
// OR
// use custom allocation wrapper
// OR
// use Boost.Test
auto start = vm_size();
logging::info("Memory check (used memory at the beginning): {}", start);
f();
long long leaked =
currently_used_memory() - start -
no_threads * 73732; // OS sensitive, 73732 size allocated for thread
std::cout << "leaked: " << leaked << "\n";
permanent_assert(leaked <= 0, "Memory leak check");
auto end = vm_size();
logging::info("Memory check (used memory at the end): {}", end);
long long delta = end - start;
logging::info("Delta: {}", delta);
// TODO: do memory check somehow
// the past implementation was wrong
permanent_assert(true, "Memory leak");
}
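// A minimal sketch of vm_size(), assuming it is provided by
// utils/sysinfo/memory.hpp and reports the process virtual memory size;
// on Linux an equivalent could read the first field of /proc/self/statm:
//
//   // requires <fstream> and <unistd.h>
//   inline long long vm_size_sketch()
//   {
//       std::ifstream statm("/proc/self/statm");
//       long long pages = 0;
//       statm >> pages;                       // total program size in pages
//       return pages * sysconf(_SC_PAGESIZE); // convert to bytes
//   }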
// TODO: move this inside logging/default
// Initializes logging facilities
void init_log()
{

View File

@ -1,14 +1,14 @@
#include "common.h"
constexpr size_t THREADS_NO = std::min(max_no_threads, 8);
constexpr size_t key_range = 1e2;
constexpr size_t op_per_thread = 1e5;
constexpr size_t THREADS_NO = std::min(max_no_threads, 8);
constexpr size_t key_range = 1e2;
constexpr size_t op_per_thread = 1e4;
// Depending on the value there is a possibility of numerical overflow
constexpr size_t max_number = 10;
constexpr size_t no_find_per_change = 2;
constexpr size_t max_number = 10;
constexpr size_t no_find_per_change = 2;
constexpr size_t no_insert_for_one_delete = 1;
// This test simulates behavior of transactions.
// This test simulates the behavior of transactions.
// Each thread makes a series of finds interleaved with methods which change the map.
// Exact ratio of finds per change and insert per delete can be regulated with
// no_find_per_change and no_insert_for_one_delete.
@ -17,38 +17,50 @@ int main()
init_log();
memory_check(THREADS_NO, [] {
ConcurrentList<std::pair<int, int>> list;
permanent_assert(list.size() == 0, "The list isn't empty");
auto futures = run<std::pair<long long, long long>>(
THREADS_NO, [&](auto index) mutable {
auto rand = rand_gen(key_range);
auto rand = rand_gen(key_range);
auto rand_change = rand_gen_bool(no_find_per_change);
auto rand_delete = rand_gen_bool(no_insert_for_one_delete);
long long sum = 0;
long long count = 0;
long long sum = 0;
long long count = 0;
for (int i = 0; i < op_per_thread; i++) {
auto num = rand();
for (int i = 0; i < op_per_thread; i++)
{
auto num = rand();
auto data = num % max_number;
if (rand_change()) {
if (rand_delete()) {
for (auto it = list.begin(); it != list.end();
it++) {
if (it->first == num) {
if (it.remove()) {
if (rand_change())
{
if (rand_delete())
{
for (auto it = list.begin(); it != list.end(); it++)
{
if (it->first == num)
{
if (it.remove())
{
sum -= data;
count--;
}
break;
}
}
} else {
}
else
{
list.begin().push(std::make_pair(num, data));
sum += data;
count++;
}
} else {
for (auto &v : list) {
if (v.first == num) {
}
else
{
for (auto &v : list)
{
if (v.first == num)
{
permanent_assert(v.second == data,
"Data is invalid");
break;
@ -60,18 +72,23 @@ int main()
return std::pair<long long, long long>(sum, count);
});
auto it = list.begin();
long long sums = 0;
auto it = list.begin();
long long sums = 0;
long long counters = 0;
for (auto &data : collect(futures)) {
for (auto &data : collect(futures))
{
sums += data.second.first;
counters += data.second.second;
}
for (auto &e : list) {
for (auto &e : list)
{
sums -= e.second;
}
permanent_assert(sums == 0, "Same values aren't present");
check_size_list<ConcurrentList<std::pair<int, int>>>(list, counters);
std::this_thread::sleep_for(1s);
});
}

View File

@ -1,33 +1,41 @@
#include "common.h"
constexpr size_t THREADS_NO = std::min(max_no_threads, 8);
constexpr size_t op_per_thread = 1e5;
constexpr size_t bit_part_len = 2;
constexpr size_t no_slots = 1e4;
constexpr size_t key_range = no_slots * THREADS_NO * bit_part_len;
constexpr size_t THREADS_NO = std::min(max_no_threads, 8);
constexpr size_t op_per_thread = 1e5;
constexpr size_t bit_part_len = 2;
constexpr size_t no_slots = 1e4;
constexpr size_t key_range = no_slots * THREADS_NO * bit_part_len;
constexpr size_t no_sets_per_clear = 2;
// TODO: document the test
int main()
{
DynamicBitset<> db;
auto seted =
collect_set(run<std::vector<bool>>(THREADS_NO, [&](auto index) {
auto rand = rand_gen(no_slots);
auto rand = rand_gen(no_slots);
auto clear_op = rand_gen_bool(no_sets_per_clear);
std::vector<bool> set(key_range);
for (size_t i = 0; i < op_per_thread; i++) {
for (size_t i = 0; i < op_per_thread; i++)
{
size_t num =
rand() * THREADS_NO * bit_part_len + index * bit_part_len;
if (clear_op()) {
if (clear_op())
{
db.clear(num, bit_part_len);
for (int j = 0; j < bit_part_len; j++) {
for (int j = 0; j < bit_part_len; j++)
{
set[num + j] = false;
}
} else {
}
else
{
db.set(num, bit_part_len);
for (int j = 0; j < bit_part_len; j++)
for (int j = 0; j < bit_part_len; j++)
set[num + j] = true;
}
}

View File

@ -1,25 +1,29 @@
#include "common.h"
constexpr size_t THREADS_NO = std::min(max_no_threads, 4);
constexpr size_t op_per_thread = 1e5;
constexpr size_t THREADS_NO = std::min(max_no_threads, 4);
constexpr size_t op_per_thread = 1e5;
constexpr size_t up_border_bit_set_pow2 = 3;
constexpr size_t key_range =
op_per_thread * THREADS_NO * (1 << up_border_bit_set_pow2) * 2;
// TODO: document the test
int main()
{
DynamicBitset<> db;
auto seted =
collect_set(run<std::vector<bool>>(THREADS_NO, [&](auto index) {
auto rand = rand_gen(key_range);
auto rand = rand_gen(key_range);
auto rand_len = rand_gen(up_border_bit_set_pow2);
std::vector<bool> set(key_range + (1 << up_border_bit_set_pow2));
for (size_t i = 0; i < op_per_thread; i++) {
auto len = 1 << rand_len();
for (size_t i = 0; i < op_per_thread; i++)
{
auto len = 1 << rand_len();
size_t num = (rand() / len) * len;
db.set(num, len);
for (int j = 0; j < len; j++)
for (int j = 0; j < len; j++)
set[num + j] = true;
}
@ -28,14 +32,16 @@ int main()
auto cleared =
collect_set(run<std::vector<bool>>(THREADS_NO, [&](auto index) {
auto rand = rand_gen(key_range);
auto rand = rand_gen(key_range);
auto rand_len = rand_gen(up_border_bit_set_pow2);
std::vector<bool> set(key_range + (1 << up_border_bit_set_pow2));
for (size_t i = 0; i < op_per_thread; i++) {
auto len = 1 << rand_len();
for (size_t i = 0; i < op_per_thread; i++)
{
auto len = 1 << rand_len();
size_t num = (rand() / len) * len;
for (int j = 0; j < len; j++) {
for (int j = 0; j < len; j++)
{
set[num + j] = set[num + j] | db.at(num + j);
}
db.clear(num, len);
@ -44,7 +50,8 @@ int main()
return set;
}));
for (size_t i = 0; i < seted.size(); i++) {
for (size_t i = 0; i < seted.size(); i++)
{
seted[i] = seted[i] & (!cleared[i]);
}

View File

@ -1,17 +1,21 @@
#include "common.h"
constexpr size_t THREADS_NO = std::min(max_no_threads, 8);
constexpr size_t THREADS_NO = std::min(max_no_threads, 8);
constexpr size_t op_per_thread = 1e5;
constexpr size_t key_range = op_per_thread * THREADS_NO * 3;
constexpr size_t key_range = op_per_thread * THREADS_NO * 3;
// TODO: document the test
int main()
{
DynamicBitset<> db;
auto set = collect_set(run<std::vector<bool>>(THREADS_NO, [&](auto index) {
auto rand = rand_gen(key_range);
std::vector<bool> set(key_range);
for (size_t i = 0; i < op_per_thread; i++) {
for (size_t i = 0; i < op_per_thread; i++)
{
size_t num = rand();
db.set(num);
set[num] = true;

View File

@ -1,24 +1,28 @@
#include "common.h"
constexpr size_t THREADS_NO = std::min(max_no_threads, 4);
constexpr size_t op_per_thread = 1e5;
constexpr size_t THREADS_NO = std::min(max_no_threads, 4);
constexpr size_t op_per_thread = 1e5;
constexpr size_t up_border_bit_set_pow2 = 3;
constexpr size_t key_range =
op_per_thread * THREADS_NO * (1 << up_border_bit_set_pow2) * 2;
// TODO: document the test
int main()
{
DynamicBitset<> db;
auto set = collect_set(run<std::vector<bool>>(THREADS_NO, [&](auto index) {
auto rand = rand_gen(key_range);
auto rand = rand_gen(key_range);
auto rand_len = rand_gen(up_border_bit_set_pow2);
std::vector<bool> set(key_range + (1 << up_border_bit_set_pow2));
for (size_t i = 0; i < op_per_thread; i++) {
auto len = 1 << rand_len();
for (size_t i = 0; i < op_per_thread; i++)
{
auto len = 1 << rand_len();
size_t num = (rand() / len) * len;
db.set(num, len);
for (int j = 0; j < len; j++)
for (int j = 0; j < len; j++)
set[num + j] = true;
}

View File

@ -1,62 +0,0 @@
#include <cassert>
#include <iostream>
#include <thread>
#include "common.h"
#include "data_structures/linked_list.hpp"
using std::cout;
using std::endl;
template <typename list_type>
void test_concurrent_list_access(list_type &list, std::size_t size)
{
// test concurrent access
for (int i = 0; i < 1000000; ++i) {
std::thread t1([&list] {
list.push_front(1);
list.pop_front();
});
std::thread t2([&list] {
list.push_front(2);
list.pop_front();
});
t1.join();
t2.join();
assert(list.size() == size);
}
}
int main()
{
init_log();
LinkedList<int> list;
// push & pop operations
list.push_front(10);
list.push_front(20);
auto a = list.front();
assert(a == 20);
list.pop_front();
a = list.front();
assert(a == 10);
list.pop_front();
assert(list.size() == 0);
// concurrent test
LinkedList<int> concurrent_list;
concurrent_list.push_front(1);
concurrent_list.push_front(1);
std::list<int> no_concurrent_list;
no_concurrent_list.push_front(1);
no_concurrent_list.push_front(1);
test_concurrent_list_access(concurrent_list, 2);
// test_concurrent_list_access(no_concurrent_list, 2);
return 0;
}

View File

@ -3,25 +3,29 @@
constexpr size_t THREADS_NO = std::min(max_no_threads, 8);
constexpr size_t elems_per_thread = 100000;
constexpr size_t key_range = elems_per_thread * THREADS_NO * 2;
constexpr size_t key_range = elems_per_thread * THREADS_NO * 2;
// TODO: document the test
// This test checks insert_unique method under pressure.
// Test checks for missing data and changed/overwritten data.
int main()
{
init_log();
memory_check(THREADS_NO, [] {
map_t skiplist;
auto futures = run<std::vector<size_t>>(
THREADS_NO, skiplist, [](auto acc, auto index) {
auto rand = rand_gen(key_range);
auto rand = rand_gen(key_range);
long long downcount = elems_per_thread;
std::vector<size_t> owned;
auto inserter =
insert_try<size_t, size_t, map_t>(acc, downcount, owned);
do {
do
{
inserter(rand(), index);
} while (downcount > 0);
@ -30,7 +34,8 @@ int main()
});
auto accessor = skiplist.access();
for (auto &owned : collect(futures)) {
for (auto &owned : collect(futures))
{
check_present_same<map_t>(accessor, owned);
}

View File

@ -1,8 +1,10 @@
#include "common.h"
constexpr size_t THREADS_NO = std::min(max_no_threads, 8);
constexpr size_t THREADS_NO = std::min(max_no_threads, 8);
constexpr size_t elems_per_thread = 100000;
constexpr size_t key_range = elems_per_thread * THREADS_NO * 2;
constexpr size_t key_range = elems_per_thread * THREADS_NO * 2;
// TODO: document the test
// This test checks insert_unique method under pressure.
// Threads will try to insert keys in the same order.
@ -11,18 +13,20 @@ constexpr size_t key_range = elems_per_thread * THREADS_NO * 2;
int main()
{
init_log();
memory_check(THREADS_NO, [] {
map_t skiplist;
auto futures = run<std::vector<size_t>>(
THREADS_NO, skiplist, [](auto acc, auto index) {
auto rand = rand_gen(key_range);
auto rand = rand_gen(key_range);
long long downcount = elems_per_thread;
std::vector<size_t> owned;
auto inserter =
insert_try<size_t, size_t, map_t>(acc, downcount, owned);
for (int i = 0; downcount > 0; i++) {
for (int i = 0; downcount > 0; i++)
{
inserter(i, index);
}
@ -31,7 +35,8 @@ int main()
});
auto accessor = skiplist.access();
for (auto &owned : collect(futures)) {
for (auto &owned : collect(futures))
{
check_present_same<map_t>(accessor, owned);
}

View File

@ -1,21 +1,26 @@
#include "common.h"
constexpr size_t THREADS_NO = std::min(max_no_threads, 8);
constexpr size_t THREADS_NO = std::min(max_no_threads, 8);
constexpr size_t elems_per_thread = 1e5;
// TODO: document the test
int main()
{
init_log();
memory_check(THREADS_NO, [&] {
ds::static_array<std::thread, THREADS_NO> threads;
map_t skiplist;
// put THREADS_NO * elems_per_thread items to the skiplist
for (size_t thread_i = 0; thread_i < THREADS_NO; ++thread_i) {
for (size_t thread_i = 0; thread_i < THREADS_NO; ++thread_i)
{
threads[thread_i] = std::thread(
[&skiplist](size_t start, size_t end) {
auto accessor = skiplist.access();
for (size_t elem_i = start; elem_i < end; ++elem_i) {
for (size_t elem_i = start; elem_i < end; ++elem_i)
{
accessor.insert(elem_i, elem_i);
}
},
@ -23,7 +28,8 @@ int main()
thread_i * elems_per_thread + elems_per_thread);
}
// wait all threads
for (auto &thread : threads) {
for (auto &thread : threads)
{
thread.join();
}
@ -34,11 +40,13 @@ int main()
"all elements in skiplist");
}
for (size_t thread_i = 0; thread_i < THREADS_NO; ++thread_i) {
for (size_t thread_i = 0; thread_i < THREADS_NO; ++thread_i)
{
threads[thread_i] = std::thread(
[&skiplist](size_t start, size_t end) {
auto accessor = skiplist.access();
for (size_t elem_i = start; elem_i < end; ++elem_i) {
for (size_t elem_i = start; elem_i < end; ++elem_i)
{
permanent_assert(accessor.remove(elem_i) == true, "");
}
},
@ -46,7 +54,8 @@ int main()
thread_i * elems_per_thread + elems_per_thread);
}
// // wait all threads
for (auto &thread : threads) {
for (auto &thread : threads)
{
thread.join();
}
@ -61,8 +70,9 @@ int main()
// check count
{
size_t iterator_counter = 0;
auto accessor = skiplist.access();
for (auto elem : accessor) {
auto accessor = skiplist.access();
for (auto elem : accessor)
{
++iterator_counter;
cout << elem.first << " ";
}

View File

@ -1,13 +1,16 @@
#include "common.h"
constexpr size_t THREADS_NO = std::min(max_no_threads, 8);
constexpr size_t elements = 2e6;
// Test for simple memory leaks
/**
* Puts elements number of elements into the skiplist per thread and checks
* whether there is any memory leak
*/
int main()
{
init_log();
memory_check(THREADS_NO, [] {
map_t skiplist;

View File

@ -1,22 +1,30 @@
#include "common.h"
constexpr size_t THREADS_NO = std::min(max_no_threads, 1);
constexpr size_t THREADS_NO = std::min(max_no_threads, 1);
constexpr size_t elems_per_thread = 16e5;
// Known memory leak at 1,600,000 elements.
// TODO: Memory leak at 1,600,000 elements (Kruno wrote this here but
// the memory_check method had an invalid implementation)
// 1. implement valid memory_check
// 2. analyse this code
// 3. fix the memory leak
// 4. write proper test
int main()
{
init_log();
memory_check(THREADS_NO, [&] {
ds::static_array<std::thread, THREADS_NO> threads;
map_t skiplist;
// put THREADS_NO * elems_per_thread items to the skiplist
for (size_t thread_i = 0; thread_i < THREADS_NO; ++thread_i) {
for (size_t thread_i = 0; thread_i < THREADS_NO; ++thread_i)
{
threads[thread_i] = std::thread(
[&skiplist](size_t start, size_t end) {
auto accessor = skiplist.access();
for (size_t elem_i = start; elem_i < end; ++elem_i) {
for (size_t elem_i = start; elem_i < end; ++elem_i)
{
accessor.insert(elem_i, elem_i);
}
},
@ -24,7 +32,8 @@ int main()
thread_i * elems_per_thread + elems_per_thread);
}
// wait all threads
for (auto &thread : threads) {
for (auto &thread : threads)
{
thread.join();
}
@ -35,11 +44,13 @@ int main()
"all elements in skiplist");
}
for (size_t thread_i = 0; thread_i < THREADS_NO; ++thread_i) {
for (size_t thread_i = 0; thread_i < THREADS_NO; ++thread_i)
{
threads[thread_i] = std::thread(
[&skiplist](size_t start, size_t end) {
auto accessor = skiplist.access();
for (size_t elem_i = start; elem_i < end; ++elem_i) {
for (size_t elem_i = start; elem_i < end; ++elem_i)
{
permanent_assert(accessor.remove(elem_i) == true, "");
}
},
@ -47,7 +58,8 @@ int main()
thread_i * elems_per_thread + elems_per_thread);
}
// // wait all threads
for (auto &thread : threads) {
for (auto &thread : threads)
{
thread.join();
}
@ -62,8 +74,9 @@ int main()
// check count
{
size_t iterator_counter = 0;
auto accessor = skiplist.access();
for (auto elem : accessor) {
auto accessor = skiplist.access();
for (auto elem : accessor)
{
++iterator_counter;
cout << elem.first << " ";
}

View File

@ -7,13 +7,16 @@ constexpr size_t op_per_thread = 1e5;
constexpr size_t max_number = 10;
constexpr size_t no_insert_for_one_delete = 1;
// This test checks MultiIterator from multimap.
// Each thread removes random data. So removes are joint.
// Calls of remove method are interleaved with insert calls which always
// succeed.
/**
* This test checks MultiIterator from multimap.
* Each thread removes random data. So removes are joint.
* Calls of remove method are interleaved with insert calls which always
* succeed.
*/
int main()
{
init_log();
memory_check(THREADS_NO, [] {
multimap_t skiplist;

View File

@ -1,48 +1,57 @@
#include "common.h"
constexpr size_t THREADS_NO = std::min(max_no_threads, 8);
constexpr size_t key_range = 1e4;
constexpr size_t THREADS_NO = std::min(max_no_threads, 8);
constexpr size_t key_range = 1e4;
constexpr size_t op_per_thread = 1e5;
// Depending on the value there is a possibility of numerical overflow
constexpr size_t max_number = 10;
constexpr size_t max_number = 10;
constexpr size_t no_insert_for_one_delete = 1;
// This test checks MultiIterator remove method.
// Each thread removes random data. So removes are joint and scattered on same
// key values.
// Calls of remove method are interleaved with insert calls which always
// succeed.
/**
* This test checks MultiIterator remove method.
* Each thread removes random data. So removes are joint and scattered on same
* key values. Calls of remove method are interleaved with insert calls which
* always succeed.
*/
int main()
{
init_log();
memory_check(THREADS_NO, [] {
multimap_t skiplist;
auto futures = run<std::pair<long long, std::vector<long long>>>(
THREADS_NO, skiplist, [](auto acc, auto index) {
auto rand = rand_gen(key_range);
auto rand_op = rand_gen_bool(no_insert_for_one_delete);
auto rand = rand_gen(key_range);
auto rand_op = rand_gen_bool(no_insert_for_one_delete);
long long downcount = op_per_thread;
std::vector<long long> set(key_range, 0);
long long sum = 0;
do {
do
{
size_t num = rand();
auto data = rand() % max_number;
if (rand_op()) {
auto data = rand() % max_number;
if (rand_op())
{
int len = 0;
for (auto it = acc.find_multi(num); it.has_value();
it++) {
it++)
{
len++;
}
if (len > 0) {
if (len > 0)
{
int pos = rand() % len;
for (auto it = acc.find_multi(num); it.has_value();
it++) {
if (pos == 0) {
it++)
{
if (pos == 0)
{
auto data_r = it->second;
if (it.remove()) {
if (it.remove())
{
downcount--;
set[num]--;
sum -= data_r;
@ -55,7 +64,9 @@ int main()
pos--;
}
}
} else {
}
else
{
acc.insert(num, data);
downcount--;
set[num]++;
@ -67,10 +78,12 @@ int main()
});
long set[key_range] = {0};
long long sums = 0;
for (auto &data : collect(futures)) {
long long sums = 0;
for (auto &data : collect(futures))
{
sums += data.second.first;
for (int i = 0; i < key_range; i++) {
for (int i = 0; i < key_range; i++)
{
set[i] += data.second.second[i];
}
}
@ -78,7 +91,8 @@ int main()
auto accessor = skiplist.access();
check_multi_iterator(accessor, key_range, set);
for (auto &e : accessor) {
for (auto &e : accessor)
{
set[e.first]--;
sums -= e.second;
}

View File

@ -1,42 +1,48 @@
#include "common.h"
constexpr size_t THREADS_NO = std::min(max_no_threads, 4);
constexpr size_t key_range = 1e4;
constexpr size_t THREADS_NO = std::min(max_no_threads, 4);
constexpr size_t key_range = 1e4;
constexpr size_t op_per_thread = 1e5;
// Depending on the value there is a possibility of numerical overflow
constexpr size_t max_number = 10;
constexpr size_t max_number = 10;
constexpr size_t no_insert_for_one_delete = 2;
// This test checks MultiIterator remove method ].
// Each thread removes all duplicate data on random key. So removes are joint
// and scattered on same
// key values.
// Calls of remove method are interleaved with insert calls which always
// succeed.
/**
* This test checks MultiIterator remove method. Each thread removes all
* duplicate data for a random key. So removes are joined and scattered on the
* same key values. Calls of remove method are interleaved with insert calls
* which always succeed.
*/
int main()
{
init_log();
memory_check(THREADS_NO, [] {
multimap_t skiplist;
auto futures = run<std::pair<long long, std::vector<long long>>>(
THREADS_NO, skiplist, [](auto acc, auto index) {
auto rand = rand_gen(key_range);
auto rand_op = rand_gen_bool(no_insert_for_one_delete);
auto rand = rand_gen(key_range);
auto rand_op = rand_gen_bool(no_insert_for_one_delete);
long long downcount = op_per_thread;
std::vector<long long> set(key_range, 0);
long long sum = 0;
do {
do
{
size_t num = rand();
auto data = rand() % max_number;
if (rand_op()) {
auto data = rand() % max_number;
if (rand_op())
{
auto it = acc.find_multi(num);
if (it.has_value()) {
if (it.has_value())
{
it++;
while (it.has_value()) {
while (it.has_value())
{
auto data_r = it->second;
if (it.remove()) {
if (it.remove())
{
downcount--;
set[num]--;
sum -= data_r;
@ -47,7 +53,9 @@ int main()
it++;
}
}
} else {
}
else
{
acc.insert(num, data);
downcount--;
set[num]++;
@ -59,10 +67,12 @@ int main()
});
long set[key_range] = {0};
long long sums = 0;
for (auto &data : collect(futures)) {
long long sums = 0;
for (auto &data : collect(futures))
{
sums += data.second.first;
for (int i = 0; i < key_range; i++) {
for (int i = 0; i < key_range; i++)
{
set[i] += data.second.second[i];
}
}
@ -70,7 +80,8 @@ int main()
auto accessor = skiplist.access();
check_multi_iterator(accessor, key_range, set);
for (auto &e : accessor) {
for (auto &e : accessor)
{
set[e.first]--;
sums -= e.second;
}

View File

@ -1,12 +1,14 @@
#include "common.h"
constexpr size_t THREADS_NO = std::min(max_no_threads, 8);
constexpr size_t key_range = 1e4;
constexpr size_t THREADS_NO = std::min(max_no_threads, 8);
constexpr size_t key_range = 1e4;
constexpr size_t op_per_thread = 1e5;
// Depending on the value there is a possibility of numerical overflow
constexpr size_t max_number = 10;
constexpr size_t max_number = 10;
constexpr size_t no_insert_for_one_delete = 1;
// TODO: document the test
// This test checks multimap.
// Each thread removes random data. So removes are joint.
// Calls of remove method are interleaved with insert calls which always
@ -14,29 +16,35 @@ constexpr size_t no_insert_for_one_delete = 1;
int main()
{
init_log();
memory_check(THREADS_NO, [] {
multimap_t skiplist;
std::atomic<long long> size(0);
auto futures = run<std::pair<long long, std::vector<long long>>>(
THREADS_NO, skiplist, [&size](auto acc, auto index) {
auto rand = rand_gen(key_range);
auto rand_op = rand_gen_bool(no_insert_for_one_delete);
auto rand = rand_gen(key_range);
auto rand_op = rand_gen_bool(no_insert_for_one_delete);
long long downcount = op_per_thread;
std::vector<long long> set(key_range, 0);
long long sum = 0;
do {
do
{
size_t num = rand();
auto data = num % max_number;
if (rand_op()) {
if (acc.remove(num)) {
auto data = num % max_number;
if (rand_op())
{
if (acc.remove(num))
{
downcount--;
set[num]--;
sum -= data;
size--;
}
} else {
}
else
{
acc.insert(num, data);
downcount--;
set[num]++;
@ -49,11 +57,13 @@ int main()
});
long set[key_range] = {0};
long long sums = 0;
long long sums = 0;
long long size_calc = 0;
for (auto &data : collect(futures)) {
for (auto &data : collect(futures))
{
sums += data.second.first;
for (int i = 0; i < key_range; i++) {
for (int i = 0; i < key_range; i++)
{
set[i] += data.second.second[i];
size_calc += data.second.second[i];
}
@ -64,15 +74,18 @@ int main()
check_order<multimap_t>(accessor);
auto bef_it = accessor.end();
for (int i = 0; i < key_range; i++) {
for (int i = 0; i < key_range; i++)
{
auto it = accessor.find(i);
if (set[i] > 0) {
if (set[i] > 0)
{
permanent_assert(it != accessor.end(),
"Multimap doesn't contain necessary element "
<< i);
if (bef_it == accessor.end()) bef_it = accessor.find(i);
for (int j = 0; j < set[i]; j++) {
for (int j = 0; j < set[i]; j++)
{
permanent_assert(
bef_it != accessor.end(),
"Previous iterator doesn't iterate through same "
@ -89,7 +102,8 @@ int main()
bef_it++;
}
for (int j = 0; j < set[i]; j++) {
for (int j = 0; j < set[i]; j++)
{
permanent_assert(it != accessor.end(),
"Iterator doesn't iterate through same "
"key entrys. Expected "
@ -110,7 +124,8 @@ int main()
}
}
for (auto &e : accessor) {
for (auto &e : accessor)
{
set[e.first]--;
sums -= e.second;
}

View File

@ -5,6 +5,8 @@ constexpr size_t key_range = 1e4;
constexpr size_t op_per_thread = 1e5;
constexpr size_t no_insert_for_one_delete = 1;
// TODO: document the test
// This test checks multiset.
// Each thread removes random data. So removes are joint.
// Calls of remove method are interleaved with insert calls which always
@ -12,6 +14,7 @@ constexpr size_t no_insert_for_one_delete = 1;
int main()
{
init_log();
memory_check(THREADS_NO, [] {
multiset_t skiplist;

View File

@ -6,6 +6,8 @@ constexpr size_t op_per_thread = 1e5;
constexpr size_t max_number = 10;
constexpr size_t no_insert_for_one_delete = 2;
// TODO: document the test
// This test checks remove method under pressure.
// Threads will try to insert and remove keys approximately in the same order.
// This will force threads to compete intensely with each other.
@ -13,6 +15,7 @@ constexpr size_t no_insert_for_one_delete = 2;
int main()
{
init_log();
memory_check(THREADS_NO, [] {
map_t skiplist;

View File

@ -5,12 +5,15 @@ constexpr size_t key_range = 1e5;
constexpr size_t op_per_thread = 1e6;
constexpr size_t no_insert_for_one_delete = 1;
// TODO: document the test
// This test checks remove method under pressure.
// Each thread removes it's own data. So removes are disjoint.
// Calls of remove method are interleaved with insert calls.
int main()
{
init_log();
memory_check(THREADS_NO, [] {
map_t skiplist;

View File

@ -1,12 +1,14 @@
#include "common.h"
constexpr size_t THREADS_NO = std::min(max_no_threads, 8);
constexpr size_t key_range = 1e4;
constexpr size_t THREADS_NO = std::min(max_no_threads, 8);
constexpr size_t key_range = 1e4;
constexpr size_t op_per_thread = 1e5;
// Depending on the value there is a possibility of numerical overflow
constexpr size_t max_number = 10;
constexpr size_t max_number = 10;
constexpr size_t no_insert_for_one_delete = 2;
// TODO: document the test
// This test checks remove method under pressure.
// Each thread removes random data. So removes are joint.
// Calls of remove method are interleaved with insert calls.
@ -18,23 +20,29 @@ int main()
auto futures = run<std::pair<long long, long long>>(
THREADS_NO, skiplist, [](auto acc, auto index) {
auto rand = rand_gen(key_range);
auto rand_op = rand_gen_bool(no_insert_for_one_delete);
auto rand = rand_gen(key_range);
auto rand_op = rand_gen_bool(no_insert_for_one_delete);
long long downcount = op_per_thread;
long long sum = 0;
long long count = 0;
long long sum = 0;
long long count = 0;
do {
auto num = rand();
do
{
auto num = rand();
auto data = num % max_number;
if (rand_op()) {
if (acc.remove(num)) {
if (rand_op())
{
if (acc.remove(num))
{
sum -= data;
downcount--;
count--;
}
} else {
if (acc.insert(num, data).second) {
}
else
{
if (acc.insert(num, data).second)
{
sum += data;
downcount--;
count++;
@ -45,15 +53,17 @@ int main()
return std::pair<long long, long long>(sum, count);
});
auto accessor = skiplist.access();
long long sums = 0;
auto accessor = skiplist.access();
long long sums = 0;
long long counters = 0;
for (auto &data : collect(futures)) {
for (auto &data : collect(futures))
{
sums += data.second.first;
counters += data.second.second;
}
for (auto &e : accessor) {
for (auto &e : accessor)
{
sums -= e.second;
}
permanent_assert(sums == 0, "Aproximetly Same values are present");

View File

@ -5,12 +5,15 @@ constexpr size_t key_range = 1e4;
constexpr size_t op_per_thread = 1e5;
constexpr size_t no_insert_for_one_delete = 2;
// TODO: document the test
// This test checks set.
// Each thread removes random data. So removes are joint.
// Calls of remove method are interleaved with insert calls.
int main()
{
init_log();
memory_check(THREADS_NO, [] {
set_t skiplist;

View File

@ -8,6 +8,8 @@ constexpr size_t max_number = 10;
constexpr size_t no_find_per_change = 5;
constexpr size_t no_insert_for_one_delete = 1;
// TODO: document the test
// This test simulates behavior of transactions.
// Each thread makes a series of finds interleaved with methods which change the map.
// Exact ratio of finds per change and insert per delete can be regulated with
@ -15,6 +17,7 @@ constexpr size_t no_insert_for_one_delete = 1;
int main()
{
init_log();
memory_check(THREADS_NO, [] {
map_t skiplist;

View File

@ -1,11 +1,21 @@
#include <iostream>
#include <chrono>
#include "gtest/gtest.h"
#include "logging/default.cpp"
#include "utils/timer/timer.hpp"
#include "utils/assert.hpp"
using namespace std::chrono_literals;
/**
* Creates a test timer which will log a timeout message at the timeout event.
*
* @param counter how many time units the timer has to wait
*
* @return shared pointer to a timer
*/
Timer::sptr create_test_timer(int64_t counter)
{
return std::make_shared<Timer>(
@ -13,16 +23,38 @@ Timer::sptr create_test_timer(int64_t counter)
);
}
int main(void)
TEST(TimerSchedulerTest, TimerSchedulerExecution)
{
// initialize the timer
TimerScheduler<TimerSet, std::chrono::seconds> timer_scheduler;
// run the timer
timer_scheduler.run();
// add a couple of test timers
for (int64_t i = 1; i <= 3; ++i) {
timer_scheduler.add(create_test_timer(i));
}
// wait for those timers
std::this_thread::sleep_for(4s);
ASSERT_EQ(timer_scheduler.size(), 0);
// add another test timer
timer_scheduler.add(create_test_timer(1));
// wait for another timer
std::this_thread::sleep_for(2s);
// the test is done
timer_scheduler.stop();
return 0;
ASSERT_EQ(timer_scheduler.size(), 0);
}
int main(int argc, char **argv)
{
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}

View File

@ -1,19 +1,24 @@
#include "_hardcoded_query/basic.hpp"
#include "logging/default.hpp"
#include "logging/streams/stdout.hpp"
#include "query/preprocesor.hpp"
#include "query/strip/stripper.hpp"
#include "utils/assert.hpp"
#include "utils/sysinfo/memory.hpp"
template <class S, class Q>
void run(size_t n, std::string &query, S &stripper, Q &qf)
QueryPreprocessor preprocessor;
template <class Q>
void run(size_t n, std::string &query, Q &qf)
{
auto stripped = stripper.strip(query);
std::cout << "Running query [" << stripped.hash << "] for " << n << " time."
<< std::endl;
auto stripped = preprocessor.preprocess(query);
logging::info("Running query [{}] x {}.", stripped.hash, n);
for (int i = 0; i < n; i++)
{
properties_t vec = stripped.arguments;
assert(qf[stripped.hash](std::move(vec)));
permanent_assert(qf[stripped.hash](std::move(vec)), "Query failed!");
}
}
@ -29,13 +34,10 @@ int main(void)
logging::init_async();
logging::log->pipe(std::make_unique<Stdout>());
size_t entities_number = 1000;
Db db("cleaning");
auto query_functions = hardcode::load_basic_functions(db);
auto stripper = make_query_stripper(TK_LONG, TK_FLOAT, TK_STR, TK_BOOL);
size_t entities_number = 1000;
auto query_functions = hardcode::load_basic_functions(db);
std::string create_vertex_label =
"CREATE (n:LABEL {name: \"cleaner_test\"}) RETURN n";
@ -49,17 +51,21 @@ int main(void)
// clean vertices
// delete vertices a
// clean vertices
run(entities_number, create_vertex_label, stripper, query_functions);
assert(db.graph.vertices.access().size() == entities_number);
run(entities_number, create_vertex_label, query_functions);
permanent_assert(db.graph.vertices.access().size() == entities_number,
"Entities number doesn't match");
clean_vertex(db);
assert(db.graph.vertices.access().size() == entities_number);
permanent_assert(db.graph.vertices.access().size() == entities_number,
"Entities number doesn't match (after cleaning)");
run(1, delete_label_vertices, stripper, query_functions);
assert(db.graph.vertices.access().size() == entities_number);
run(1, delete_label_vertices, query_functions);
permanent_assert(db.graph.vertices.access().size() == entities_number,
"Entities number doesn't match (delete label vertices)");
clean_vertex(db);
assert(db.graph.vertices.access().size() == 0);
permanent_assert(db.graph.vertices.access().size() == 0,
"Db should be empty");
// ******************************* TEST 2 ********************************//
// add vertices a
@ -68,26 +74,33 @@ int main(void)
// delete vertices a
// clean vertices
// delete vertices all
run(entities_number, create_vertex_label, stripper, query_functions);
assert(db.graph.vertices.access().size() == entities_number);
run(entities_number, create_vertex_label, query_functions);
permanent_assert(db.graph.vertices.access().size() == entities_number,
"Entities number doesn't match");
run(entities_number, create_vertex_other, stripper, query_functions);
assert(db.graph.vertices.access().size() == entities_number * 2);
run(entities_number, create_vertex_other, query_functions);
permanent_assert(db.graph.vertices.access().size() == entities_number * 2,
"Entities number doesn't match");
clean_vertex(db);
assert(db.graph.vertices.access().size() == entities_number * 2);
permanent_assert(db.graph.vertices.access().size() == entities_number * 2,
"Entities number doesn't match");
run(1, delete_label_vertices, stripper, query_functions);
assert(db.graph.vertices.access().size() == entities_number * 2);
run(1, delete_label_vertices, query_functions);
permanent_assert(db.graph.vertices.access().size() == entities_number * 2,
"Entities number doesn't match");
clean_vertex(db);
assert(db.graph.vertices.access().size() == entities_number);
permanent_assert(db.graph.vertices.access().size() == entities_number,
"Entities number doesn't match");
run(1, delete_all_vertices, stripper, query_functions);
assert(db.graph.vertices.access().size() == entities_number);
run(1, delete_all_vertices, query_functions);
permanent_assert(db.graph.vertices.access().size() == entities_number,
"Entities number doesn't match");
clean_vertex(db);
assert(db.graph.vertices.access().size() == 0);
permanent_assert(db.graph.vertices.access().size() == 0,
"Db should be empty");
// TODO: more tests

View File

@ -3,10 +3,16 @@
#include "_hardcoded_query/basic.hpp"
#include "logging/default.hpp"
#include "logging/streams/stdout.hpp"
#include "query/preprocesor.hpp"
#include "query/strip/stripper.hpp"
#include "storage/indexes/indexes.hpp"
#include "utils/assert.hpp"
#include "utils/signals/handler.hpp"
#include "utils/stacktrace/log.hpp"
#include "utils/sysinfo/memory.hpp"
QueryPreprocessor preprocessor;
// Returns uniform random size_t generator from range [0, n)
auto rand_gen(size_t n)
{
@ -17,44 +23,43 @@ auto rand_gen(size_t n)
void run(size_t n, std::string &query, Db &db)
{
auto stripper = make_query_stripper(TK_LONG, TK_FLOAT, TK_STR, TK_BOOL);
auto qf = hardcode::load_basic_functions(db);
auto stripped = preprocessor.preprocess(query);
auto qf = hardcode::load_basic_functions(db);
logging::info("Running query [{}] x {}.", stripped.hash, n);
auto stripped = stripper.strip(query);
std::cout << "Running query [" << stripped.hash << "] for " << n << " time."
<< std::endl;
for (int i = 0; i < n; i++) {
for (int i = 0; i < n; i++)
{
properties_t vec = stripped.arguments;
assert(qf[stripped.hash](std::move(vec)));
auto commited = qf[stripped.hash](std::move(vec));
permanent_assert(commited, "Query execution failed");
}
}
void add_edge(size_t n, Db &db)
{
auto stripper = make_query_stripper(TK_LONG, TK_FLOAT, TK_STR, TK_BOOL);
auto qf = hardcode::load_basic_functions(db);
std::string query = "MATCH (n1), (n2) WHERE ID(n1)=0 AND "
"ID(n2)=1 CREATE (n1)<-[r:IS {age: "
"25,weight: 70}]-(n2) RETURN r";
auto stripped = preprocessor.preprocess(query);
auto stripped = stripper.strip(query);
std::cout << "Running query [" << stripped.hash << "] for " << n
<< " time to add edge." << std::endl;
logging::info("Running query [{}] (add edge) x {}", stripped.hash, n);
std::vector<int64_t> vertices;
for (auto &v : db.graph.vertices.access()) {
for (auto &v : db.graph.vertices.access())
{
vertices.push_back(v.second.id);
}
permanent_assert(vertices.size() > 0, "Vertices size is zero");
auto rand = rand_gen(vertices.size());
for (int i = 0; i < n; i++) {
for (int i = 0; i < n; i++)
{
properties_t vec = stripped.arguments;
vec[0] = Property(Int64(vertices[rand()]), Flags::Int64);
vec[1] = Property(Int64(vertices[rand()]), Flags::Int64);
assert(qf[stripped.hash](std::move(vec)));
vec[0] = Property(Int64(vertices[rand()]), Flags::Int64);
vec[1] = Property(Int64(vertices[rand()]), Flags::Int64);
permanent_assert(qf[stripped.hash](std::move(vec)), "Add edge failed");
}
}
@ -64,7 +69,7 @@ void add_property(Db &db, StoredProperty<TypeGroupVertex> &prop)
t.vertex_access().fill().update().for_all([&](auto va) { va.set(prop); });
assert(t.commit());
permanent_assert(t.commit(), "Add property failed");
}
void add_vertex_property_serial_int(Db &db, PropertyFamily<TypeGroupVertex> &f)
@ -79,7 +84,7 @@ void add_vertex_property_serial_int(Db &db, PropertyFamily<TypeGroupVertex> &f)
i++;
});
assert(t.commit());
permanent_assert(t.commit(), "Add vertex property serial int failed");
}
void add_edge_property_serial_int(Db &db, PropertyFamily<TypeGroupEdge> &f)
@ -94,7 +99,7 @@ void add_edge_property_serial_int(Db &db, PropertyFamily<TypeGroupEdge> &f)
i++;
});
assert(t.commit());
permanent_assert(t.commit(), "Add Edge property serial int failed");
}
template <class TG>
@ -103,8 +108,9 @@ size_t size(Db &db, IndexHolder<TG, std::nullptr_t> &h)
DbAccessor t(db);
size_t count = 0;
auto oin = h.get_read();
if (oin.is_present()) {
auto oin = h.get_read();
if (oin.is_present())
{
oin.get()->for_range(t).for_all([&](auto va) mutable { count++; });
}
@ -115,8 +121,10 @@ size_t size(Db &db, IndexHolder<TG, std::nullptr_t> &h)
void assert_empty(Db &db)
{
assert(db.graph.vertices.access().size() == 0);
assert(db.graph.edges.access().size() == 0);
permanent_assert(db.graph.vertices.access().size() == 0,
"DB isn't empty (vertices)");
permanent_assert(db.graph.edges.access().size() == 0,
"DB isn't empty (edges)");
}
void clean_vertex(Db &db)
@ -136,7 +144,7 @@ void clean_edge(Db &db)
void clear_database(Db &db)
{
std::string delete_all_vertices = "MATCH (n) DELETE n";
std::string delete_all_edges = "MATCH ()-[r]-() DELETE r";
std::string delete_all_edges = "MATCH ()-[r]-() DELETE r";
run(1, delete_all_edges, db);
run(1, delete_all_vertices, db);
@ -151,14 +159,16 @@ bool equal(Db &a, Db &b)
auto acc_a = a.graph.vertices.access();
auto acc_b = b.graph.vertices.access();
if (acc_a.size() != acc_b.size()) {
if (acc_a.size() != acc_b.size())
{
return false;
}
auto it_a = acc_a.begin();
auto it_b = acc_b.begin();
for (auto i = acc_a.size(); i > 0; i--) {
for (auto i = acc_a.size(); i > 0; i--)
{
// TODO: compare
}
}
@ -167,14 +177,16 @@ bool equal(Db &a, Db &b)
auto acc_a = a.graph.edges.access();
auto acc_b = b.graph.edges.access();
if (acc_a.size() != acc_b.size()) {
if (acc_a.size() != acc_b.size())
{
return false;
}
auto it_a = acc_a.begin();
auto it_b = acc_b.begin();
for (auto i = acc_a.size(); i > 0; i--) {
for (auto i = acc_a.size(); i > 0; i--)
{
// TODO: compare
}
}
@ -187,6 +199,16 @@ int main(void)
logging::init_async();
logging::log->pipe(std::make_unique<Stdout>());
SignalHandler::register_handler(Signal::SegmentationFault, []() {
log_stacktrace("SegmentationFault signal raised");
std::exit(EXIT_FAILURE);
});
SignalHandler::register_handler(Signal::BusError, []() {
log_stacktrace("Bus error signal raised");
std::exit(EXIT_FAILURE);
});
size_t cvl_n = 1;
std::string create_vertex_label =
@ -194,7 +216,7 @@ int main(void)
std::string create_vertex_other =
"CREATE (n:OTHER {name: \"cleaner_test\"}) RETURN n";
std::string delete_label_vertices = "MATCH (n:LABEL) DELETE n";
std::string delete_all_vertices = "MATCH (n) DELETE n";
std::string delete_all_vertices = "MATCH (n) DELETE n";
IndexDefinition vertex_property_nonunique_unordered = {
IndexLocation{VertexSide, Option<std::string>("prop"),
@ -215,15 +237,19 @@ int main(void)
// ******************************* TEST 1 ********************************//
{
std::cout << "TEST1" << std::endl;
logging::info("TEST 1");
// add indexes
// add vertices LABEL
// add edges
// add vertices property
// assert index size.
Db db("index", false);
assert(db.indexes().add_index(vertex_property_nonunique_unordered));
assert(db.indexes().add_index(edge_property_nonunique_unordered));
permanent_assert(
db.indexes().add_index(vertex_property_nonunique_unordered),
"Add vertex index failed");
permanent_assert(
db.indexes().add_index(edge_property_nonunique_unordered),
"Add edge index failed");
run(cvl_n, create_vertex_label, db);
auto sp = StoredProperty<TypeGroupVertex>(
@ -232,18 +258,21 @@ int main(void)
.family_key());
add_property(db, sp);
assert(cvl_n ==
size(db, db.graph.vertices.property_family_find_or_create("prop")
.index));
permanent_assert(
cvl_n == size(db, db.graph.vertices
.property_family_find_or_create("prop")
.index),
"Create vertex property failed");
add_edge(cvl_n, db);
add_edge_property_serial_int(
db, db.graph.edges.property_family_find_or_create("prop"));
assert(
permanent_assert(
cvl_n ==
size(db,
db.graph.edges.property_family_find_or_create("prop").index));
size(db, db.graph.edges.property_family_find_or_create("prop")
.index),
"Create edge property failed");
}
// TODO: more tests

View File

@ -1,12 +1,18 @@
#include <random>
#include "_hardcoded_query/basic.hpp"
#include "logging/default.hpp"
#include "logging/streams/stdout.hpp"
#include "_hardcoded_query/basic.hpp"
#include "query/preprocesor.hpp"
#include "query/strip/stripper.hpp"
#include "storage/indexes/indexes.hpp"
#include "utils/assert.hpp"
#include "utils/signals/handler.hpp"
#include "utils/stacktrace/log.hpp"
#include "utils/sysinfo/memory.hpp"
QueryPreprocessor preprocessor;
// Returns uniform random size_t generator from range [0, n)
auto rand_gen(size_t n)
{
@ -17,32 +23,28 @@ auto rand_gen(size_t n)
void run(size_t n, std::string &query, Db &db)
{
auto stripper = make_query_stripper(TK_LONG, TK_FLOAT, TK_STR, TK_BOOL);
auto qf = hardcode::load_basic_functions(db);
auto stripped = preprocessor.preprocess(query);
auto qf = hardcode::load_basic_functions(db);
logging::info("Running query {} [{}] x {}.", query, stripped.hash, n);
auto stripped = stripper.strip(query);
std::cout << "Running query [" << stripped.hash << "] for " << n << " time."
<< std::endl;
for (int i = 0; i < n; i++)
{
properties_t vec = stripped.arguments;
assert(qf[stripped.hash](std::move(vec)));
permanent_assert(qf[stripped.hash](std::move(vec)), "Query aborted");
}
}
void add_edge(size_t n, Db &db)
{
auto stripper = make_query_stripper(TK_LONG, TK_FLOAT, TK_STR, TK_BOOL);
auto qf = hardcode::load_basic_functions(db);
auto qf = hardcode::load_basic_functions(db);
std::string query = "MATCH (n1), (n2) WHERE ID(n1)=0 AND "
"ID(n2)=1 CREATE (n1)<-[r:IS {age: "
"25,weight: 70}]-(n2) RETURN r";
auto stripped = preprocessor.preprocess(query);
auto stripped = stripper.strip(query);
std::cout << "Running query [" << stripped.hash << "] for " << n
<< " time to add edge." << std::endl;
logging::info("Running query {} [{}] x {}.", query, stripped.hash, n);
std::vector<int64_t> vertices;
for (auto &v : db.graph.vertices.access())
@ -56,7 +58,7 @@ void add_edge(size_t n, Db &db)
properties_t vec = stripped.arguments;
vec[0] = Property(Int64(vertices[rand()]), Flags::Int64);
vec[1] = Property(Int64(vertices[rand()]), Flags::Int64);
assert(qf[stripped.hash](std::move(vec)));
permanent_assert(qf[stripped.hash](std::move(vec)), "Query aborted");
}
}
@ -66,7 +68,8 @@ void add_property(Db &db, StoredProperty<TypeGroupVertex> &prop)
t.vertex_access().fill().for_all([&](auto va) { va.set(prop); });
assert(t.commit());
permanent_assert(t.commit(), "add property query aborted");
}
void add_property_different_int(Db &db, PropertyFamily<TypeGroupVertex> &f)
@ -81,7 +84,7 @@ void add_property_different_int(Db &db, PropertyFamily<TypeGroupVertex> &f)
i++;
});
assert(t.commit());
permanent_assert(t.commit(), "add property different int aborted");
}
size_t size(Db &db, IndexHolder<TypeGroupVertex, std::nullptr_t> &h)
@ -102,8 +105,8 @@ size_t size(Db &db, IndexHolder<TypeGroupVertex, std::nullptr_t> &h)
void assert_empty(Db &db)
{
assert(db.graph.vertices.access().size() == 0);
assert(db.graph.edges.access().size() == 0);
permanent_assert(db.graph.vertices.access().size() == 0, "Db isn't empty");
permanent_assert(db.graph.edges.access().size() == 0, "Db isn't empty");
}
void clean_vertex(Db &db)
@ -178,6 +181,11 @@ int main(void)
logging::init_async();
logging::log->pipe(std::make_unique<Stdout>());
SignalHandler::register_handler(Signal::SegmentationFault, []() {
log_stacktrace("SegmentationFault signal raised");
std::exit(EXIT_FAILURE);
});
size_t cvl_n = 1000;
std::string create_vertex_label =
@ -187,9 +195,8 @@ int main(void)
std::string delete_label_vertices = "MATCH (n:LABEL) DELETE n";
std::string delete_all_vertices = "MATCH (n) DELETE n";
// ******************************* TEST 1 ********************************//
{
std::cout << "TEST1" << std::endl;
logging::info("TEST 1");
// make snapshot of empty db
// add vertices
// add edges
@ -203,11 +210,11 @@ int main(void)
clear_database(db);
db.snap_engine.import();
assert_empty(db);
logging::info("END of TEST 1");
}
// ******************************* TEST 2 ********************************//
{
std::cout << "TEST2" << std::endl;
logging::info("TEST 2");
// add vertices
// add edges
// make snapshot of db
@ -223,13 +230,12 @@ int main(void)
db.snap_engine.import();
{
Db db2("snapshot");
assert(equal(db, db2));
permanent_assert(equal(db, db2), "Dbs aren't equal");
}
}
// ******************************* TEST 3 ********************************//
{
std::cout << "TEST3" << std::endl;
logging::info("TEST 3");
// add vertices
// add edges
// make snapshot of db
@ -240,13 +246,12 @@ int main(void)
db.snap_engine.make_snapshot();
{
Db db2("not_snapshot");
assert(!equal(db, db2));
permanent_assert(!equal(db, db2), "Dbs are equal");
}
}
// ******************************* TEST 4 ********************************//
{
std::cout << "TEST4" << std::endl;
logging::info("TEST 4");
// add vertices LABEL
// add properties
// add vertices LABEL
@ -265,14 +270,17 @@ int main(void)
IndexLocation{VertexSide, Option<std::string>("prop"),
Option<std::string>(), Option<std::string>()},
IndexType{false, None}};
assert(db.indexes().add_index(idef));
assert(cvl_n == size(db, family.index));
permanent_assert(db.indexes().add_index(idef), "Index isn't added");
permanent_assert(cvl_n == size(db, family.index),
"Index size isn't valid");
db.snap_engine.make_snapshot();
{
Db db2("snapshot");
assert(cvl_n == size(db, db2.graph.vertices
.property_family_find_or_create("prop")
.index));
permanent_assert(
cvl_n == size(db, db2.graph.vertices
.property_family_find_or_create("prop")
.index),
"Index size isn't valid");
}
}

View File

@ -26,6 +26,8 @@ foreach(test_cpp ${test_type_cpps})
set_target_properties(${target_name} PROPERTIES OUTPUT_NAME ${exec_name})
# link libraries
# gtest
target_link_libraries(${target_name} gtest gtest_main)
# filesystem
target_link_libraries(${target_name} stdc++fs)
# threads (cross-platform)
@ -42,6 +44,7 @@ foreach(test_cpp ${test_type_cpps})
target_link_libraries(${target_name} dl)
# register test
add_test(${target_name} ${exec_name})
set(output_path ${CMAKE_BINARY_DIR}/test_results/unit/${target_name}.xml)
add_test(${target_name} ${exec_name} --gtest_output=xml:${output_path})
endforeach()

2
tests/unit/README.md Normal file
View File

@ -0,0 +1,2 @@
All unit tests should be written with gtest because the test infrastructure can
then visualize the results (JUnit XML output).
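
For illustration only (not part of this commit): a minimal gtest-based unit test looks like the sketch below, and the CMake changes earlier in this commit point each test binary at an XML report path via --gtest_output. The file and test names here are hypothetical.

#include "gtest/gtest.h"

// a trivial test case so the runner has something to report
TEST(ExampleTest, BasicAssertion)
{
    ASSERT_EQ(1 + 1, 2);
}

int main(int argc, char **argv)
{
    ::testing::InitGoogleTest(&argc, argv);
    return RUN_ALL_TESTS();
}

Invoking the binary as, for example, ./example_test --gtest_output=xml:test_results/unit/example_test.xml writes the JUnit-style XML report that the test infrastructure can visualize.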

View File

@ -1,45 +1,39 @@
#define CATCH_CONFIG_MAIN
#include "catch.hpp"
#include "gtest/gtest.h"
#include <functional>
#include "data_structures/bloom/bloom_filter.hpp"
#include "utils/command_line/arguments.hpp"
#include "utils/hashing/fnv64.hpp"
#include "data_structures/bloom/bloom_filter.hpp"
using StringHashFunction = std::function<uint64_t(const std::string &)>;
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wwritable-strings"
TEST(BloomFilterTest, InsertContains)
{
StringHashFunction hash1 = fnv64<std::string>;
StringHashFunction hash2 = fnv1a64<std::string>;
std::vector<StringHashFunction> funcs = {hash1, hash2};
using StringHashFunction = std::function<uint64_t(const std::string&)>;
TEST_CASE("BloomFilter Test") {
StringHashFunction hash1 = fnv64<std::string>;
StringHashFunction hash2 = fnv1a64<std::string>;
BloomFilter<std::string, 64> bloom(funcs);
auto c = [](auto x) -> int {
return x % 4;
} ;
std::vector<StringHashFunction> funcs = {
hash1, hash2
};
std::string test = "test";
std::string kifla = "kifla";
BloomFilter<std::string, 64> bloom(funcs);
bool contains_test = bloom.contains(test);
ASSERT_EQ(contains_test, false);
bloom.insert(test);
contains_test = bloom.contains(test);
ASSERT_EQ(contains_test, true);
std::string test = "test";
std::string kifla = "kifla";
std::cout << hash1(test) << std::endl;
std::cout << hash2(test) << std::endl;
std::cout << hash1(kifla) << std::endl;
std::cout << hash2(kifla) << std::endl;
std::cout << bloom.contains(test) << std::endl;
bloom.insert(test);
std::cout << bloom.contains(test) << std::endl;
std::cout << bloom.contains(kifla) << std::endl;
bloom.insert(kifla);
std::cout << bloom.contains(kifla) << std::endl;
bool contains_kifla = bloom.contains(kifla);
ASSERT_EQ(contains_kifla, false);
bloom.insert(kifla);
contains_kifla = bloom.contains(kifla);
ASSERT_EQ(contains_kifla, true);
}
#pragma clang diagnostic pop
int main(int argc, char **argv)
{
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}

View File

@ -0,0 +1,24 @@
#include "gtest/gtest.h"
#include "utils/memory/block_allocator.hpp"
TEST(BlockAllocatorTest, UnusedVsReleaseSize)
{
BlockAllocator<64> block_allocator(10);
void *block = block_allocator.acquire();
block_allocator.release(block);
EXPECT_EQ(block_allocator.unused_size(), 9);
EXPECT_EQ(block_allocator.release_size(), 1);
}
TEST(BlockAllocatorTest, CountMallocAndFreeCalls)
{
// TODO: implementation
EXPECT_EQ(true, true);
}
int main(int argc, char **argv)
{
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}

View File

@ -5,15 +5,16 @@
#include <iostream>
#include <vector>
#include "gtest/gtest.h"
#include "communication/bolt/v1/transport/chunked_decoder.hpp"
using byte = unsigned char;
void print_hex(byte x) { printf("%02X ", static_cast<byte>(x)); }
class DummyStream
struct DummyStream
{
public:
void write(const byte *values, size_t n)
{
data.insert(data.end(), values, values + n);
@ -35,25 +36,33 @@ static constexpr size_t N = std::extent<decltype(chunks)>::value;
std::string decoded = "A quick brown fox jumps over a lazy dog";
int main(void)
TEST(ChunkedDecoderTest, WriteString)
{
// DummyStream stream;
// Decoder decoder(stream);
DummyStream stream;
Decoder decoder(stream);
// for(size_t i = 0; i < N; ++i)
// {
// auto& chunk = chunks[i];
// auto finished = decoder.decode(chunk.data(), chunk.size());
for(size_t i = 0; i < N; ++i)
{
auto & chunk = chunks[i];
logging::info("Chunk size: {}", chunk.size());
// // break early if finished
// if(finished)
// break;
// }
const byte* start = chunk.data();
auto finished = decoder.decode(start, chunk.size());
// assert(decoded.size() == stream.data.size());
// break early if finished
if(finished)
break;
}
// for(size_t i = 0; i < decoded.size(); ++i)
// assert(decoded[i] == stream.data[i]);
return 0;
// check validity
ASSERT_EQ(decoded.size(), stream.data.size());
for(size_t i = 0; i < decoded.size(); ++i)
ASSERT_EQ(decoded[i], stream.data[i]);
}
int main(int argc, char **argv)
{
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}

View File

@ -3,6 +3,8 @@
#include <iostream>
#include <vector>
#include "gtest/gtest.h"
#include "communication/bolt/v1/transport/chunked_encoder.hpp"
#include "logging/default.hpp"
#include "logging/streams/stdout.hpp"
@ -54,61 +56,68 @@ void write_ff(Encoder &encoder, size_t n)
void check_ff(DummyStream &stream, size_t n)
{
for (size_t i = 0; i < n; ++i)
assert(stream.pop() == byte('\xFF'));
ASSERT_EQ(stream.pop(), byte('\xFF'));
(void)stream;
}
int main(void)
using encoder_t = bolt::ChunkedEncoder<DummyStream>;
TEST(ChunkedEncoderTest, Encode)
{
// TODO: write new test
// logging::init_async();
// logging::log->pipe(std::make_unique<Stdout>());
// DummyStream stream;
// bolt::ChunkedEncoder<DummyStream> encoder(stream);
DummyStream stream;
encoder_t encoder(stream);
size_t chunk_size = encoder_t::chunk_size;
// write_ff(encoder, 10);
// write_ff(encoder, 10);
// encoder.flush();
write_ff(encoder, 10);
write_ff(encoder, 10);
encoder.write_chunk();
// write_ff(encoder, 10);
// write_ff(encoder, 10);
// encoder.flush();
write_ff(encoder, 10);
write_ff(encoder, 10);
encoder.write_chunk();
// // this should be two chunks, one of size 65533 and the other of size 1467
// write_ff(encoder, 67000);
// encoder.flush();
// this should be two chunks, one of size 65533 and the other of size 1467
write_ff(encoder, 67000);
encoder.write_chunk();
// for (int i = 0; i < 10000; ++i)
// write_ff(encoder, 1500);
// encoder.flush();
for (int i = 0; i < 10000; ++i)
write_ff(encoder, 1500);
encoder.write_chunk();
// assert(stream.pop_size() == 20);
// check_ff(stream, 20);
// assert(stream.pop_size() == 0);
ASSERT_EQ(stream.pop_size(), 20);
check_ff(stream, 20);
ASSERT_EQ(stream.pop_size(), 0);
// assert(stream.pop_size() == 20);
// check_ff(stream, 20);
// assert(stream.pop_size() == 0);
ASSERT_EQ(stream.pop_size(), 20);
check_ff(stream, 20);
ASSERT_EQ(stream.pop_size(), 0);
// assert(stream.pop_size() == encoder.chunk_size);
// check_ff(stream, encoder.chunk_size);
// assert(stream.pop_size() == 1467);
// check_ff(stream, 1467);
// assert(stream.pop_size() == 0);
ASSERT_EQ(stream.pop_size(), chunk_size);
check_ff(stream, chunk_size);
ASSERT_EQ(stream.pop_size(), 0);
// size_t k = 10000 * 1500;
ASSERT_EQ(stream.pop_size(), 1467);
check_ff(stream, 1467);
ASSERT_EQ(stream.pop_size(), 0);
// while (k > 0) {
// auto size = k > encoder.chunk_size ? encoder.chunk_size : k;
// assert(stream.pop_size() == size);
// check_ff(stream, size);
size_t k = 10000 * 1500;
// k -= size;
// }
// assert(stream.pop_size() == 0);
return 0;
while (k > 0) {
auto size = k > chunk_size ? chunk_size : k;
ASSERT_EQ(stream.pop_size(), size);
check_ff(stream, size);
ASSERT_EQ(stream.pop_size(), 0);
k -= size;
}
ASSERT_EQ(stream.pop_size(), 0);
}
int main(int argc, char **argv)
{
logging::init_sync();
logging::log->pipe(std::make_unique<Stdout>());
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}

View File

@ -1,18 +1,17 @@
#define CATCH_CONFIG_MAIN
#include "catch.hpp"
#include "gtest/gtest.h"
#include "data_structures/concurrent/concurrent_list.hpp"
TEST_CASE("Conncurent List insert")
TEST(ConcurrentList, Insert)
{
ConcurrentList<int> list;
auto it = list.begin();
it.push(32);
it.reset();
REQUIRE(*it == 32);
ASSERT_EQ(*it, 32);
}
TEST_CASE("Conncurent List iterate")
TEST(ConcurrentList, Iterate)
{
ConcurrentList<int> list;
auto it = list.begin();
@ -22,33 +21,33 @@ TEST_CASE("Conncurent List iterate")
it.push(0);
it.reset();
REQUIRE(*it == 0);
ASSERT_EQ(*it, 0);
it++;
REQUIRE(*it == 9);
ASSERT_EQ(*it, 9);
it++;
REQUIRE(*it == 7);
ASSERT_EQ(*it, 7);
it++;
REQUIRE(*it == 32);
ASSERT_EQ(*it, 32);
it++;
REQUIRE(it == list.end());
ASSERT_EQ(it, list.end());
}
TEST_CASE("Conncurent List head remove")
TEST(ConcurrentList, RemoveHead)
{
ConcurrentList<int> list;
auto it = list.begin();
it.push(32);
it.reset();
REQUIRE(it.remove());
REQUIRE(it.is_removed());
REQUIRE(!it.remove());
ASSERT_EQ(it.remove(), true);
ASSERT_EQ(it.is_removed(), true);
ASSERT_EQ(!it.remove(), true);
it.reset();
REQUIRE(it == list.end());
ASSERT_EQ(it, list.end());
}
TEST_CASE("Conncurent List remove")
TEST(ConcurrentList, Remove)
{
ConcurrentList<int> list;
auto it = list.begin();
@ -60,16 +59,22 @@ TEST_CASE("Conncurent List remove")
it++;
it++;
REQUIRE(it.remove());
REQUIRE(it.is_removed());
REQUIRE(!it.remove());
ASSERT_EQ(it.remove(), true);
ASSERT_EQ(it.is_removed(), true);
ASSERT_EQ(!it.remove(), true);
it.reset();
REQUIRE(*it == 0);
ASSERT_EQ(*it, 0);
it++;
REQUIRE(*it == 9);
ASSERT_EQ(*it, 9);
it++;
REQUIRE(*it == 32);
ASSERT_EQ(*it, 32);
it++;
REQUIRE(it == list.end());
ASSERT_EQ(it, list.end());
}
int main(int argc, char **argv)
{
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}

View File

@ -1,5 +1,7 @@
#include <iostream>
#include "gtest/gtest.h"
#include "logging/default.hpp"
#include "logging/streams/stdout.hpp"
#include "data_structures/concurrent/concurrent_map.hpp"
@ -7,26 +9,18 @@
#include "logging/streams/stdout.hpp"
#include "utils/assert.hpp"
using std::cout;
using std::endl;
using skiplist_t = ConcurrentMap<int, int>;
void print_skiplist(const skiplist_t::Accessor &skiplist)
{
cout << "---- skiplist now has: ";
logging::info("Skiplist now has: ");
for (auto &kv : skiplist)
cout << "(" << kv.first << ", " << kv.second << ") ";
cout << "----" << endl;
logging::info(" ({}, {})", kv.first, kv.second);
}
int main(void)
TEST(ConcurrentMapSkiplist, Mix)
{
logging::init_async();
logging::log->pipe(std::make_unique<Stdout>());
skiplist_t skiplist;
auto accessor = skiplist.access();
@ -71,6 +65,13 @@ int main(void)
"insert unique element");
print_skiplist(accessor);
return 0;
}
int main(int argc, char **argv)
{
logging::init_async();
logging::log->pipe(std::make_unique<Stdout>());
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}

View File

@ -1,33 +1,28 @@
#include <iostream>
#include "gtest/gtest.h"
#include "data_structures/concurrent/concurrent_set.hpp"
#include "logging/default.hpp"
#include "logging/streams/stdout.hpp"
#include "utils/assert.hpp"
using std::cout;
using std::endl;
void print_skiplist(const ConcurrentSet<int>::Accessor &skiplist)
{
cout << "---- skiplist set now has: ";
logging::info("Skiplist set now has:");
for (auto &item : skiplist)
cout << item << ", ";
cout << "----" << endl;
logging::info("{}", item);
}
int main(void)
TEST(ConcurrentSet, Mix)
{
logging::init_async();
logging::log->pipe(std::make_unique<Stdout>());
ConcurrentSet<int> set;
auto accessor = set.access();
cout << std::boolalpha;
permanent_assert(accessor.insert(1).second == true,
"added non-existing 1? (true)");
@ -57,6 +52,10 @@ int main(void)
permanent_assert(accessor.insert(4).second == true, "add 4");
print_skiplist(accessor);
return 0;
}
int main(int argc, char **argv)
{
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}

View File

@ -1,13 +1,12 @@
#include <iostream>
#include "gtest/gtest.h"
#include "query/backend/cpp_old/entity_search.hpp"
#include "utils/assert.hpp"
#include "utils/underlying_cast.hpp"
using std::cout;
using std::endl;
int main()
TEST(CypherStateMachine, Basic)
{
// initialize cypher state machine
CypherStateMachine csm;
@ -31,6 +30,10 @@ int main()
// check minimum cost
permanent_assert(csm.min("n") == entity_search::search_label_index,
"Search place should be label index");
return 0;
}
int main(int argc, char **argv)
{
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}

View File

@ -1,35 +0,0 @@
#include <iostream>
#include <utility>
#include "storage/indexes/index.hpp"
// boilerplate
using std::cout;
using std::endl;
// types
using StringUniqueKeyAsc = UniqueKeyAsc<std::shared_ptr<std::string>>;
using index_t = Index<StringUniqueKeyAsc, std::string>;
int main(void)
{
// index creation
auto index = std::make_shared<index_t>();
// prepare values
StringUniqueKeyAsc key(std::make_shared<std::string>("test_key"));
auto value_ptr = std::make_shared<std::string>("test_value");
// insert into and unpack pair
index_t::skiplist_t::Iterator find_iterator;
bool insertion_succeeded;
std::tie(find_iterator, insertion_succeeded) =
index->insert(key, value_ptr.get());
assert(insertion_succeeded == true);
// get inserted value
auto inserted_value = *index->find(key);
assert(*inserted_value.second == *value_ptr);
return 0;
}

View File

@ -1,20 +1,25 @@
#define CATCH_CONFIG_MAIN
#include "catch.hpp"
#include "gtest/gtest.h"
#include "data_structures/bitset/dynamic_bitset.hpp"
TEST_CASE("Dynamic bitset basic functionality")
TEST(DynamicBitset, BasicFunctionality)
{
DynamicBitset<> db;
db.set(222555, 1);
bool value = db.at(222555, 1);
REQUIRE(value == true);
ASSERT_EQ(value, true);
db.set(32, 1);
value = db.at(32, 1);
REQUIRE(value == true);
ASSERT_EQ(value, true);
db.clear(32, 1);
value = db.at(32, 1);
REQUIRE(value == false);
ASSERT_EQ(value, false);
}
int main(int argc, char **argv)
{
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}

View File

@ -1,83 +0,0 @@
// TODO: include this into CMakeLists
// compile the shared library
// clang++ -std=c++1y mysql.cpp -o ../tmp/mysql.so -shared -fPIC
// clang++ -std=c++1y memsql.cpp -o ../tmp/memsql.so -shared -fPIC
// clang++ -std=c++1y dynamic_lib.cpp -o test.out -ldl
#include <iostream>
#include <fstream>
#include <vector>
#include <iterator>
#include <cstdlib>
#include "dc/dynamic_lib.hpp"
#include "utils/string/file.hpp"
class db
{
public:
// If virtual methods don't have = 0 the compiler
// won't create appropriate _ZTI symbol inside
// the .so lib. That will lead to undefined symbol
// error while the library is loading.
//
// TODO: why?
virtual void name() const = 0;
virtual void type() const = 0;
virtual ~db() {}
};
typedef db* (*produce_t)();
typedef void (*destruct_t)(db*);
using std::cout;
using std::endl;
// dependent on specific dynamic code
// "configuration" of DynamicLib
// DynamicLib<MemgraphDynamicLib>
class MemgraphDynamicLib
{
public:
const static std::string produce_name;
const static std::string destruct_name;
using produce = produce_t;
using destruct = destruct_t;
};
const std::string MemgraphDynamicLib::produce_name = "produce";
const std::string MemgraphDynamicLib::destruct_name = "destruct";
int main()
{
// -- compile example
// string tmp_file_path = "tmp/tmp.cpp";
// string tmp_so_path = "tmp/tmp.so";
// string for_compile = "#include <iostream>\nint main() { std::cout << \"test\" << std::endl; return 0; }";
// write(tmp_file_path, for_compile);
// string test_command = prints("clang++", tmp_file_path, "-o", "test.out");
// system(test_command.c_str());
// -- end compile example
// -- load example
using db_lib = DynamicLib<MemgraphDynamicLib>;
db_lib mysql_db("./tmp/mysql.so");
mysql_db.load();
auto mysql = mysql_db.produce_method();
if (mysql) {
mysql->name();
}
mysql_db.destruct_method(mysql);
db_lib memsql_db("./tmp/memsql.so");
memsql_db.load();
auto memsql = memsql_db.produce_method();
if (memsql) {
memsql->name();
}
memsql_db.destruct_method(memsql);
return 0;
}

View File

@ -1,11 +0,0 @@
#define CATCH_CONFIG_MAIN
#include "catch.hpp"
#include "data_structures/map/hashmap.hpp"
TEST_CASE("Lockfree HashMap basic functionality")
{
lockfree::HashMap<int, int> hashmap;
hashmap.put(32, 10);
REQUIRE(hashmap.at(32) == 10);
}

View File

@ -1,4 +1,4 @@
#include <iostream>
#include "gtest/gtest.h"
#include "query/backend/cpp_old/query_action_data.hpp"
#include "utils/assert.hpp"
@ -6,14 +6,19 @@
using ParameterIndexKey::Type::InternalId;
using ParameterIndexKey::Type::Projection;
auto main() -> int
TEST(ParameterIndexKey, Basic)
{
std::map<ParameterIndexKey, uint64_t> parameter_index;
std::map<ParameterIndexKey, uint64_t> parameter_index;
parameter_index[ParameterIndexKey(InternalId, "n1")] = 0;
parameter_index[ParameterIndexKey(InternalId, "n2")] = 1;
permanent_assert(parameter_index.size() == 2, "Parameter index size should be 2");
return 0;
permanent_assert(parameter_index.size() == 2,
"Parameter index size should be 2");
}
int main(int argc, char **argv)
{
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}

View File

@ -1,85 +1,96 @@
#define CATCH_CONFIG_MAIN
#include "catch.hpp"
#include "gtest/gtest.h"
#include "utils/command_line/arguments.hpp"
// because of C++11
// TODO: figure out better solution
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wwritable-strings"
TEST_CASE("ProgramArgument FlagOnly Test") {
CLEAR_ARGS();
TEST(ProgramArgument, FlagOnly)
{
CLEAR_ARGS();
int argc = 2;
char* argv[] = {"ProgramArgument FlagOnly Test", "-test"};
int argc = 2;
char *argv[] = {"ProgramArgument FlagOnly Test", "-test"};
REGISTER_ARGS(argc, argv);
REGISTER_REQUIRED_ARGS({"-test"});
REGISTER_ARGS(argc, argv);
REGISTER_REQUIRED_ARGS({"-test"});
REQUIRE(CONTAINS_FLAG("-test") == true);
ASSERT_EQ(CONTAINS_FLAG("-test"), true);
}
TEST_CASE("ProgramArgument Single Entry Test") {
CLEAR_ARGS();
TEST(ProgramArgument, SingleEntry)
{
CLEAR_ARGS();
int argc = 3;
char* argv[] = {"ProgramArgument Single Entry Test", "-bananas", "99"};
int argc = 3;
char *argv[] = {"ProgramArgument Single Entry Test", "-bananas", "99"};
REGISTER_REQUIRED_ARGS({"-bananas"});
REGISTER_ARGS(argc, argv);
REGISTER_REQUIRED_ARGS({"-bananas"});
REGISTER_ARGS(argc, argv);
REQUIRE(GET_ARG("-bananas", "100").get_int() == 99);
ASSERT_EQ(GET_ARG("-bananas", "100").get_int(), 99);
}
TEST_CASE("ProgramArgument Multiple Entries Test") {
CLEAR_ARGS();
TEST(ProgramArgument, MultipleEntries)
{
CLEAR_ARGS();
int argc = 4;
char* argv[] = {"ProgramArgument Multiple Entries Test", "-files",
"first_file.txt", "second_file.txt"};
int argc = 4;
char *argv[] = {"ProgramArgument Multiple Entries Test", "-files",
"first_file.txt", "second_file.txt"};
REGISTER_ARGS(argc, argv);
REGISTER_ARGS(argc, argv);
auto files = GET_ARGS("-files", {});
auto files = GET_ARGS("-files", {});
REQUIRE(files[0].get_string() == "first_file.txt");
ASSERT_EQ(files[0].get_string(), "first_file.txt");
}
TEST_CASE("ProgramArgument Combination Test") {
CLEAR_ARGS();
TEST(ProgramArgument, Combination)
{
CLEAR_ARGS();
int argc = 14;
char* argv[] = {"ProgramArgument Combination Test",
"-run_tests",
"-tests",
"Test1",
"Test2",
"Test3",
"-run_times",
"10",
"-export",
"test1.txt",
"test2.txt",
"test3.txt",
"-import",
"data.txt"};
int argc = 14;
char *argv[] = {"ProgramArgument Combination Test",
"-run_tests",
"-tests",
"Test1",
"Test2",
"Test3",
"-run_times",
"10",
"-export",
"test1.txt",
"test2.txt",
"test3.txt",
"-import",
"data.txt"};
REGISTER_ARGS(argc, argv);
REGISTER_ARGS(argc, argv);
REQUIRE(CONTAINS_FLAG("-run_tests") == true);
ASSERT_EQ(CONTAINS_FLAG("-run_tests"), true);
auto tests = GET_ARGS("-tests", {});
REQUIRE(tests[0].get_string() == "Test1");
REQUIRE(tests[1].get_string() == "Test2");
REQUIRE(tests[2].get_string() == "Test3");
auto tests = GET_ARGS("-tests", {});
ASSERT_EQ(tests[0].get_string(), "Test1");
ASSERT_EQ(tests[1].get_string(), "Test2");
ASSERT_EQ(tests[2].get_string(), "Test3");
REQUIRE(GET_ARG("-run_times", "0").get_int() == 10);
ASSERT_EQ(GET_ARG("-run_times", "0").get_int(), 10);
auto exports = GET_ARGS("-export", {});
REQUIRE(exports[0].get_string() == "test1.txt");
REQUIRE(exports[1].get_string() == "test2.txt");
REQUIRE(exports[2].get_string() == "test3.txt");
auto exports = GET_ARGS("-export", {});
ASSERT_EQ(exports[0].get_string(), "test1.txt");
ASSERT_EQ(exports[1].get_string(), "test2.txt");
ASSERT_EQ(exports[2].get_string(), "test3.txt");
REQUIRE(GET_ARG("-import", "test.txt").get_string() == "data.txt");
ASSERT_EQ(GET_ARG("-import", "test.txt").get_string(), "data.txt");
}
#pragma clang diagnostic pop
int main(int argc, char **argv)
{
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}

View File

@ -1,25 +1,30 @@
#define CATCH_CONFIG_MAIN
#include "catch.hpp"
#include "gtest/gtest.h"
#include "data_structures/ptr_int.hpp"
TEST_CASE("Size of pointer integer object")
TEST(PtrInt, SizeOf)
{
REQUIRE(sizeof(PtrInt<int *, 1, int>) == sizeof(uintptr_t));
ASSERT_EQ(sizeof(PtrInt<int *, 1, int>), sizeof(uintptr_t));
}
TEST_CASE("Construct and read pointer integer pair type")
TEST(PtrInt, ConstructionAndRead)
{
auto ptr1 = std::make_unique<int>(2);
PtrInt<int *, 2, int> pack1(ptr1.get(), 1);
REQUIRE(pack1.get_int() == 1);
REQUIRE(pack1.get_ptr() == ptr1.get());
ASSERT_EQ(pack1.get_int(), 1);
ASSERT_EQ(pack1.get_ptr(), ptr1.get());
auto ptr2 = std::make_unique<int>(2);
PtrInt<int *, 3, int> pack2(ptr2.get(), 4);
REQUIRE(pack2.get_int() == 4);
REQUIRE(pack2.get_ptr() == ptr2.get());
ASSERT_EQ(pack2.get_int(), 4);
ASSERT_EQ(pack2.get_ptr(), ptr2.get());
}
int main(int argc, char **argv)
{
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}

View File

@ -1,5 +1,4 @@
#define CATCH_CONFIG_MAIN
#include "catch.hpp"
#include "gtest/gtest.h"
#include "data_structures/map/rh_hashmap.hpp"
@ -18,82 +17,82 @@ public:
void cross_validate(RhHashMap<int, Data> &map, std::map<int, Data *> &s_map);
TEST_CASE("Robin hood hashmap basic functionality")
TEST(RobinHoodHashmap, BasicFunctionality)
{
RhHashMap<int, Data> map;
REQUIRE(map.size() == 0);
REQUIRE(map.insert(new Data(0)));
REQUIRE(map.size() == 1);
ASSERT_EQ(map.size(), 0);
ASSERT_EQ(map.insert(new Data(0)), true);
ASSERT_EQ(map.size(), 1);
}
TEST_CASE("Robin hood hashmap remove functionality")
TEST(RobinHoodHashmap, RemoveFunctionality)
{
RhHashMap<int, Data> map;
REQUIRE(map.insert(new Data(0)));
REQUIRE(map.remove(0).is_present());
REQUIRE(map.size() == 0);
REQUIRE(!map.find(0).is_present());
ASSERT_EQ(map.insert(new Data(0)), true);
ASSERT_EQ(map.remove(0).is_present(), true);
ASSERT_EQ(map.size(), 0);
ASSERT_EQ(!map.find(0).is_present(), true);
}
TEST_CASE("Robin hood hashmap insert/get check")
TEST(RobinHoodHashmap, InsertGetCheck)
{
RhHashMap<int, Data> map;
REQUIRE(!map.find(0).is_present());
ASSERT_EQ(!map.find(0).is_present(), true);
auto ptr0 = new Data(0);
REQUIRE(map.insert(ptr0));
REQUIRE(map.find(0).is_present());
REQUIRE(map.find(0).get() == ptr0);
ASSERT_EQ(map.insert(ptr0), true);
ASSERT_EQ(map.find(0).is_present(), true);
ASSERT_EQ(map.find(0).get(), ptr0);
}
TEST_CASE("Robin hood hashmap double insert")
TEST(RobinHoodHashmap, DoubleInsert)
{
RhHashMap<int, Data> map;
REQUIRE(map.insert(new Data(0)));
REQUIRE(!map.insert(new Data(0)));
ASSERT_EQ(map.insert(new Data(0)), true);
ASSERT_EQ(!map.insert(new Data(0)), true);
}
TEST_CASE("Robin hood hashmap")
TEST(RobinHoodHashmap, FindInsertFind)
{
RhHashMap<int, Data> map;
for (int i = 0; i < 128; i++) {
REQUIRE(!map.find(i).is_present());
REQUIRE(map.insert(new Data(i)));
REQUIRE(map.find(i).is_present());
ASSERT_EQ(!map.find(i).is_present(), true);
ASSERT_EQ(map.insert(new Data(i)), true);
ASSERT_EQ(map.find(i).is_present(), true);
}
for (int i = 0; i < 128; i++) {
REQUIRE(map.find(i).is_present());
REQUIRE(map.find(i).get()->get_key() == i);
ASSERT_EQ(map.find(i).is_present(), true);
ASSERT_EQ(map.find(i).get()->get_key(), i);
}
}
TEST_CASE("Robin hood hashmap iterate")
TEST(RobinHoodHashmap, Iterate)
{
RhHashMap<int, Data> map;
for (int i = 0; i < 128; i++) {
REQUIRE(!map.find(i).is_present());
REQUIRE(map.insert(new Data(i)));
REQUIRE(map.find(i).is_present());
ASSERT_EQ(!map.find(i).is_present(), true);
ASSERT_EQ(map.insert(new Data(i)), true);
ASSERT_EQ(map.find(i).is_present(), true);
}
bool seen[128] = {false};
for (auto e : map) {
auto key = e->get_key();
REQUIRE(!seen[key]);
ASSERT_EQ(!seen[key], true);
seen[key] = true;
}
for (int i = 0; i < 128; i++) {
REQUIRE(seen[i]);
ASSERT_EQ(seen[i], true);
}
}
TEST_CASE("Robin hood hashmap checked")
TEST(RobinHoodHashmap, Checked)
{
RhHashMap<int, Data> map;
std::map<int, Data *> s_map;
@ -102,17 +101,17 @@ TEST_CASE("Robin hood hashmap checked")
int key = std::rand();
auto data = new Data(key);
if (map.insert(data)) {
REQUIRE(s_map.find(key) == s_map.end());
ASSERT_EQ(s_map.find(key), s_map.end());
s_map[key] = data;
} else {
REQUIRE(s_map.find(key) != s_map.end());
ASSERT_NE(s_map.find(key), s_map.end());
}
}
cross_validate(map, s_map);
}
TEST_CASE("Robin hood hashmap checked with remove")
TEST(RobinHoodHashMap, CheckWithRemove)
{
RhHashMap<int, Data> map;
std::map<int, Data *> s_map;
@ -121,12 +120,12 @@ TEST_CASE("Robin hood hashmap checked with remove")
int key = std::rand() % 100;
auto data = new Data(key);
if (map.insert(data)) {
REQUIRE(s_map.find(key) == s_map.end());
ASSERT_EQ(s_map.find(key), s_map.end());
s_map[key] = data;
cross_validate(map, s_map);
} else {
REQUIRE(map.remove(key).is_present());
REQUIRE(s_map.erase(key) == 1);
ASSERT_EQ(map.remove(key).is_present(), true);
ASSERT_EQ(s_map.erase(key), 1);
cross_validate(map, s_map);
}
}
@ -137,10 +136,16 @@ TEST_CASE("Robin hood hashmap checked with remove")
void cross_validate(RhHashMap<int, Data> &map, std::map<int, Data *> &s_map)
{
for (auto e : map) {
REQUIRE(s_map.find(e->get_key()) != s_map.end());
ASSERT_NE(s_map.find(e->get_key()), s_map.end());
}
for (auto e : s_map) {
REQUIRE(map.find(e.first).get() == e.second);
ASSERT_EQ(map.find(e.first).get(), e.second);
}
}
int main(int argc, char **argv)
{
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}

View File

@ -1,5 +1,4 @@
#define CATCH_CONFIG_MAIN
#include "catch.hpp"
#include "gtest/gtest.h"
#include "data_structures/map/rh_hashmultimap.hpp"
@ -22,43 +21,43 @@ void cross_validate(RhHashMultiMap<int, Data> &map,
void cross_validate_weak(RhHashMultiMap<int, Data> &map,
std::multimap<int, Data *> &s_map);
TEST_CASE("Robin hood hashmultimap basic functionality")
TEST(RobinHoodHashmultimap, BasicFunctionality)
{
RhHashMultiMap<int, Data> map;
REQUIRE(map.size() == 0);
ASSERT_EQ(map.size(), 0);
map.add(new Data(0));
REQUIRE(map.size() == 1);
ASSERT_EQ(map.size(), 1);
}
TEST_CASE("Robin hood hashmultimap insert/get check")
TEST(RobinHoodHashmultimap, InsertGetCheck)
{
RhHashMultiMap<int, Data> map;
REQUIRE(map.find(0) == map.end());
ASSERT_EQ(map.find(0), map.end());
auto ptr0 = new Data(0);
map.add(ptr0);
REQUIRE(map.find(0) != map.end());
REQUIRE(*map.find(0) == ptr0);
ASSERT_NE(map.find(0), map.end());
ASSERT_EQ(*map.find(0), ptr0);
}
TEST_CASE("Robin hood hashmultimap extreme same key valus full")
TEST(RobinHoodHashmultimap, ExtremeSameKeyValuesFull)
{
RhHashMultiMap<int, Data> map;
for (int i = 0; i < 128; i++) {
map.add(new Data(7));
}
REQUIRE(map.size() == 128);
REQUIRE(map.find(7) != map.end());
REQUIRE(map.find(0) == map.end());
ASSERT_EQ(map.size(), 128);
ASSERT_NE(map.find(7), map.end());
ASSERT_EQ(map.find(0), map.end());
auto ptr0 = new Data(0);
map.add(ptr0);
REQUIRE(map.find(0) != map.end());
REQUIRE(*map.find(0) == ptr0);
ASSERT_NE(map.find(0), map.end());
ASSERT_EQ(*map.find(0), ptr0);
}
TEST_CASE("Robin hood hashmultimap extreme same key valus full with remove")
TEST(RobinHoodHashmultimap, ExtremeSameKeyValuesFullWithRemove)
{
RhHashMultiMap<int, Data> map;
@ -67,25 +66,25 @@ TEST_CASE("Robin hood hashmultimap extreme same key valus full with remove")
}
auto ptr = new Data(7);
map.add(ptr);
REQUIRE(map.size() == 128);
REQUIRE(!map.remove(new Data(0)));
REQUIRE(map.remove(ptr));
ASSERT_EQ(map.size(), 128);
ASSERT_EQ(!map.remove(new Data(0)), true);
ASSERT_EQ(map.remove(ptr), true);
}
TEST_CASE("Robin hood hasmultihmap remove functionality")
TEST(RobinHoodHashmultimap, RemoveFunctionality)
{
RhHashMultiMap<int, Data> map;
REQUIRE(map.find(0) == map.end());
ASSERT_EQ(map.find(0), map.end());
auto ptr0 = new Data(0);
map.add(ptr0);
REQUIRE(map.find(0) != map.end());
REQUIRE(*map.find(0) == ptr0);
REQUIRE(map.remove(ptr0));
REQUIRE(map.find(0) == map.end());
ASSERT_NE(map.find(0), map.end());
ASSERT_EQ(*map.find(0), ptr0);
ASSERT_EQ(map.remove(ptr0), true);
ASSERT_EQ(map.find(0), map.end());
}
TEST_CASE("Robin hood hashmultimap double insert")
TEST(RobinHoodHashmultimap, DoubleInsert)
{
RhHashMultiMap<int, Data> map;
@ -103,48 +102,48 @@ TEST_CASE("Robin hood hashmultimap double insert")
ptr1 = nullptr;
continue;
}
REQUIRE(false);
ASSERT_EQ(true, false);
}
}
TEST_CASE("Robin hood hashmultimap")
TEST(RobinHoodHashmultimap, FindAddFind)
{
RhHashMultiMap<int, Data> map;
for (int i = 0; i < 128; i++) {
REQUIRE(map.find(i) == map.end());
ASSERT_EQ(map.find(i), map.end());
map.add(new Data(i));
REQUIRE(map.find(i) != map.end());
ASSERT_NE(map.find(i), map.end());
}
for (int i = 0; i < 128; i++) {
REQUIRE(map.find(i) != map.end());
REQUIRE(map.find(i)->get_key() == i);
ASSERT_NE(map.find(i), map.end());
ASSERT_EQ(map.find(i)->get_key(), i);
}
}
TEST_CASE("Robin hood hashmultimap iterate")
TEST(RobinHoodHashmultimap, Iterate)
{
RhHashMultiMap<int, Data> map;
for (int i = 0; i < 128; i++) {
REQUIRE(map.find(i) == map.end());
ASSERT_EQ(map.find(i), map.end());
map.add(new Data(i));
REQUIRE(map.find(i) != map.end());
ASSERT_NE(map.find(i), map.end());
}
bool seen[128] = {false};
for (auto e : map) {
auto key = e->get_key();
REQUIRE(!seen[key]);
ASSERT_EQ(!seen[key], true);
seen[key] = true;
}
for (int i = 0; i < 128; i++) {
REQUIRE(seen[i]);
ASSERT_EQ(seen[i], true);
}
}
TEST_CASE("Robin hood hashmultimap checked")
TEST(RobinHoodHashmultimap, Checked)
{
RhHashMultiMap<int, Data> map;
std::multimap<int, Data *> s_map;
@ -159,7 +158,7 @@ TEST_CASE("Robin hood hashmultimap checked")
cross_validate(map, s_map);
}
TEST_CASE("Robin hood hashmultimap checked rand")
TEST(RobinHoodHashmultimap, CheckedRand)
{
RhHashMultiMap<int, Data> map;
std::multimap<int, Data *> s_map;
@ -174,7 +173,7 @@ TEST_CASE("Robin hood hashmultimap checked rand")
cross_validate(map, s_map);
}
TEST_CASE("Robin hood hashmultimap with remove data checked")
TEST(RobinHoodHashmultimap, WithRemoveDataChecked)
{
RhHashMultiMap<int, Data> map;
std::multimap<int, Data *> s_map;
@ -185,10 +184,10 @@ TEST_CASE("Robin hood hashmultimap with remove data checked")
if ((std::rand() % 2) == 0) {
auto it = s_map.find(key);
if (it == s_map.end()) {
REQUIRE(map.find(key) == map.end());
ASSERT_EQ(map.find(key), map.end());
} else {
s_map.erase(it);
REQUIRE(map.remove(it->second));
ASSERT_EQ(map.remove(it->second), true);
}
} else {
auto data = new Data(key);
@ -210,7 +209,7 @@ void cross_validate(RhHashMultiMap<int, Data> &map,
while (it != s_map.end() && it->second != e) {
it++;
}
REQUIRE(it != s_map.end());
ASSERT_NE(it, s_map.end());
}
for (auto e : s_map) {
@ -219,7 +218,7 @@ void cross_validate(RhHashMultiMap<int, Data> &map,
while (it != map.end() && *it != e.second) {
it++;
}
REQUIRE(it != map.end());
ASSERT_NE(it, map.end());
}
}
@ -238,7 +237,7 @@ void cross_validate_weak(RhHashMultiMap<int, Data> &map,
it++;
count--;
}
REQUIRE(count == 0);
ASSERT_EQ(count, 0);
key = e->get_key();
count = 1;
}
@ -250,7 +249,7 @@ void cross_validate_weak(RhHashMultiMap<int, Data> &map,
it++;
count--;
}
REQUIRE(count == 0);
ASSERT_EQ(count, 0);
}
for (auto e : s_map) {
@ -263,7 +262,7 @@ void cross_validate_weak(RhHashMultiMap<int, Data> &map,
it++;
count--;
}
REQUIRE(count == 0);
ASSERT_EQ(count, 0);
key = e.first;
count = 1;
}
@ -275,6 +274,6 @@ void cross_validate_weak(RhHashMultiMap<int, Data> &map,
it++;
count--;
}
REQUIRE(count == 0);
ASSERT_EQ(count, 0);
}
}

View File

@ -1,19 +1,25 @@
#define CATCH_CONFIG_MAIN
#include "catch.hpp"
#include "gtest/gtest.h"
#include <iostream>
#include <string>
#include <utility>
#include "utils/signals/handler.hpp"
#include "utils/stacktrace.hpp"
#include "utils/stacktrace/stacktrace.hpp"
TEST_CASE("SignalHandler Segmentation Fault Test") {
SignalHandler::register_handler(Signal::SegmentationFault, []() {
std::cout << "Segmentation Fault" << std::endl;
Stacktrace stacktrace;
std::cout << stacktrace.dump() << std::endl;
});
TEST(SignalHandler, SegmentationFaultTest)
{
SignalHandler::register_handler(Signal::SegmentationFault, []() {
std::cout << "Segmentation Fault" << std::endl;
Stacktrace stacktrace;
std::cout << stacktrace.dump() << std::endl;
});
std::raise(SIGSEGV);
std::raise(SIGSEGV);
}
int main(int argc, char **argv)
{
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}

View File

@ -0,0 +1,34 @@
#include "gtest/gtest.h"
#include "utils/memory/stack_allocator.hpp"
struct Object
{
int a;
int b;
Object(int a, int b) : a(a), b(b) {}
};
TEST(StackAllocatorTest, AllocationAndObjectValidity)
{
StackAllocator allocator;
for (int i = 0; i < 64 * 1024; ++i)
{
auto object = allocator.make<Object>(1, 2);
ASSERT_EQ(object->a, 1);
ASSERT_EQ(object->b, 2);
}
}
TEST(StackAllocatorTest, CountMallocAndFreeCalls)
{
// TODO: implementation
EXPECT_EQ(true, true);
}
int main(int argc, char **argv)
{
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}

View File

@ -1,12 +1,17 @@
#define CATCH_CONFIG_MAIN
#include "catch.hpp"
#include "gtest/gtest.h"
#include "template_engine/engine.hpp"
TEST_CASE("Template Engine - basic placeholder replacement")
TEST(TemplateEngine, BasicPlaceholderReplacement)
{
auto rendered = template_engine::render("{{one}} {{two}}",
{{"one", "two"}, {"two", "one"}});
REQUIRE(rendered == "two one");
ASSERT_EQ(rendered, "two one");
}
int main(int argc, char **argv)
{
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}