Added documentation.
Fixed test for index.
parent ee23b0204d
commit d806d635f9
@ -10,8 +10,8 @@
|
|||||||
// for queries.
|
// for queries.
|
||||||
namespace barrier
|
namespace barrier
|
||||||
{
|
{
|
||||||
// Every class which must be visible to outside the barrier should have there
|
// Every class from the database which must be visible outside the barrier should
|
||||||
// barrier class defined here.
|
// have its barrier class defined here.
|
||||||
|
|
||||||
// ************ Here should be forward declarations of Sized barrier classes
|
// ************ Here should be forward declarations of Sized barrier classes
|
||||||
// ACCESSORS
|
// ACCESSORS
|
||||||
@ -298,7 +298,8 @@ public:
|
|||||||
Count count();
|
Count count();
|
||||||
};
|
};
|
||||||
|
|
||||||
// TODO: Find reasons of such great size ant try to decrease it.
|
// NOTE: This large size is because of the SkipList accessor which is embedded into the
|
||||||
|
// iterator. The accessor has 64 pointer fields, which is 512 B in total.
|
||||||
class VertexAccessIterator
|
class VertexAccessIterator
|
||||||
: public Sized<560, 8>,
|
: public Sized<560, 8>,
|
||||||
public iter::Composable<const VertexAccessor, VertexAccessIterator>
|
public iter::Composable<const VertexAccessor, VertexAccessIterator>
|
||||||
@ -318,7 +319,8 @@ public:
|
|||||||
Count count();
|
Count count();
|
||||||
};
|
};
|
||||||
|
|
||||||
// TODO: Find reasons of such great size ant try to decrease it.
|
// NOTE: This large size is because of the SkipList accessor which is embedded into the
|
||||||
|
// iterator. The accessor has 64 pointer fields, which is 512 B in total.
|
||||||
class EdgeAccessIterator
|
class EdgeAccessIterator
|
||||||
: public Sized<560, 8>,
|
: public Sized<560, 8>,
|
||||||
public iter::Composable<const EdgeAccessor, EdgeAccessIterator>
|
public iter::Composable<const EdgeAccessor, EdgeAccessIterator>
|
||||||
@ -476,7 +478,6 @@ public:
|
|||||||
void write(const EdgeAccessor &edge);
|
void write(const EdgeAccessor &edge);
|
||||||
void write(const VertexStoredProperty &prop);
|
void write(const VertexStoredProperty &prop);
|
||||||
void write(const EdgeStoredProperty &prop);
|
void write(const EdgeStoredProperty &prop);
|
||||||
void write_null();
|
|
||||||
void write(const Null &v);
|
void write(const Null &v);
|
||||||
void write(const Bool &prop);
|
void write(const Bool &prop);
|
||||||
void write(const Float &prop);
|
void write(const Float &prop);
|
||||||
|
@ -6,7 +6,7 @@
|
|||||||
#include <utility>
|
#include <utility>
|
||||||
#include <vector>
|
#include <vector>
|
||||||
|
|
||||||
// THis shoul be the only place to include code from memgraph other than
|
// This should be the only place to include code from memgraph other than
|
||||||
// barrier.cpp
|
// barrier.cpp
|
||||||
#include "mvcc/id.hpp"
|
#include "mvcc/id.hpp"
|
||||||
#include "storage/indexes/index_definition.hpp"
|
#include "storage/indexes/index_definition.hpp"
|
||||||
@ -28,30 +28,35 @@ namespace barrier
|
|||||||
x(ArrayInt64) x(ArrayFloat) x(ArrayDouble) x(ArrayBool) x(ArrayString)
|
x(ArrayInt64) x(ArrayFloat) x(ArrayDouble) x(ArrayBool) x(ArrayString)
|
||||||
|
|
||||||
// **************************** HELPER FUNCTIONS **************************** //
|
// **************************** HELPER FUNCTIONS **************************** //
|
||||||
|
// CASTS FROM& -> TO&
|
||||||
template <class TO, class FROM>
|
template <class TO, class FROM>
|
||||||
TO &ref_as(FROM &ref)
|
TO &ref_as(FROM &ref)
|
||||||
{
|
{
|
||||||
return (*reinterpret_cast<TO *>(&ref));
|
return (*reinterpret_cast<TO *>(&ref));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// CASTS FROM const& -> TO const&
|
||||||
template <class TO, class FROM>
|
template <class TO, class FROM>
|
||||||
TO const &ref_as(FROM const &ref)
|
TO const &ref_as(FROM const &ref)
|
||||||
{
|
{
|
||||||
return (*reinterpret_cast<TO const *>(&ref));
|
return (*reinterpret_cast<TO const *>(&ref));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// CASTS FROM* -> TO*
|
||||||
template <class TO, class FROM>
|
template <class TO, class FROM>
|
||||||
TO *ptr_as(FROM *ref)
|
TO *ptr_as(FROM *ref)
|
||||||
{
|
{
|
||||||
return (reinterpret_cast<TO *>(ref));
|
return (reinterpret_cast<TO *>(ref));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// CASTS FROM const* -> TO const*
|
||||||
template <class TO, class FROM>
|
template <class TO, class FROM>
|
||||||
TO const *ptr_as(FROM const *ref)
|
TO const *ptr_as(FROM const *ref)
|
||||||
{
|
{
|
||||||
return (reinterpret_cast<TO const *>(ref));
|
return (reinterpret_cast<TO const *>(ref));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// CASTS FROM&& -> TO&&
|
||||||
template <class TO, class FROM>
|
template <class TO, class FROM>
|
||||||
TO &&value_as(FROM &&ref)
|
TO &&value_as(FROM &&ref)
|
||||||
{
|
{
|
||||||
@ -61,6 +66,7 @@ TO &&value_as(FROM &&ref)
|
|||||||
return (reinterpret_cast<TO &&>(std::move(ref)));
|
return (reinterpret_cast<TO &&>(std::move(ref)));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// CASTS FROM const&& -> TO const&&
|
||||||
template <class TO, class FROM>
|
template <class TO, class FROM>
|
||||||
const TO &&value_as(const FROM &&ref)
|
const TO &&value_as(const FROM &&ref)
|
||||||
{
|
{
|
||||||
@ -71,7 +77,8 @@ const TO &&value_as(const FROM &&ref)
|
|||||||
}
|
}
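// Illustrative sketch of how the cast helpers above and the Sized idea below fit
// together. RealCounter and CounterFacade are hypothetical stand-ins; only the
// pattern shown in this header (same size/alignment + reinterpret_cast forwarding)
// is assumed.
#include <cstdio>
#include <type_traits>

// Hidden implementation type that would normally live behind the barrier.
struct RealCounter
{
    int value = 0;
    int increment() { return ++value; }
};

// Barrier facade: mirrors the size/alignment of the real type, adds no fields,
// and forwards every call through the same reinterpret_cast trick ref_as wraps.
class CounterFacade
{
public:
    int increment() { return reinterpret_cast<RealCounter *>(this)->increment(); }

private:
    std::aligned_storage<sizeof(RealCounter), alignof(RealCounter)>::type data;
};

int main()
{
    RealCounter real;
    // Outside code only ever sees the facade view of the same object.
    CounterFacade &facade = *reinterpret_cast<CounterFacade *>(&real);
    std::printf("%d\n", facade.increment()); // 1
    std::printf("%d\n", facade.increment()); // 2
}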
|
||||||
|
|
||||||
// Barrier classes which will be used only through reference/pointer should
|
// Barrier classes which will be used only through reference/pointer should
|
||||||
// inherit this class.
|
// inherit this class. Outside of the barrier, derived classes will be used only
|
||||||
|
// through reference/pointer.
|
||||||
class Unsized
|
class Unsized
|
||||||
{
|
{
|
||||||
public:
|
public:
|
||||||
@ -95,7 +102,7 @@ class Sized
|
|||||||
{
|
{
|
||||||
protected:
|
protected:
|
||||||
// This will ensure that this/derived class can't be instantiated.
|
// This will ensure that this/derived class can't be instantiated.
|
||||||
// This way side outside the barrier can't "accidentaly" create this/derived
|
// Something outside the barrier can't "accidentally" create this/derived
|
||||||
// type because that would be erroneous.
|
// type because that would be erroneous.
|
||||||
Sized() = delete;
|
Sized() = delete;
|
||||||
|
|
||||||
@ -123,28 +130,9 @@ protected:
|
|||||||
"Border class aligment mismatch");
|
"Border class aligment mismatch");
|
||||||
}
|
}
|
||||||
|
|
||||||
public:
|
|
||||||
typename std::aligned_storage<size_B, alignment_B>::type &_data_ref()
|
|
||||||
{
|
|
||||||
return data;
|
|
||||||
}
|
|
||||||
|
|
||||||
typename std::aligned_storage<size_B, alignment_B>::type const &
|
|
||||||
_data_ref_const() const
|
|
||||||
{
|
|
||||||
return data;
|
|
||||||
}
|
|
||||||
|
|
||||||
private:
|
private:
|
||||||
// Here is the aligned storage which imitates size and aligment of object of
|
// Here is the aligned storage which imitates size and aligment of object of
|
||||||
// original class from memgraph.
|
// original class from memgraph.
|
||||||
typename std::aligned_storage<size_B, alignment_B>::type data;
|
typename std::aligned_storage<size_B, alignment_B>::type data;
|
||||||
};
|
};
|
||||||
|
|
||||||
// HELPER FUNCTIONS
|
|
||||||
template <class R>
|
|
||||||
bool option_fill(Option<R> &o)
|
|
||||||
{
|
|
||||||
return o.is_present() && o.get().fill();
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
@ -160,6 +160,10 @@ namespace barrier
|
|||||||
// Blueprint for valid transformation of references:
|
// Blueprint for valid transformation of references:
|
||||||
// TRANSFORM_REF(, ::);
|
// TRANSFORM_REF(, ::);
|
||||||
// template <class T> TRANSFORM_REF_TEMPLATED(<T>,::<T>);
|
// template <class T> TRANSFORM_REF_TEMPLATED(<T>,::<T>);
|
||||||
|
// TODO: The strongest assurance that everything is correct is for all of the following
|
||||||
|
// transformations to use DUP for defining their transformation. This would mean
|
||||||
|
// that names of classes exported in barrier and names from real class in
|
||||||
|
// database are equal.
|
||||||
|
|
||||||
// ***************** TRANSFORMS of reference and pointers
|
// ***************** TRANSFORMS of reference and pointers
|
||||||
DUP(Label, TRANSFORM_REF);
|
DUP(Label, TRANSFORM_REF);
|
||||||
|
@ -5,6 +5,8 @@
|
|||||||
|
|
||||||
using std::pair;
|
using std::pair;
|
||||||
|
|
||||||
|
// Item stored in skiplist. Used by ConcurrentMap and ConcurrentMultiMap to
|
||||||
|
// store key and value but to order only on keys.
|
||||||
template <typename K, typename T>
|
template <typename K, typename T>
|
||||||
class Item : public TotalOrdering<Item<K, T>>,
|
class Item : public TotalOrdering<Item<K, T>>,
|
||||||
public TotalOrdering<K, Item<K, T>>,
|
public TotalOrdering<K, Item<K, T>>,
|
||||||
@ -45,6 +47,8 @@ public:
|
|||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
|
// Common base for accessor of all derived containers(ConcurrentMap,
|
||||||
|
// ConcurrentSet, ...) from SkipList.
|
||||||
template <typename T>
|
template <typename T>
|
||||||
class AccessorBase
|
class AccessorBase
|
||||||
{
|
{
|
||||||
|
@ -5,7 +5,8 @@
|
|||||||
#include <utility>
|
#include <utility>
|
||||||
#include "utils/crtp.hpp"
|
#include "utils/crtp.hpp"
|
||||||
|
|
||||||
// TODO: reimplement this
|
// TODO: reimplement this. It's correct but somewhat inefficient and it could be
|
||||||
|
// done better.
|
||||||
template <class T>
|
template <class T>
|
||||||
class ConcurrentList
|
class ConcurrentList
|
||||||
{
|
{
|
||||||
@ -24,7 +25,7 @@ private:
|
|||||||
|
|
||||||
template <class V>
|
template <class V>
|
||||||
static bool cas(std::atomic<V> &atomic, V expected, V desired)
|
static bool cas(std::atomic<V> &atomic, V expected, V desired)
|
||||||
{ // Could be relaxed must be atleast Release.
|
{ // Could be relaxed but must be at least Release.
|
||||||
return atomic.compare_exchange_strong(expected, desired,
|
return atomic.compare_exchange_strong(expected, desired,
|
||||||
std::memory_order_seq_cst);
|
std::memory_order_seq_cst);
|
||||||
}
|
}
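// Illustrative sketch of the relaxation hinted at in the comment above: the
// explicit-ordering overload of compare_exchange_strong. Whether release/relaxed
// is actually sufficient here depends on the list's invariants; this is only a
// shape, not the file's code.
#include <atomic>

template <class V>
bool cas_release(std::atomic<V> &atomic, V expected, V desired)
{
    return atomic.compare_exchange_strong(expected, desired,
                                          std::memory_order_release,  // on success
                                          std::memory_order_relaxed); // on failure
}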
|
||||||
@ -35,18 +36,28 @@ private:
|
|||||||
return atomic.exchange(desired, std::memory_order_seq_cst);
|
return atomic.exchange(desired, std::memory_order_seq_cst);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Basic element in a ConcurrentList
|
||||||
class Node
|
class Node
|
||||||
{
|
{
|
||||||
public:
|
public:
|
||||||
Node(const T &data) : data(data) {}
|
Node(const T &data) : data(data) {}
|
||||||
Node(T &&data) : data(std::move(data)) {}
|
Node(T &&data) : data(std::move(data)) {}
|
||||||
|
|
||||||
|
// Carried data
|
||||||
T data;
|
T data;
|
||||||
|
|
||||||
|
// Next element in list or nullptr if end.
|
||||||
std::atomic<Node *> next{nullptr};
|
std::atomic<Node *> next{nullptr};
|
||||||
|
|
||||||
|
// Next removed element in list or nullptr if end.
|
||||||
std::atomic<Node *> next_rem{nullptr};
|
std::atomic<Node *> next_rem{nullptr};
|
||||||
|
|
||||||
|
// True if the node has logically been removed from the list.
|
||||||
std::atomic<bool> removed{false};
|
std::atomic<bool> removed{false};
|
||||||
};
|
};
|
||||||
|
|
||||||
|
// Base for Mutable and Immutable iterators. Also serves as accessor to the
|
||||||
|
// list, used for safe garbage disposal.
|
||||||
template <class It>
|
template <class It>
|
||||||
class IteratorBase : public Crtp<It>
|
class IteratorBase : public Crtp<It>
|
||||||
{
|
{
|
||||||
@ -58,7 +69,9 @@ private:
|
|||||||
IteratorBase(ConcurrentList *list) : list(list)
|
IteratorBase(ConcurrentList *list) : list(list)
|
||||||
{
|
{
|
||||||
assert(list != nullptr);
|
assert(list != nullptr);
|
||||||
|
// Increment number of iterators accessing list.
|
||||||
list->count++;
|
list->count++;
|
||||||
|
// Start from the beginning of the list.
|
||||||
reset();
|
reset();
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -80,14 +93,19 @@ private:
|
|||||||
}
|
}
|
||||||
|
|
||||||
auto head_rem = load(list->removed);
|
auto head_rem = load(list->removed);
|
||||||
|
|
||||||
|
// The next IF checks if this thread is responsible for disposal of
|
||||||
|
// collected garbage.
|
||||||
// Fetch could be relaxed
|
// Fetch could be relaxed
|
||||||
// There exist possibility that no one will delete garbage at this
|
// There exists a possibility that no one will delete the garbage at this
|
||||||
// time.
|
// time but it will be deleted at some other time.
|
||||||
if (list->count.fetch_sub(1) == 1 && head_rem != nullptr &&
|
if (list->count.fetch_sub(1) == 1 && // I am the last one accessing
|
||||||
cas<Node *>(
|
head_rem != nullptr && // There is some garbage
|
||||||
list->removed, head_rem,
|
cas<Node *>(list->removed, head_rem,
|
||||||
nullptr)) { // I am the last one and there is garbage to be
|
nullptr) // No new garbage was added.
|
||||||
// removed.
|
) {
|
||||||
|
// Delete all removed nodes, following the chain of next_rem starting
|
||||||
|
// from head_rem.
|
||||||
auto now = head_rem;
|
auto now = head_rem;
|
||||||
do {
|
do {
|
||||||
auto next = load(now->next_rem);
|
auto next = load(now->next_rem);
|
||||||
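// Illustrative sketch of the "last accessor cleans up" check used in the
// destructor above: fetch_sub returns the previous value, so a result of 1
// means this thread was the last one still holding the list.
#include <atomic>
#include <cstdio>

int main()
{
    std::atomic<int> count{2}; // two accessors currently alive
    auto leave = [&count] {
        if (count.fetch_sub(1) == 1)
            std::printf("last one out, dispose collected garbage\n");
        else
            std::printf("someone else is still here, leave garbage alone\n");
    };
    leave(); // 2 -> 1, not the last one
    leave(); // 1 -> 0, last one, so it disposes
}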
@ -120,7 +138,9 @@ private:
|
|||||||
do {
|
do {
|
||||||
prev = curr;
|
prev = curr;
|
||||||
curr = load(curr->next);
|
curr = load(curr->next);
|
||||||
} while (valid() && is_removed());
|
} while (valid() && is_removed()); // Loop ends when the end of the
|
||||||
|
// list is reached or a non-removed
|
||||||
|
// element is found.
|
||||||
return this->derived();
|
return this->derived();
|
||||||
}
|
}
|
||||||
It &operator++(int) { return operator++(); }
|
It &operator++(int) { return operator++(); }
|
||||||
@ -136,7 +156,7 @@ private:
|
|||||||
{
|
{
|
||||||
prev = nullptr;
|
prev = nullptr;
|
||||||
curr = load(list->head);
|
curr = load(list->head);
|
||||||
while (valid() && is_removed()) {
|
if (valid() && is_removed()) {
|
||||||
operator++();
|
operator++();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -150,9 +170,12 @@ private:
|
|||||||
// leak is less dangerous.
|
// leak is less dangerous.
|
||||||
auto node = new Node(data);
|
auto node = new Node(data);
|
||||||
Node *next = nullptr;
|
Node *next = nullptr;
|
||||||
|
// Insert at the beginning of the list. Retries on failure.
|
||||||
do {
|
do {
|
||||||
next = load(list->head);
|
next = load(list->head);
|
||||||
|
// First connect to next.
|
||||||
store(node->next, next);
|
store(node->next, next);
|
||||||
|
// Then try to set as head.
|
||||||
} while (!cas(list->head, next, node));
|
} while (!cas(list->head, next, node));
|
||||||
}
|
}
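// Illustrative sketch of the same push-front retry loop in isolation, using
// std::atomic directly instead of the cas/load/store wrappers above; the
// "sketch" namespace and Node type are hypothetical stand-ins.
#include <atomic>

namespace sketch
{
struct Node
{
    int data;
    Node *next = nullptr;
};

inline std::atomic<Node *> head{nullptr};

inline void push_front(int value)
{
    Node *node = new Node{value};
    Node *expected = head.load();
    do {
        node->next = expected;                             // first connect to the current head
    } while (!head.compare_exchange_weak(expected, node)); // then try to become the new head
}
} // namespace sketch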
|
||||||
|
|
||||||
@ -165,11 +188,17 @@ private:
|
|||||||
bool remove()
|
bool remove()
|
||||||
{
|
{
|
||||||
assert(valid());
|
assert(valid());
|
||||||
|
// Try to logically remove it.
|
||||||
if (cas(curr->removed, false, true)) {
|
if (cas(curr->removed, false, true)) {
|
||||||
// I removed it!!!
|
// I removed it!!!
|
||||||
|
// Try to disconnect it from list.
|
||||||
if (!disconnect()) {
|
if (!disconnect()) {
|
||||||
|
// Disconnection failed because the Node's relative location in the
|
||||||
|
// list changed. We must first find it again and then try
|
||||||
|
// to disconnect it again.
|
||||||
find_and_disconnect();
|
find_and_disconnect();
|
||||||
}
|
}
|
||||||
|
// Add to the list of nodes to be garbage collected.
|
||||||
store(curr->next_rem, swap(list->removed, curr));
|
store(curr->next_rem, swap(list->removed, curr));
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
@ -184,6 +213,8 @@ private:
|
|||||||
friend bool operator!=(const It &a, const It &b) { return !(a == b); }
|
friend bool operator!=(const It &a, const It &b) { return !(a == b); }
|
||||||
|
|
||||||
private:
|
private:
|
||||||
|
// Finds the current element starting from the beginning of the list. Retries
|
||||||
|
// until it successfully disconnects it.
|
||||||
void find_and_disconnect()
|
void find_and_disconnect()
|
||||||
{
|
{
|
||||||
Node *bef = nullptr;
|
Node *bef = nullptr;
|
||||||
@ -191,28 +222,34 @@ private:
|
|||||||
auto next = load(curr->next);
|
auto next = load(curr->next);
|
||||||
while (now != nullptr) {
|
while (now != nullptr) {
|
||||||
if (now == curr) {
|
if (now == curr) {
|
||||||
prev = bef;
|
// Found it.
|
||||||
|
prev = bef; // Set the correct previous node in list.
|
||||||
if (disconnect()) {
|
if (disconnect()) {
|
||||||
|
// Successfully disconnected it.
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
// Let's try again from the beginning.
|
||||||
bef = nullptr;
|
bef = nullptr;
|
||||||
now = load(list->head);
|
now = load(list->head);
|
||||||
} else if (now == next) { // Comparison with next is
|
} else if (now == next) { // Comparison with next is
|
||||||
// optimization for early return.
|
// optimization for early return.
|
||||||
return;
|
return;
|
||||||
} else {
|
} else {
|
||||||
|
// now isn't the one we are looking for; let's try the next one.
|
||||||
bef = now;
|
bef = now;
|
||||||
now = load(now->next);
|
now = load(now->next);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Tries to disconnect the current element from the list.
|
||||||
bool disconnect()
|
bool disconnect()
|
||||||
{
|
{
|
||||||
auto next = load(curr->next);
|
auto next = load(curr->next);
|
||||||
if (prev != nullptr) {
|
if (prev != nullptr) {
|
||||||
store(prev->next, next);
|
store(prev->next, next);
|
||||||
if (load(prev->removed)) {
|
if (load(prev->removed)) {
|
||||||
|
// previous isn't previous any more.
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
} else if (!cas(list->head, curr, next)) {
|
} else if (!cas(list->head, curr, next)) {
|
||||||
@ -278,14 +315,10 @@ public:
|
|||||||
|
|
||||||
Iterator begin() { return Iterator(this); }
|
Iterator begin() { return Iterator(this); }
|
||||||
|
|
||||||
// ConstIterator begin() { return ConstIterator(this); }
|
|
||||||
|
|
||||||
ConstIterator cbegin() { return ConstIterator(this); }
|
ConstIterator cbegin() { return ConstIterator(this); }
|
||||||
|
|
||||||
Iterator end() { return Iterator(); }
|
Iterator end() { return Iterator(); }
|
||||||
|
|
||||||
// ConstIterator end() { return ConstIterator(); }
|
|
||||||
|
|
||||||
ConstIterator cend() { return ConstIterator(); }
|
ConstIterator cend() { return ConstIterator(); }
|
||||||
|
|
||||||
std::size_t size() { return count.load(std::memory_order_consume); }
|
std::size_t size() { return count.load(std::memory_order_consume); }
|
||||||
|
@ -5,6 +5,9 @@
|
|||||||
|
|
||||||
using std::pair;
|
using std::pair;
|
||||||
|
|
||||||
|
// Multi thread safe map based on skiplist.
|
||||||
|
// K - type of key.
|
||||||
|
// T - type of data.
|
||||||
template <typename K, typename T>
|
template <typename K, typename T>
|
||||||
class ConcurrentMap
|
class ConcurrentMap
|
||||||
{
|
{
|
||||||
@ -57,11 +60,13 @@ public:
|
|||||||
|
|
||||||
list_it find(const K &key) { return accessor.find(key); }
|
list_it find(const K &key) { return accessor.find(key); }
|
||||||
|
|
||||||
|
// Returns iterator to item or first larger if it doesn't exist.
|
||||||
list_it_con find_or_larger(const T &item) const
|
list_it_con find_or_larger(const T &item) const
|
||||||
{
|
{
|
||||||
return accessor.find_or_larger(item);
|
return accessor.find_or_larger(item);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Returns iterator to item or first larger if it doesn't exist.
|
||||||
list_it find_or_larger(const T &item)
|
list_it find_or_larger(const T &item)
|
||||||
{
|
{
|
||||||
return accessor.find_or_larger(item);
|
return accessor.find_or_larger(item);
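// Illustrative sketch of the find_or_larger contract documented above: it
// matches std::map::lower_bound. std::map is used here only to show the
// semantics, not to reproduce the concurrent container.
#include <iostream>
#include <map>

int main()
{
    std::map<int, const char *> m{{1, "one"}, {3, "three"}, {7, "seven"}};
    auto it = m.lower_bound(4); // key 4 doesn't exist -> first larger key, which is 7
    std::cout << it->first << " " << it->second << "\n";
}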
|
||||||
|
@ -5,6 +5,9 @@
|
|||||||
|
|
||||||
using std::pair;
|
using std::pair;
|
||||||
|
|
||||||
|
// Multi thread safe multi map based on skiplist.
|
||||||
|
// K - type of key.
|
||||||
|
// T - type of data.
|
||||||
template <typename K, typename T>
|
template <typename K, typename T>
|
||||||
class ConcurrentMultiMap
|
class ConcurrentMultiMap
|
||||||
{
|
{
|
||||||
@ -53,11 +56,13 @@ public:
|
|||||||
|
|
||||||
list_it find(const K &key) { return accessor.find(key); }
|
list_it find(const K &key) { return accessor.find(key); }
|
||||||
|
|
||||||
|
// Returns iterator to item or first larger if it doesn't exist.
|
||||||
list_it_con find_or_larger(const T &item) const
|
list_it_con find_or_larger(const T &item) const
|
||||||
{
|
{
|
||||||
return accessor.find_or_larger(item);
|
return accessor.find_or_larger(item);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Returns iterator to item or first larger if it doesn't exist.
|
||||||
list_it find_or_larger(const T &item)
|
list_it find_or_larger(const T &item)
|
||||||
{
|
{
|
||||||
return accessor.find_or_larger(item);
|
return accessor.find_or_larger(item);
|
||||||
|
@ -2,6 +2,8 @@
|
|||||||
|
|
||||||
#include "data_structures/concurrent/skiplist.hpp"
|
#include "data_structures/concurrent/skiplist.hpp"
|
||||||
|
|
||||||
|
// Multi thread safe multiset based on skiplist.
|
||||||
|
// T - type of data.
|
||||||
template <class T>
|
template <class T>
|
||||||
class ConcurrentMultiSet
|
class ConcurrentMultiSet
|
||||||
{
|
{
|
||||||
@ -36,11 +38,13 @@ public:
|
|||||||
|
|
||||||
list_it find(const T &item) { return accessor.find(item); }
|
list_it find(const T &item) { return accessor.find(item); }
|
||||||
|
|
||||||
|
// Returns iterator to item or first larger if it doesn't exist.
|
||||||
list_it_con find_or_larger(const T &item) const
|
list_it_con find_or_larger(const T &item) const
|
||||||
{
|
{
|
||||||
return accessor.find_or_larger(item);
|
return accessor.find_or_larger(item);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Returns iterator to item or first larger if it doesn't exist.
|
||||||
list_it find_or_larger(const T &item)
|
list_it find_or_larger(const T &item)
|
||||||
{
|
{
|
||||||
return accessor.find_or_larger(item);
|
return accessor.find_or_larger(item);
|
||||||
|
@ -3,6 +3,8 @@
|
|||||||
#include "data_structures/concurrent/common.hpp"
|
#include "data_structures/concurrent/common.hpp"
|
||||||
#include "data_structures/concurrent/skiplist.hpp"
|
#include "data_structures/concurrent/skiplist.hpp"
|
||||||
|
|
||||||
|
// Multi thread safe set based on skiplist.
|
||||||
|
// T - type of data.
|
||||||
template <class T>
|
template <class T>
|
||||||
class ConcurrentSet
|
class ConcurrentSet
|
||||||
{
|
{
|
||||||
@ -37,18 +39,21 @@ public:
|
|||||||
|
|
||||||
list_it find(const T &item) { return accessor.find(item); }
|
list_it find(const T &item) { return accessor.find(item); }
|
||||||
|
|
||||||
|
// Returns iterator to item or first larger if it doesn't exist.
|
||||||
template <class K>
|
template <class K>
|
||||||
list_it_con find_or_larger(const K &item) const
|
list_it_con find_or_larger(const K &item) const
|
||||||
{
|
{
|
||||||
return accessor.find_or_larger(item);
|
return accessor.find_or_larger(item);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Returns iterator to item or first larger if it doesn't exist.
|
||||||
template <class K>
|
template <class K>
|
||||||
list_it find_or_larger(const K &item)
|
list_it find_or_larger(const K &item)
|
||||||
{
|
{
|
||||||
return accessor.find_or_larger(item);
|
return accessor.find_or_larger(item);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Returns iterator to item or first larger if it doesn't exist.
|
||||||
template <class K>
|
template <class K>
|
||||||
list_it_con cfind_or_larger(const K &item)
|
list_it_con cfind_or_larger(const K &item)
|
||||||
{
|
{
|
||||||
|
@ -334,6 +334,8 @@ public:
|
|||||||
MultiIterator(SkipList *skiplist, const K &data)
|
MultiIterator(SkipList *skiplist, const K &data)
|
||||||
: data(data), skiplist(skiplist)
|
: data(data), skiplist(skiplist)
|
||||||
{
|
{
|
||||||
|
// We must find the first element with key K.
|
||||||
|
// All of the logic in this loop was taken from the insert method.
|
||||||
while (true) {
|
while (true) {
|
||||||
auto level = find_path(skiplist, H - 1, data, preds, succs);
|
auto level = find_path(skiplist, H - 1, data, preds, succs);
|
||||||
if (level == -1) {
|
if (level == -1) {
|
||||||
@ -368,32 +370,36 @@ public:
|
|||||||
return succs[0]->value();
|
return succs[0]->value();
|
||||||
}
|
}
|
||||||
|
|
||||||
// WRONG THIS CAN POSSIBLY NOT BE TRUE IF SOMEONE JUST AFTER THIS REMOVE
|
|
||||||
// ELEMENT AFTER THIS ONE.
|
|
||||||
// bool has_next()
|
|
||||||
// {
|
|
||||||
// assert(succs[0] != nullptr);
|
|
||||||
// return succs[0].forward(0) != nullptr;
|
|
||||||
// }
|
|
||||||
|
|
||||||
bool has_value() { return succs[0] != nullptr; }
|
bool has_value() { return succs[0] != nullptr; }
|
||||||
|
|
||||||
MultiIterator &operator++()
|
MultiIterator &operator++()
|
||||||
{
|
{
|
||||||
assert(succs[0] != nullptr);
|
assert(succs[0] != nullptr);
|
||||||
// This whole method can be optimized if it's valid to expect height
|
// NOTE: This whole method can be optimized if it's valid to expect
|
||||||
|
// height
|
||||||
// of 1 on same key elements.
|
// of 1 on same key elements.
|
||||||
|
|
||||||
|
// First update preds and succs.
|
||||||
for (int i = succs[0]->height - 1; i >= 0; i--) {
|
for (int i = succs[0]->height - 1; i >= 0; i--) {
|
||||||
preds[i] = succs[i];
|
preds[i] = succs[i];
|
||||||
succs[i] = preds[i]->forward(i);
|
succs[i] = preds[i]->forward(i);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// If there exists current value then check if it is equal to our
|
||||||
|
// data.
|
||||||
if (succs[0] != nullptr) {
|
if (succs[0] != nullptr) {
|
||||||
if (succs[0]->value() != data) {
|
if (succs[0]->value() != data) {
|
||||||
|
// Current data isn't equal to our data, which means that this
|
||||||
|
// is the end of list of same values.
|
||||||
succs[0] = nullptr;
|
succs[0] = nullptr;
|
||||||
} else {
|
} else {
|
||||||
|
// Current value is the same as our data, but we must check that
|
||||||
|
// it is valid data and, if not, we must wait for it to
|
||||||
|
// become valid.
|
||||||
while (succs[0] != succs[succs[0]->height - 1] ||
|
while (succs[0] != succs[succs[0]->height - 1] ||
|
||||||
!succs[0]->flags.is_fully_linked()) {
|
!succs[0]->flags.is_fully_linked()) {
|
||||||
usleep(250);
|
usleep(250); // Wait to become linked
|
||||||
|
// Reget succs.
|
||||||
for (int i = succs[0]->height - 1; i >= 0; i--) {
|
for (int i = succs[0]->height - 1; i >= 0; i--) {
|
||||||
succs[i] = preds[i]->forward(i);
|
succs[i] = preds[i]->forward(i);
|
||||||
}
|
}
|
||||||
@ -427,6 +433,8 @@ public:
|
|||||||
bool remove()
|
bool remove()
|
||||||
{
|
{
|
||||||
assert(succs[0] != nullptr);
|
assert(succs[0] != nullptr);
|
||||||
|
// Calls skiplist remove method.
|
||||||
|
|
||||||
return skiplist->template remove<K>(
|
return skiplist->template remove<K>(
|
||||||
data, preds, succs,
|
data, preds, succs,
|
||||||
SkipList<T>::template MultiIterator<K>::update_path);
|
SkipList<T>::template MultiIterator<K>::update_path);
|
||||||
@ -436,14 +444,18 @@ public:
|
|||||||
static int update_path(SkipList *skiplist, int start, const K &item,
|
static int update_path(SkipList *skiplist, int start, const K &item,
|
||||||
Node *preds[], Node *succs[])
|
Node *preds[], Node *succs[])
|
||||||
{
|
{
|
||||||
|
// NOTE: This could be done more efficiently than searching for the item
|
||||||
|
// element again. We just need to use information already present
|
||||||
|
// in preds and succs because we know that this method is used
|
||||||
|
// exclusively by passing it into skiplist remove method from
|
||||||
|
// MultiIterator remove method.
|
||||||
|
|
||||||
// One optimization here would be to wait for is_fully_linked to be
|
// One optimization here would be to wait for is_fully_linked to be
|
||||||
// true. That way that doesnt have to be done in constructor and
|
// true. That way that doesnt have to be done in constructor and
|
||||||
// ++ operator.
|
// ++ operator.
|
||||||
int level_found = succs[0]->height - 1;
|
int level_found = succs[0]->height - 1;
|
||||||
assert(succs[0] == succs[level_found]);
|
assert(succs[0] == succs[level_found]);
|
||||||
// for (int i = level_found; i >= 0; i--) {
|
|
||||||
// // Someone has done something
|
|
||||||
// if (preds[i]->forward(i) != succs[i]) {
|
|
||||||
for (auto it = MultiIterator<K>(skiplist, item); it.has_value();
|
for (auto it = MultiIterator<K>(skiplist, item); it.has_value();
|
||||||
it++) {
|
it++) {
|
||||||
if (it.succs[0] == succs[0]) { // Found it
|
if (it.succs[0] == succs[0]) { // Found it
|
||||||
@ -453,12 +465,7 @@ public:
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
// Someone removed it
|
// Someone removed it
|
||||||
// assert(succs[0]->flags.is_marked());
|
|
||||||
return -1;
|
return -1;
|
||||||
// }
|
|
||||||
// }
|
|
||||||
// // Everything is fine
|
|
||||||
// return level_found;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
const K &data;
|
const K &data;
|
||||||
@ -751,10 +758,12 @@ private:
|
|||||||
return valid;
|
return valid;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Inserts non unique data into list.
|
||||||
|
// NOTE: Uses modified logic from insert method.
|
||||||
Iterator insert_non_unique(T &&data, Node *preds[], Node *succs[])
|
Iterator insert_non_unique(T &&data, Node *preds[], Node *succs[])
|
||||||
{
|
{
|
||||||
while (true) {
|
while (true) {
|
||||||
// TODO: before here was data.first
|
|
||||||
auto level = find_path(this, H - 1, data, preds, succs);
|
auto level = find_path(this, H - 1, data, preds, succs);
|
||||||
|
|
||||||
auto height = 1;
|
auto height = 1;
|
||||||
@ -849,7 +858,7 @@ private:
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Insert unique data
|
// Insert unique data
|
||||||
// TODO: This is almost all duplicate code from insert
|
// NOTE: This is almost all duplicate code from insert.
|
||||||
template <class K, class... Args>
|
template <class K, class... Args>
|
||||||
std::pair<Iterator, bool> emplace(Node *preds[], Node *succs[], K &key,
|
std::pair<Iterator, bool> emplace(Node *preds[], Node *succs[], K &key,
|
||||||
Args &&... args)
|
Args &&... args)
|
||||||
|
@ -20,11 +20,7 @@ protected:
|
|||||||
public:
|
public:
|
||||||
Combined() : data(0) {}
|
Combined() : data(0) {}
|
||||||
|
|
||||||
Combined(D *data, size_t off)
|
Combined(D *data, size_t off) { this->data = ((size_t)data) | off; }
|
||||||
{
|
|
||||||
// assert((((size_t)data) & 0x7) == 0 && off < 8);
|
|
||||||
this->data = ((size_t)data) | off;
|
|
||||||
}
|
|
||||||
|
|
||||||
bool valid() const { return data != 0; }
|
bool valid() const { return data != 0; }
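// Illustrative sketch of the pointer/offset packing done by the Combined
// constructor above: the pointed-to objects are at least 8-byte aligned, so the
// low three bits of the pointer are free to hold a small offset. Entry is a
// hypothetical stand-in type.
#include <cassert>
#include <cstddef>
#include <cstdio>

struct Entry
{
    int value;
};

int main()
{
    alignas(8) static Entry e{42};
    std::size_t off = 5; // must fit into 3 bits (0..7)

    std::size_t packed = reinterpret_cast<std::size_t>(&e) | off;

    Entry *ptr = reinterpret_cast<Entry *>(packed & ~static_cast<std::size_t>(0x7));
    std::size_t unpacked_off = packed & 0x7;

    assert(ptr == &e && unpacked_off == 5);
    std::printf("%d %zu\n", ptr->value, unpacked_off);
}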
|
||||||
|
|
||||||
@ -72,6 +68,7 @@ protected:
|
|||||||
size_t data;
|
size_t data;
|
||||||
};
|
};
|
||||||
|
|
||||||
|
// Base for all iterators. It can start from any point in map.
|
||||||
template <class It>
|
template <class It>
|
||||||
class IteratorBase : public Crtp<It>
|
class IteratorBase : public Crtp<It>
|
||||||
{
|
{
|
||||||
@ -97,7 +94,11 @@ protected:
|
|||||||
}
|
}
|
||||||
|
|
||||||
const RhBase *map;
|
const RhBase *map;
|
||||||
|
|
||||||
|
// How many times we have advanced.
|
||||||
size_t advanced;
|
size_t advanced;
|
||||||
|
|
||||||
|
// Current position in array
|
||||||
size_t index;
|
size_t index;
|
||||||
|
|
||||||
public:
|
public:
|
||||||
@ -123,12 +124,15 @@ protected:
|
|||||||
do {
|
do {
|
||||||
advanced++;
|
advanced++;
|
||||||
if (advanced >= map->capacity) {
|
if (advanced >= map->capacity) {
|
||||||
|
// We have advanced more than the capacity of the map, so we
|
||||||
|
// are done.
|
||||||
map = nullptr;
|
map = nullptr;
|
||||||
advanced = index = ~((size_t)0);
|
advanced = index = ~((size_t)0);
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
index = (index + 1) & mask;
|
index = (index + 1) & mask;
|
||||||
} while (!map->array[index].valid());
|
} while (!map->array[index].valid()); // Check if there is element
|
||||||
|
// at current position.
|
||||||
|
|
||||||
return this->derived();
|
return this->derived();
|
||||||
}
|
}
|
||||||
@ -221,6 +225,7 @@ public:
|
|||||||
ConstIterator cend() const { return ConstIterator(); }
|
ConstIterator cend() const { return ConstIterator(); }
|
||||||
|
|
||||||
protected:
|
protected:
|
||||||
|
// Copies raw byte data from the other RhBase.
|
||||||
void copy_from(const RhBase &other)
|
void copy_from(const RhBase &other)
|
||||||
{
|
{
|
||||||
capacity = other.capacity;
|
capacity = other.capacity;
|
||||||
@ -235,6 +240,7 @@ protected:
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Takes data from other RhBase.
|
||||||
void take_from(RhBase &&other)
|
void take_from(RhBase &&other)
|
||||||
{
|
{
|
||||||
capacity = other.capacity;
|
capacity = other.capacity;
|
||||||
@ -245,16 +251,17 @@ protected:
|
|||||||
other.capacity = 0;
|
other.capacity = 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
void init_array(size_t size)
|
// Initializes the array with the given capacity.
|
||||||
|
void init_array(size_t capacity)
|
||||||
{
|
{
|
||||||
size_t bytes = sizeof(Combined) * size;
|
size_t bytes = sizeof(Combined) * capacity;
|
||||||
array = (Combined *)malloc(bytes);
|
array = (Combined *)malloc(bytes);
|
||||||
std::memset(array, 0, bytes);
|
std::memset(array, 0, bytes);
|
||||||
capacity = size;
|
this->capacity = capacity;
|
||||||
}
|
}
|
||||||
|
|
||||||
// True if before array has some values.
|
// True if before array has some values.
|
||||||
// Before array has to be released also.
|
// Before array must be released in the caller.
|
||||||
bool increase_size()
|
bool increase_size()
|
||||||
{
|
{
|
||||||
if (capacity == 0) {
|
if (capacity == 0) {
|
||||||
@ -276,6 +283,7 @@ protected:
|
|||||||
}
|
}
|
||||||
|
|
||||||
public:
|
public:
|
||||||
|
// Clears all data.
|
||||||
void clear()
|
void clear()
|
||||||
{
|
{
|
||||||
free(array);
|
free(array);
|
||||||
@ -297,7 +305,7 @@ protected:
|
|||||||
return hash(std::hash<K>()(key)) & mask;
|
return hash(std::hash<K>()(key)) & mask;
|
||||||
}
|
}
|
||||||
|
|
||||||
// This is rather expensive but offers good distribution.
|
// NOTE: This is rather expensive but offers good distribution.
|
||||||
size_t hash(size_t x) const
|
size_t hash(size_t x) const
|
||||||
{
|
{
|
||||||
x = (x ^ (x >> 30)) * UINT64_C(0xbf58476d1ce4e5b9);
|
x = (x ^ (x >> 30)) * UINT64_C(0xbf58476d1ce4e5b9);
|
||||||
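// Context sketch: the mixing line above is where this hunk is cut off; it is the
// first step of a splitmix64-style finalizer. The usual remainder is shown below
// as an assumption only, not necessarily the exact code in this file.
#include <cstdint>

inline std::uint64_t hash_finalize(std::uint64_t x)
{
    x = (x ^ (x >> 30)) * UINT64_C(0xbf58476d1ce4e5b9);
    x = (x ^ (x >> 27)) * UINT64_C(0x94d049bb133111eb);
    x = x ^ (x >> 31);
    return x;
}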
|
@ -1,7 +1,7 @@
|
|||||||
#include <functional>
|
#include <functional>
|
||||||
|
|
||||||
#include "utils/crtp.hpp"
|
|
||||||
#include "data_structures/map/rh_common.hpp"
|
#include "data_structures/map/rh_common.hpp"
|
||||||
|
#include "utils/crtp.hpp"
|
||||||
#include "utils/option_ptr.hpp"
|
#include "utils/option_ptr.hpp"
|
||||||
|
|
||||||
// HashMap with RobinHood collision resolution policy.
|
// HashMap with RobinHood collision resolution policy.
|
||||||
@ -47,17 +47,20 @@ public:
|
|||||||
size_t now = index(key, mask);
|
size_t now = index(key, mask);
|
||||||
size_t off = 0;
|
size_t off = 0;
|
||||||
size_t border = 8 <= capacity ? 8 : capacity;
|
size_t border = 8 <= capacity ? 8 : capacity;
|
||||||
|
|
||||||
while (off < border) {
|
while (off < border) {
|
||||||
Combined other = array[now];
|
Combined other = array[now];
|
||||||
if (other.valid()) {
|
if (other.valid()) {
|
||||||
auto other_off = other.off();
|
auto other_off = other.off();
|
||||||
if (other_off == off && key == other.ptr()->get_key()) {
|
if (other_off == off && key == other.ptr()->get_key()) {
|
||||||
|
// Found data.
|
||||||
return OptionPtr<D>(other.ptr());
|
return OptionPtr<D>(other.ptr());
|
||||||
|
|
||||||
} else if (other_off < off) { // Other is rich
|
} else if (other_off < off) { // Other is rich
|
||||||
break;
|
break;
|
||||||
} // Else other has equal or greater offset, so he is poor.
|
} // Else other has equal or greater offset, so he is poor.
|
||||||
} else {
|
} else {
|
||||||
|
// Empty slot means that there is no searched data.
|
||||||
break;
|
break;
|
||||||
}
|
}
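// Illustrative sketch of the probe step used in find above:
// "now = (now + 1) & mask" relies on a power-of-two capacity so that masking
// wraps the index around the array.
#include <cstddef>
#include <cstdio>

int main()
{
    const std::size_t capacity = 8; // power of two
    const std::size_t mask = capacity - 1;
    std::size_t now = 6;            // preferred slot of some key
    for (std::size_t off = 0; off < 4; ++off) {
        std::printf("offset %zu probes slot %zu\n", off, now);
        now = (now + 1) & mask;     // visits 6, 7, 0, 1
    }
}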
|
||||||
|
|
||||||
@ -76,16 +79,20 @@ public:
|
|||||||
size_t now = index(key, mask);
|
size_t now = index(key, mask);
|
||||||
size_t off = 0;
|
size_t off = 0;
|
||||||
size_t border = 8 <= capacity ? 8 : capacity;
|
size_t border = 8 <= capacity ? 8 : capacity;
|
||||||
|
|
||||||
while (off < border) {
|
while (off < border) {
|
||||||
Combined other = array[now];
|
Combined other = array[now];
|
||||||
if (other.valid()) {
|
if (other.valid()) {
|
||||||
auto other_off = other.off();
|
auto other_off = other.off();
|
||||||
if (other_off == off && key == other.ptr()->get_key()) {
|
if (other_off == off && key == other.ptr()->get_key()) {
|
||||||
|
// Element already exists.
|
||||||
return false;
|
return false;
|
||||||
|
|
||||||
} else if (other_off < off) { // Other is rich
|
} else if (other_off < off) { // Other is rich
|
||||||
|
// Set data.
|
||||||
array[now] = Combined(data, off);
|
array[now] = Combined(data, off);
|
||||||
|
|
||||||
|
// Move other data to the higher indexes,
|
||||||
while (other.increment_off()) {
|
while (other.increment_off()) {
|
||||||
now = (now + 1) & mask;
|
now = (now + 1) & mask;
|
||||||
auto tmp = array[now];
|
auto tmp = array[now];
|
||||||
@ -97,9 +104,11 @@ public:
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
data = other.ptr();
|
data = other.ptr();
|
||||||
break; // Cant insert removed element
|
break; // Can't insert the removed element because it would
|
||||||
|
// be too far from its real place.
|
||||||
} // Else other has equal or greater offset, so he is poor.
|
} // Else other has equal or greater offset, so he is poor.
|
||||||
} else {
|
} else {
|
||||||
|
// Data can be placed in this empty slot.
|
||||||
array[now] = Combined(data, off);
|
array[now] = Combined(data, off);
|
||||||
count++;
|
count++;
|
||||||
return true;
|
return true;
|
||||||
@ -110,6 +119,8 @@ public:
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// There isn't enough space for the element pointed to by data, so we must
|
||||||
|
// increase array.
|
||||||
increase_size();
|
increase_size();
|
||||||
return insert(data);
|
return insert(data);
|
||||||
}
|
}
|
||||||
@ -121,6 +132,7 @@ public:
|
|||||||
size_t now = index(key, mask);
|
size_t now = index(key, mask);
|
||||||
size_t off = 0;
|
size_t off = 0;
|
||||||
size_t border = 8 <= capacity ? 8 : capacity;
|
size_t border = 8 <= capacity ? 8 : capacity;
|
||||||
|
|
||||||
while (off < border) {
|
while (off < border) {
|
||||||
Combined other = array[now];
|
Combined other = array[now];
|
||||||
if (other.valid()) {
|
if (other.valid()) {
|
||||||
@ -130,6 +142,7 @@ public:
|
|||||||
key == other_ptr->get_key()) { // Found it
|
key == other_ptr->get_key()) { // Found it
|
||||||
|
|
||||||
auto before = now;
|
auto before = now;
|
||||||
|
// We must move the other elements one slot lower.
|
||||||
do {
|
do {
|
||||||
// This is alright even for off=0 on found element
|
// This is alright even for off=0 on found element
|
||||||
// because it wont be seen.
|
// because it wont be seen.
|
||||||
@ -139,7 +152,10 @@ public:
|
|||||||
before = now;
|
before = now;
|
||||||
now = (now + 1) & mask;
|
now = (now + 1) & mask;
|
||||||
other = array[now];
|
other = array[now];
|
||||||
} while (other.valid() && other.off() > 0);
|
} while (other.valid() &&
|
||||||
|
other.off() > 0); // Exit if we encounter an empty
|
||||||
|
// slot or data which is exactly
|
||||||
|
// in the slot in which it wants to be.
|
||||||
|
|
||||||
array[before] = Combined();
|
array[before] = Combined();
|
||||||
count--;
|
count--;
|
||||||
@ -149,6 +165,7 @@ public:
|
|||||||
break;
|
break;
|
||||||
} // Else other has equal or greater offset, so he is poor.
|
} // Else other has equal or greater offset, so he is poor.
|
||||||
} else {
|
} else {
|
||||||
|
// If the element to be removed existed in the map, it would be here.
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -15,6 +15,27 @@
|
|||||||
// D must have method K& get_key()
|
// D must have method K& get_key()
|
||||||
// K must be comparable with ==.
|
// K must be comparable with ==.
|
||||||
// HashMap behaves as if it isn't the owner of the entries.
|
// HashMap behaves as if it isn't the owner of the entries.
|
||||||
|
//
|
||||||
|
// Main idea of this MultiMap is a tweak of logic in RobinHood.
|
||||||
|
// RobinHood offset from the preferred slot is equal to the number of slots between
|
||||||
|
// [current slot and preferred slot>.
|
||||||
|
// While in this flavour of "multi RobinHood" the offset from the preferred slot is equal
|
||||||
|
// to the number of differently keyed elements between its current slot and
|
||||||
|
// preferred slot.
|
||||||
|
// In the following examples slots will have keys as characters. So something
|
||||||
|
// like this: |a| will mean that in this slot there is data with key 'a'.
|
||||||
|
// like this: | | will mean an empty slot.
|
||||||
|
// like this: |...| will mean an arbitrary number of slots.
|
||||||
|
// like this: |b:a| will mean that a wants to be in this slot but b is in it.
|
||||||
|
//
|
||||||
|
// Examples:
|
||||||
|
// |...|a:a|...| => off(a) = 0
|
||||||
|
// |...|a:a|a|...|a|...| => off(a) = 0
|
||||||
|
// |...|b:a|a|...| => off(a) = 1
|
||||||
|
// |...|b:a|b|...|b|a|...| => off(a) = 1
|
||||||
|
// |...|c:a|b|a|...| => off(a) = 2
|
||||||
|
// |...|c:a|c|...|c|b|...|b||a|...|a|...| => off(a) = 2
|
||||||
|
// ...
|
||||||
template <class K, class D, size_t init_size_pow2 = 2>
|
template <class K, class D, size_t init_size_pow2 = 2>
|
||||||
class RhHashMultiMap : public RhBase<K, D, init_size_pow2>
|
class RhHashMultiMap : public RhBase<K, D, init_size_pow2>
|
||||||
{
|
{
|
||||||
@ -124,14 +145,18 @@ public:
|
|||||||
bool multi = false;
|
bool multi = false;
|
||||||
if (other_off == off && other.ptr()->get_key() == key) {
|
if (other_off == off && other.ptr()->get_key() == key) {
|
||||||
// Found the same
|
// Found the same
|
||||||
|
// Must skip same-keyed values to insert the new value at the
|
||||||
|
// end.
|
||||||
do {
|
do {
|
||||||
now = (now + 1) & mask;
|
now = (now + 1) & mask;
|
||||||
other = array[now];
|
other = array[now];
|
||||||
if (!other.valid()) {
|
if (!other.valid()) {
|
||||||
|
// Found an empty slot in which data can be added.
|
||||||
set(now, data, off);
|
set(now, data, off);
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
} while (other.equal(key, off));
|
} while (other.equal(key, off));
|
||||||
|
// There is no empty slot after same keyed values.
|
||||||
multi = true;
|
multi = true;
|
||||||
} else if (other_off > off ||
|
} else if (other_off > off ||
|
||||||
other_poor(other, mask, start,
|
other_poor(other, mask, start,
|
||||||
@ -142,6 +167,8 @@ public:
|
|||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Data will be inserted at the current slot and all other data
|
||||||
|
// will be displaced by one slot.
|
||||||
array[now] = Combined(data, off);
|
array[now] = Combined(data, off);
|
||||||
auto start_insert = now;
|
auto start_insert = now;
|
||||||
while (is_off_adjusted(other, mask, start_insert, now,
|
while (is_off_adjusted(other, mask, start_insert, now,
|
||||||
@ -152,6 +179,7 @@ public:
|
|||||||
array[now] = other;
|
array[now] = other;
|
||||||
other = tmp;
|
other = tmp;
|
||||||
if (!other.valid()) {
|
if (!other.valid()) {
|
||||||
|
// Found an empty slot, which means I can finish now.
|
||||||
count++;
|
count++;
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
@ -159,12 +187,14 @@ public:
|
|||||||
data = other.ptr();
|
data = other.ptr();
|
||||||
break; // Cant insert removed element
|
break; // Cant insert removed element
|
||||||
} else {
|
} else {
|
||||||
|
// Found empty slot for data.
|
||||||
set(now, data, off);
|
set(now, data, off);
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// There isn't enough space for data.
|
||||||
increase_size();
|
increase_size();
|
||||||
add(data);
|
add(data);
|
||||||
}
|
}
|
||||||
@ -179,14 +209,18 @@ public:
|
|||||||
size_t off = 0;
|
size_t off = 0;
|
||||||
size_t border = 8 <= capacity ? 8 : capacity;
|
size_t border = 8 <= capacity ? 8 : capacity;
|
||||||
Combined other = array[now];
|
Combined other = array[now];
|
||||||
|
|
||||||
while (other.valid() && off < border) {
|
while (other.valid() && off < border) {
|
||||||
const size_t other_off = other.off();
|
const size_t other_off = other.off();
|
||||||
if (other_off == off && key == other.ptr()->get_key()) {
|
if (other_off == off && key == other.ptr()->get_key()) {
|
||||||
|
// Found same key data.
|
||||||
auto founded = capacity;
|
auto founded = capacity;
|
||||||
size_t started = now;
|
size_t started = now;
|
||||||
bool multi = false;
|
bool multi = false;
|
||||||
|
// Must find slot with searched data.
|
||||||
do {
|
do {
|
||||||
if (other.ptr() == data) {
|
if (other.ptr() == data) {
|
||||||
|
// Found it.
|
||||||
founded = now;
|
founded = now;
|
||||||
}
|
}
|
||||||
now = (now + 1) & mask;
|
now = (now + 1) & mask;
|
||||||
@ -196,11 +230,14 @@ public:
|
|||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
} while (other.equal(key, off) && (multi = true));
|
} while (other.equal(key, off) && (multi = true));
|
||||||
// multi = true is correct
|
|
||||||
if (founded == capacity) {
|
if (founded == capacity) {
|
||||||
|
// Didn't find the data.
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Data will be removed by moving other data by one slot
|
||||||
|
// before.
|
||||||
auto bef = before_index(now, mask);
|
auto bef = before_index(now, mask);
|
||||||
array[founded] = array[bef];
|
array[founded] = array[bef];
|
||||||
|
|
||||||
@ -223,6 +260,8 @@ public:
|
|||||||
break;
|
break;
|
||||||
|
|
||||||
} else { // Else other has equal or greater off, so he is poor.
|
} else { // Else other has equal or greater off, so he is poor.
|
||||||
|
// Must skip values of same keys but different key than
|
||||||
|
// data.
|
||||||
if (UNLIKELY(skip(now, other, other_off, mask))) {
|
if (UNLIKELY(skip(now, other, other_off, mask))) {
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
@ -317,54 +356,3 @@ private:
|
|||||||
(end < start && p <= start && p > end);
|
(end < start && p <= start && p > end);
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
// Unnecessary
|
|
||||||
// // Removes element. Returns removed element if it existed. It doesn't
|
|
||||||
// // specify which element from same key group will be removed.
|
|
||||||
// OptionPtr<D> remove(const K &key_in)
|
|
||||||
// {
|
|
||||||
//
|
|
||||||
// if (count > 0) {
|
|
||||||
// auto key = std::ref(key_in);
|
|
||||||
// size_t mask = this->mask();
|
|
||||||
// size_t now = index(key, mask);
|
|
||||||
// size_t off = 0;
|
|
||||||
// size_t checked = 0;
|
|
||||||
// size_t border = 8 <= capacity ? 8 : capacity;
|
|
||||||
// Combined other = array[now];
|
|
||||||
// while (other.valid() && off < border) {
|
|
||||||
// auto other_off = other.off();
|
|
||||||
// bool multi = false;
|
|
||||||
// if (other_off == off && key == other.ptr()->get_key()) {
|
|
||||||
// do {
|
|
||||||
// now = (now + 1) & mask;
|
|
||||||
// other = array[now];
|
|
||||||
// if (!other.valid()) {
|
|
||||||
// break;
|
|
||||||
// }
|
|
||||||
// other_off = other.off();
|
|
||||||
// } while (other_off == off &&
|
|
||||||
// other.ptr()->get_key() == key &&
|
|
||||||
// (multi = true)); // multi = true is correct
|
|
||||||
//
|
|
||||||
// auto bef = before_index(now, mask);
|
|
||||||
// auto ret = OptionPtr<D>(array[bef].ptr());
|
|
||||||
//
|
|
||||||
// move_before(now, bef, other, mask, multi);
|
|
||||||
// return ret;
|
|
||||||
//
|
|
||||||
// } else if (other_off < off) { // Other is rich
|
|
||||||
// break;
|
|
||||||
//
|
|
||||||
// } else { // Else other has equal or greater off, so he is
|
|
||||||
// poor.
|
|
||||||
// if (UNLIKELY(skip(now, other, other_off, mask))) {
|
|
||||||
// break;
|
|
||||||
// }
|
|
||||||
// off++;
|
|
||||||
// }
|
|
||||||
// }
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// return OptionPtr<D>();
|
|
||||||
// }
|
|
||||||
|
@ -20,9 +20,17 @@ class Db
|
|||||||
public:
|
public:
|
||||||
using sptr = std::shared_ptr<Db>;
|
using sptr = std::shared_ptr<Db>;
|
||||||
|
|
||||||
|
// import_snapshot will in constructor import latest snapshot into the db.
|
||||||
|
// NOTE: explicit is here to prevent the compiler from converting a const char *
|
||||||
|
// into a bool.
|
||||||
explicit Db(bool import_snapshot = true);
|
explicit Db(bool import_snapshot = true);
|
||||||
|
|
||||||
|
// import_snapshot will in constructor import latest snapshot into the db.
|
||||||
Db(const char *name, bool import_snapshot = true);
|
Db(const char *name, bool import_snapshot = true);
|
||||||
|
|
||||||
|
// import_snapshot will in constructor import latest snapshot into the db.
|
||||||
Db(const std::string &name, bool import_snapshot = true);
|
Db(const std::string &name, bool import_snapshot = true);
|
||||||
|
|
||||||
Db(const Db &db) = delete;
|
Db(const Db &db) = delete;
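// Illustrative sketch of what the NOTE above guards against: with a non-explicit
// bool constructor a const char * can silently convert (pointer -> bool) and
// construct the object; explicit blocks that. The Demo types below are
// hypothetical, not the real Db.
struct WithImplicit
{
    WithImplicit(bool) {}
};

struct WithExplicit
{
    explicit WithExplicit(bool) {}
};

void open_implicit(WithImplicit) {}
void open_explicit(WithExplicit) {}

int main()
{
    const char *name = "graph_db";
    open_implicit(name);    // compiles: the pointer silently becomes "true"
    // open_explicit(name); // would not compile: explicit blocks the conversion
    return 0;
}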
|
||||||
|
|
||||||
private:
|
private:
|
||||||
|
@ -53,7 +53,9 @@ public:
|
|||||||
DbAccessor(Db &db, tx::Transaction &t);
|
DbAccessor(Db &db, tx::Transaction &t);
|
||||||
|
|
||||||
//*******************VERTEX METHODS
|
//*******************VERTEX METHODS
|
||||||
|
// Returns iterator of VertexAccessor for all vertices.
|
||||||
// TODO: Implement a class specially for this return
|
// TODO: Implement a class specially for this return
|
||||||
|
// NOTE: This implementation must be here to be able to infer the return type.
|
||||||
auto vertex_access()
|
auto vertex_access()
|
||||||
{
|
{
|
||||||
return iter::make_map(
|
return iter::make_map(
|
||||||
@ -63,12 +65,16 @@ public:
|
|||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Optionally return the vertex with the given internal Id.
|
||||||
Option<const VertexAccessor> vertex_find(const Id &id);
|
Option<const VertexAccessor> vertex_find(const Id &id);
|
||||||
|
|
||||||
// Creates new Vertex and returns filled VertexAccessor.
|
// Creates new Vertex and returns filled VertexAccessor.
|
||||||
VertexAccessor vertex_insert();
|
VertexAccessor vertex_insert();
|
||||||
|
|
||||||
// ******************* EDGE METHODS
|
// ******************* EDGE METHODS
|
||||||
|
// Returns iterator of EdgeAccessor for all edges.
|
||||||
|
// TODO: Implement a class specially for this return
|
||||||
|
// NOTE: This implementation must be here to be able to infer the return type.
|
||||||
auto edge_access()
|
auto edge_access()
|
||||||
{
|
{
|
||||||
return iter::make_map(
|
return iter::make_map(
|
||||||
@ -78,6 +84,7 @@ public:
|
|||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Optionally return Edge with given internal Id.
|
||||||
Option<const EdgeAccessor> edge_find(const Id &id);
|
Option<const EdgeAccessor> edge_find(const Id &id);
|
||||||
|
|
||||||
// Creates new Edge and returns filled EdgeAccessor.
|
// Creates new Edge and returns filled EdgeAccessor.
|
||||||
@ -89,15 +96,17 @@ public:
|
|||||||
VertexAccessor const &to);
|
VertexAccessor const &to);
|
||||||
|
|
||||||
// ******************* LABEL METHODS
|
// ******************* LABEL METHODS
|
||||||
|
// Finds or creates a label with the given name.
|
||||||
const Label &label_find_or_create(const char *name);
|
const Label &label_find_or_create(const char *name);
|
||||||
|
|
||||||
|
// True if label with name exists.
|
||||||
bool label_contains(const char *name);
|
bool label_contains(const char *name);
|
||||||
|
|
||||||
// ******************** TYPE METHODS
|
// ******************** TYPE METHODS
|
||||||
|
// Finds or creates edge_type with given name.
|
||||||
const EdgeType &type_find_or_create(const char *name);
|
const EdgeType &type_find_or_create(const char *name);
|
||||||
|
|
||||||
|
// True if edge_type with given name exists.
|
||||||
bool type_contains(const char *name);
|
bool type_contains(const char *name);
|
||||||
|
|
||||||
// ******************** PROPERTY METHODS
|
// ******************** PROPERTY METHODS
|
||||||
@ -135,6 +144,8 @@ public:
|
|||||||
|
|
||||||
// True if commit was successful, or false if transaction was aborted.
|
// True if commit was successful, or false if transaction was aborted.
|
||||||
bool commit();
|
bool commit();
|
||||||
|
|
||||||
|
// Aborts transaction.
|
||||||
void abort();
|
void abort();
|
||||||
|
|
||||||
private:
|
private:
|
||||||
@ -146,11 +157,3 @@ private:
|
|||||||
|
|
||||||
DbTransaction db_transaction;
|
DbTransaction db_transaction;
|
||||||
};
|
};
|
||||||
|
|
||||||
// ********************** CONVENIENT FUNCTIONS
|
|
||||||
|
|
||||||
template <class R>
|
|
||||||
bool option_fill(Option<R> &o)
|
|
||||||
{
|
|
||||||
return o.is_present() && o.get().fill();
|
|
||||||
}
|
|
||||||
|
@ -28,14 +28,20 @@ public:
|
|||||||
|
|
||||||
// Cleans edge part of database. MUST be called by one cleaner thread at
|
// Cleans edge part of database. MUST be called by one cleaner thread at
|
||||||
// one time.
|
// one time.
|
||||||
|
// TODO: Should be extracted to a separate class which can enforce one thread
|
||||||
|
// at a time.
|
||||||
void clean_edge_section();
|
void clean_edge_section();
|
||||||
|
|
||||||
// Cleans vertex part of database. MUST be called by one cleaner thread at
|
// Cleans vertex part of database. MUST be called by one cleaner thread at
|
||||||
// one time..
|
// one time..
|
||||||
|
// TODO: Should be extracted to a separate class which can enforce one thread
|
||||||
|
// at a time.
|
||||||
void clean_vertex_section();
|
void clean_vertex_section();
|
||||||
|
|
||||||
// Updates indexes of Vertex/Edges in index_updates. True if indexes are
|
// Updates indexes of Vertex/Edges in index_updates. True if indexes are
|
||||||
// updated successfully. False means that transaction failed.
|
// updated successfully. False means that transaction failed.
|
||||||
|
// TODO: Should be moved to the Indexes class, which will take this DbTransaction
|
||||||
|
// as an argument.
|
||||||
bool update_indexes();
|
bool update_indexes();
|
||||||
|
|
||||||
// Will update indexes for given element TG::record_t. Actual update happens
|
// Will update indexes for given element TG::record_t. Actual update happens
|
||||||
|
@ -10,9 +10,10 @@ class Cleaning
|
|||||||
|
|
||||||
public:
|
public:
|
||||||
// How much sec is a cleaning_cycle in which cleaner will clean at most
|
// How much sec is a cleaning_cycle in which cleaner will clean at most
|
||||||
// once.
|
// once. Starts cleaner thread.
|
||||||
Cleaning(ConcurrentMap<std::string, Db> &dbs, size_t cleaning_cycle);
|
Cleaning(ConcurrentMap<std::string, Db> &dbs, size_t cleaning_cycle);
|
||||||
|
|
||||||
|
// Destroys this object after this thread joins cleaning thread.
|
||||||
~Cleaning();
|
~Cleaning();
|
||||||
|
|
||||||
private:
|
private:
|
||||||
@ -22,5 +23,6 @@ private:
|
|||||||
|
|
||||||
std::vector<std::unique_ptr<Thread>> cleaners;
|
std::vector<std::unique_ptr<Thread>> cleaners;
|
||||||
|
|
||||||
|
// Whether cleaning should continue.
|
||||||
std::atomic<bool> cleaning = {true};
|
std::atomic<bool> cleaning = {true};
|
||||||
};
|
};
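// Illustrative sketch of the pattern implied by the members above: a cleaner
// thread wakes up periodically and stops once the atomic flag is cleared; the
// destructor clears the flag and joins, as ~Cleaning does per Db with its
// configurable cleaning_cycle. The delays below are arbitrary demo values.
#include <atomic>
#include <chrono>
#include <cstdio>
#include <thread>

int main()
{
    std::atomic<bool> cleaning{true};

    std::thread cleaner([&cleaning] {
        while (cleaning.load()) {
            std::printf("cleaning pass\n");
            std::this_thread::sleep_for(std::chrono::milliseconds(100));
        }
    });

    std::this_thread::sleep_for(std::chrono::milliseconds(350));
    cleaning = false; // ask the cleaner to stop ...
    cleaner.join();   // ... and wait for it, as the destructor would
}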
|
||||||
|
@ -30,7 +30,9 @@ private:
|
|||||||
// currently active database
|
// currently active database
|
||||||
std::atomic<Db *> active_db;
|
std::atomic<Db *> active_db;
|
||||||
|
|
||||||
|
// Cleaning thread.
|
||||||
Cleaning cleaning = {dbs, CONFIG_INTEGER(config::CLEANING_CYCLE_SEC)};
|
Cleaning cleaning = {dbs, CONFIG_INTEGER(config::CLEANING_CYCLE_SEC)};
|
||||||
|
|
||||||
|
// Snapshotting thread.
|
||||||
Snapshoter snapshoter = {dbs, CONFIG_INTEGER(config::SNAPSHOT_CYCLE_SEC)};
|
Snapshoter snapshoter = {dbs, CONFIG_INTEGER(config::SNAPSHOT_CYCLE_SEC)};
|
||||||
};
|
};
|
||||||
|
@ -98,7 +98,8 @@ public:
|
|||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
|
||||||
// Extracts parts while stripping data of array chars and qutation marks.
|
// Extracts parts of str while stripping array chars and quotation
|
||||||
|
// marks. Parts are separated by the delimiter.
|
||||||
void extract(char *str, const char delimiter, vector<char *> &sub_str)
|
void extract(char *str, const char delimiter, vector<char *> &sub_str)
|
||||||
{
|
{
|
||||||
int head = 0;
|
int head = 0;
|
||||||
@ -148,12 +149,9 @@ public:
|
|||||||
}
|
}
|
||||||
|
|
||||||
sub_str.push_back(&str[head]);
|
sub_str.push_back(&str[head]);
|
||||||
//
|
|
||||||
// for (auto s : sub_str) {
|
|
||||||
// cout << "#" << s;
|
|
||||||
// }
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Optionally return the vertex with the given import-local id if it exists.
|
||||||
Option<VertexAccessor> const &get_vertex(size_t id)
|
Option<VertexAccessor> const &get_vertex(size_t id)
|
||||||
{
|
{
|
||||||
if (vertices.size() > id) {
|
if (vertices.size() > id) {
|
||||||
@ -168,6 +166,8 @@ public:
|
|||||||
DbAccessor &db;
|
DbAccessor &db;
|
||||||
Logger logger;
|
Logger logger;
|
||||||
|
|
||||||
|
// Various marks and delimiters. They can be freely changed here and
|
||||||
|
// everything will work.
|
||||||
char parts_mark = ',';
|
char parts_mark = ',';
|
||||||
char parts_array_mark = ',';
|
char parts_array_mark = ',';
|
||||||
char type_mark = ':';
|
char type_mark = ':';
|
||||||
@ -176,6 +176,6 @@ public:
|
|||||||
char closed_bracket = ']';
|
char closed_bracket = ']';
|
||||||
|
|
||||||
protected:
|
protected:
|
||||||
// All created vertices which have import local id
|
// All created vertices which have import local id.
|
||||||
vector<Option<VertexAccessor>> vertices;
|
vector<Option<VertexAccessor>> vertices;
|
||||||
};
|
};
|
||||||
|
@ -46,7 +46,19 @@ bool equal_str(const char *a, const char *b) { return strcasecmp(a, b) == 0; }

// CSV importer for importing multiple files regarding same graph.
// CSV format of file should be following:
// header
// line of data
// line of data
// ...
//
// Where header should be composed of parts splited by parts_mark. Number of
// parts should be same as number of parts in every line of data. Parts should
// be of format name:type where name is alfanumeric identifyer of data in thath
// column and type should be one of: id, from, to, label, type, bool, int, long,
// float, double, string, bool[], int[], long[], float[], double[], string[].
// If name is missing the column data wont be saved into the elements.
// if the type is missing the column will be interperted as type string. If
// neither name nor type are present column will be skipped.
class CSVImporter : public BaseImporter
{

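For illustration only, files that follow the header convention documented above could look like this (column names and values are invented, not taken from the commit):

    nodes.csv
        id:id,name:string,age:int,labels:label
        0,John,25,Person
        1,Ana,30,Person

    edges.csv
        from:from,to:to,type:type,since:int
        0,1,KNOWS,2010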
@ -70,6 +82,8 @@ public:

private:
// Loads data from file and returns number of loaded name.
// TG - TypeGroup
// F - function which will create element from filled element skelleton.
template <class TG, class F>
size_t import(std::fstream &file, F f, bool vertex)
{
@ -104,10 +118,6 @@ private:
size_t line_no = 1;
ElementSkeleton es(db);
while (std::getline(file, line)) {
// if (line_no % 1000 == 0) {
// cout << line_no << endl;
// }
// cout << line << endl;
sub_str.clear();
es.clear();

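A rough sketch of a call into this template; the lambda body and the ElementSkeleton method it uses are assumptions made for illustration, the code above only requires that F builds an element from the filled skeleton:

    // Hypothetical call site for a vertex file.
    size_t loaded = import<TypeGroupVertex>(
        file, [&](ElementSkeleton &es) { return es.add_vertex(); }, true);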
@ -196,6 +206,7 @@ private:
if (tmp_vec.size() > 2) {
logger.error("To much sub parts in header part");
return make_option<unique_ptr<Filler>>();

} else if (tmp_vec.size() < 2) {
if (tmp_vec.size() == 1) {
logger.warn("Column: {} doesn't have specified type so string "
@ -203,16 +214,19 @@ private:
tmp_vec[0]);
name = tmp_vec[0];
type = _string;

} else {
logger.warn("Empty colum definition, skiping column.");
std::unique_ptr<Filler> f(new SkipFiller());
return make_option(std::move(f));
}

} else {
name = tmp_vec[0];
type = tmp_vec[1];
}

// Create adequat filler
if (equal_str(type, "id")) {
std::unique_ptr<Filler> f(
name[0] == '\0' ? new IdFiller<TG>()
@ -245,8 +259,6 @@ private:

// *********************** PROPERTIES
} else if (equal_str(type, "bool")) {
// return make_filler_property<BoolFiller>(vertex, name,
// Flags::Bool);
std::unique_ptr<Filler> f(
new BoolFiller<TG>(property_key<TG>(name, Flags::Bool)));
return make_option(std::move(f));
@ -6,7 +6,7 @@
#include "storage/vertex_accessor.hpp"

// Holder for element data which he can then insert as a vertex or edge into the
// database depending on the available data.
// database depending on the available data and called add_* method.
class ElementSkeleton
{

@ -99,15 +99,6 @@ public:
Option<size_t> element_id() { return el_id; }

private:
// template <class A>
// void add_propreties(A &ra)
// {
// for (auto prop : properties) {
// assert(prop.prop.is_present());
// ra.set(prop.key, prop.prop.take());
// }
// }

DbAccessor &db;

Option<size_t> el_id;
@ -5,6 +5,10 @@
#include "import/fillings/filler.hpp"
#include "utils/array_store.hpp"

// Parses Array of elements type T.
// TG - Type group
// T - type of element in array.
// A - property type in database for holding arrays.
template <class TG, class T, class A>
class ArrayFiller : public Filler
{
@ -6,6 +6,8 @@
#include "storage/model/properties/flags.hpp"
#include "storage/model/properties/property_family.hpp"

// Parses boolean.
// TG - Type group
template <class TG>
class BoolFiller : public Filler
{

@ -6,6 +6,8 @@
#include "storage/model/properties/flags.hpp"
#include "storage/model/properties/property_family.hpp"

// Parses double.
// TG - Type group
template <class TG>
class DoubleFiller : public Filler
{

@ -3,6 +3,8 @@
#include "import/element_skeleton.hpp"
#include "utils/option.hpp"

// Common class for varius classes which accept one part from data line in
// import, parses it and adds it into element skelleton.
class Filler
{
public:

@ -6,6 +6,8 @@
#include "storage/model/properties/flags.hpp"
#include "storage/model/properties/property_family.hpp"

// Parses float.
// TG - Type group
template <class TG>
class FloatFiller : public Filler
{

@ -6,6 +6,7 @@
#include "storage/model/properties/flags.hpp"
#include "storage/model/properties/property_family.hpp"

// Parses from id of vertex for edge.
class FromFiller : public Filler
{

@ -2,6 +2,8 @@

#include "import/fillings/filler.hpp"

// Parses import local Id.
// TG - Type group
template <class TG>
class IdFiller : public Filler
{

@ -6,6 +6,8 @@
#include "storage/model/properties/flags.hpp"
#include "storage/model/properties/property_family.hpp"

// Parses int32.
// TG - Type group
template <class TG>
class Int32Filler : public Filler
{

@ -6,6 +6,8 @@
#include "storage/model/properties/flags.hpp"
#include "storage/model/properties/property_family.hpp"

// Parses int64.
// TG - Type group
template <class TG>
class Int64Filler : public Filler
{

@ -3,6 +3,7 @@
#include "database/db_accessor.hpp"
#include "import/fillings/filler.hpp"

// Parses array of labels.
class LabelFiller : public Filler
{

@ -6,6 +6,7 @@
#include "storage/model/properties/flags.hpp"
#include "storage/model/properties/property_family.hpp"

// Skips column.
class SkipFiller : public Filler
{

@ -6,6 +6,8 @@
#include "storage/model/properties/flags.hpp"
#include "storage/model/properties/property_family.hpp"

// Parses string.
// TG - Type group
template <class TG>
class StringFiller : public Filler
{

@ -6,6 +6,7 @@
#include "storage/model/properties/flags.hpp"
#include "storage/model/properties/property_family.hpp"

// Parses to import local id of vertex for edge.
class ToFiller : public Filler
{

@ -3,6 +3,7 @@
#include "database/db_accessor.hpp"
#include "import/fillings/filler.hpp"

// Parses type of edge.
class TypeFiller : public Filler
{

@ -336,6 +336,21 @@ auto load_queries(Db &db)
.has_property(prop_name, args[0])
.clone_to(n) // Savepoint
.replace(r); // Load savepoint
// Above statments + .to().for_all([&](auto m) {}) will unrool into:
// for(auto edge:type.index.for_range(t)){
// auto from_vertex=edge.from();
// if(from_vertex.fill()){
// auto &prop=from_vertex.at(prop_name);
// if(prop==args[0]){
// auto to_vertex=edge.to();
// if(to_vertex.fill()){
// // Here you have all data.
// // n == from_vertex
// // m == to_vertex
// }
// }
// }
// }

auto it_vertex = t.vertex_access()
.fill()
@ -343,6 +358,24 @@ auto load_queries(Db &db)
.clone_to(n) // Savepoint
.out()
.type(type);
// Above statments + .to().for_all([&](auto m) {}) will unrool into:
// for(auto from_vertex:t.vertex_access(t)){
// if(from_vertex.fill()){
// auto &prop=from_vertex.at(prop_name);
// if(prop==args[0]){
// for(auto edge:from_vertex.out()){
// if(edge.edge_type() == type){
// auto to_vertex=edge.to();
// if(to_vertex.fill()){
// // Here you have all data.
// // n == from_vertex
// // m == to_vertex
// }
// }
// }
// }
// }
// }

if (it_type.count() > it_vertex.count()) {
// Going through vertices wiil probably be faster
@ -13,14 +13,17 @@ namespace serialization
template <class W>
void serialize_vertex(VertexAccessor const &v, W &writer)
{
// Serialize vertex id
writer.start_vertex(v.id());

// Serialize labels
auto const &labels = v.labels();
writer.label_count(labels.size());
for (auto &label : labels) {
writer.label(label.get().str());
}

// Serialize propertys
auto const &propertys = v.properties();
writer.property_count(propertys.size());
for (auto &prop : propertys) {
@ -35,10 +38,13 @@ void serialize_vertex(VertexAccessor const &v, W &writer)
template <class W>
void serialize_edge(EdgeAccessor const &e, W &writer)
{
// Serialize to and from vertices ids.
writer.start_edge(e.from().id(), e.to().id());

// Serialize type
writer.edge_type(e.edge_type().str());

// Serialize propertys
auto const &propertys = e.properties();
writer.property_count(propertys.size());
for (auto &prop : propertys) {
@ -57,12 +63,14 @@ std::pair<Id, VertexAccessor> deserialize_vertex(DbAccessor &db, D &reader)
auto v = db.vertex_insert();
auto old_id = reader.vertex_start();

// Deserialize labels
std::string s;
for (auto i = reader.label_count(); i > 0; i--) {
auto &label_key = db.label_find_or_create(reader.label().c_str());
v.add_label(label_key);
}

// Deserialize propertys
for (auto i = reader.property_count(); i > 0; i--) {
auto &family =
db.vertex_property_family_get(reader.property_name().c_str());
@ -81,14 +89,17 @@ template <class D, class S>
EdgeAccessor deserialize_edge(DbAccessor &db, D &reader, S &store)
{
auto ids = reader.edge_start();
// Deserialize from and to ids of vertices.
VertexAccessor &from = store.at(ids.first);
VertexAccessor &to = store.at(ids.second);

auto e = db.edge_insert(from, to);

// Deserialize type
auto &edge_type_key = db.type_find_or_create(reader.edge_type().c_str());
e.edge_type(edge_type_key);

// Deserialize properties
for (auto i = reader.property_count(); i > 0; i--) {
auto &family =
db.edge_property_family_get(reader.property_name().c_str());
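For reference, the writer passed to serialize_vertex/serialize_edge only needs the calls used above. A minimal sketch of such a type follows; everything about it except the call shapes is an assumption for illustration:

    // Hypothetical writer that just counts what it is asked to serialize.
    struct CountingWriter
    {
        size_t labels = 0;
        size_t properties = 0;
        void start_vertex(const Id &) {}
        void start_edge(const Id &, const Id &) {}
        void edge_type(const std::string &) {}
        void label_count(size_t n) { labels += n; }
        void label(const std::string &) {}
        void property_count(size_t n) { properties += n; }
        // ... per-property callbacks as required by the property traverser ...
    };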
@ -12,6 +12,9 @@
// Decodes stored snapshot.
// Caller must respect loading order to be same as stored order with
// SnapshotEncoder.
// Main idea of knowing when something starts and ends is at certain points try
// to deserialize string and compare it with logically expected string seted by
// the SnapshotEncoder.
class SnapshotDecoder : public GraphDecoder
{
public:
@ -68,6 +71,8 @@ public:
T property()
{
if (decoder.is_list()) {
// Whe are deserializing an array.

auto size = decoder.list_header();
if (decoder.is_bool()) {
ArrayStore<bool> store;
@ -100,6 +105,8 @@ public:
return T::handle(std::move(store));
}
} else {
// Whe are deserializing a primitive.

if (decoder.is_bool()) {
return T::handle(decoder.read_bool());

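The "compare with the logically expected string" idea above amounts to a check like the following sketch; the decoder method name is an assumption made for illustration, not taken from the commit:

    // Hypothetical helper: read the next string and verify it matches the
    // section marker that SnapshotEncoder is expected to have written here.
    template <class D>
    bool expect_marker(D &decoder, const std::string &expected)
    {
        return decoder.read_string() == expected;
    }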
@ -11,8 +11,7 @@
#include "utils/stream_wrapper.hpp"

// Represents creation of a snapshot. Contains all necessary informations
// for
// for write. Caller is responisble to structure his calls as following:
// write. Caller is responisble to structure his calls as following:
// * property_name_init
// * label_name_init
// * edge_type_name_init
@ -53,6 +53,7 @@ private:
// Will return different name on every call.
std::string snapshot_file(std::time_t const &now, const char *type);

// Returns name of snapshot commit file.
std::string snapshot_commit_file();

// Path to directory of database. Ensures that all necessary directorys
@ -2,7 +2,6 @@

#include "mvcc/record.hpp"
#include "storage/model/edge_model.hpp"
// #include "storage/model/properties/traversers/jsonwriter.hpp"

class Edge : public mvcc::Record<Edge>
{

@ -39,6 +39,7 @@ public:

CharStr char_str() { return CharStr(&id[0]); }

// Index of esges which have this type.
type_index_t &index() const;

private:

@ -18,6 +18,7 @@ class Garbage
public:
Garbage(tx::Engine &e) : engine(e) {}

// Will safely dispose of data.
void dispose(tx::Snapshot<Id> &&snapshot, DeleteSensitive *data);

// Cleaner thread should call this method every some time. Removes data

@ -49,8 +49,10 @@ public:
// Will change ordering of record to descending.
void set_descending();

// true if record is nullptr.
bool empty() const;

// True if this index record i valid for given Transaction.
bool is_valid(tx::Transaction &t) const;

// True if it can be removed.
@ -62,7 +64,7 @@ public:
const K key;

private:
bool descending = false;
bool descending = false; // TODO: this can be passed as template argument.
typename TG::record_t *const record{nullptr};
typename TG::vlist_t *const vlist{nullptr};
};
@ -1,40 +0,0 @@
// #pragma once
//
// TODO: DEPRICATED
//
// #include <memory>
//
// #include "data_structures/concurrent/concurrent_set.hpp"
// #include "storage/indexes/index_record.hpp"
//
// template <class T>
// class IndexRecordCollection
// {
// public:
// using index_record_t = IndexRecord<T>;
// using index_record_collection_t = ConcurrentSet<index_record_t>;
//
// IndexRecordCollection()
// : records(std::make_unique<index_record_collection_t>())
// {
// }
//
// void add(index_record_t &&record)
// {
// auto accessor = records->access();
// accessor.insert(std::forward<index_record_t>(record));
// }
//
// auto access()
// {
// return records->access();
// }
//
// // TODO: iterator and proxy
//
// private:
// std::unique_ptr<index_record_collection_t> records;
// };
//
// using VertexIndexRecordCollection = IndexRecordCollection<Vertex>;
// using EdgeIndexRecordCollection = IndexRecordCollection<Edge>;
@ -4,12 +4,14 @@
#include "storage/type_group_edge.hpp"
#include "storage/type_group_vertex.hpp"

// Record for updating indexes of edge
struct IndexUpdateEdge
{
EdgeRecord *vlist;
Edge *record;
};

// Record for updatin indexes of vertex
struct IndexUpdateVertex
{
VertexRecord *vlist;

@ -23,7 +23,7 @@ public:

// currently caller has to get index through object that contains
// the index

// TODO: redesign
//
// this was a nice try
@ -252,15 +252,20 @@ private:

auto oindex = holder.get_write(t.trans);
if (oindex.is_present()) {
// Inexed to which whe must insert is present. This wouldn't be the
// case if someone removed it.
auto index = oindex.get();

// Iterate over all elements and add them into index. Fail if
// some insert failed.
bool res = iter.all([&](auto elem) {
// Try to insert record.
if (!index->insert(elem.first.create_index_record(
std::move(elem.second)))) {

// Index is probably unique.

// Index wasn't successfully filled so whe should remove it
// an safely dispose of it.
auto owned_maybe = holder.remove_index(index);
if (owned_maybe.is_present()) {
db.garbage.dispose(db.tx_engine.snapshot(),
@ -272,6 +277,8 @@ private:
return true;
});
if (res) {
// Index has been updated accordingly and whe can activate it
// for read.
index->activate();
return true;
}
@ -286,6 +293,8 @@ private:
{
auto owned_maybe = ih.remove_index();
if (owned_maybe.is_present()) {
// Index was successfully removed so whe are responsible for
// dispoising it safely.
db.garbage.dispose(db.tx_engine.snapshot(),
owned_maybe.get().release());
return true;
@ -1,21 +0,0 @@
#pragma once

// TODO: DEPRICATED

template <class T>
struct Ascending
{
constexpr bool operator()(const T &lhs, const T &rhs) const
{
return lhs < rhs;
}
};

template <class T>
struct Descending
{
constexpr bool operator()(const T &lhs, const T &rhs) const
{
return lhs > rhs;
}
};
@ -42,6 +42,7 @@ public:

CharStr char_str() const { return CharStr(name.c_str()); }

// Index of vertices with current label.
label_index_t &index() const;

private:

@ -2,7 +2,6 @@

#include "data_structures/map/rh_hashmultimap.hpp"
#include "storage/edge_record.hpp"
// #include "storage/vertex_record.hpp"

class EdgeMap
{

@ -15,6 +15,9 @@ template <class TG, class T>
using type_key_t =
typename PropertyFamily<TG>::PropertyType::template PropertyTypeKey<T>;

// Collcetion of stored properties.
// NOTE: Currently underlying strucutre is a vector which is fine for smaller
// number of properties.
template <class TG>
class Properties
{
@ -41,10 +44,12 @@ public:
template <class T>
OptionPtr<const T> at(type_key_t<T> &key) const
{
auto f_key = key.family_key();
for (auto &prop : props) {
if (prop.key == f_key) {
if (prop.key == key) {
return OptionPtr<const T>(&(prop.template as<T>()));
if (prop.template is<T>()) {
return OptionPtr<const T>(&(prop.template as<T>()));
}
break;
}
}

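The net effect of the change above is that a typed lookup now succeeds only when both the key and the stored runtime type match, and the scan stops at the first key hit. A sketch of the intended use; how the typed key is obtained and the exact OptionPtr method names are assumptions for illustration:

    // type_key_t<Int64> age_key = ...;   // typed key taken from the property family
    OptionPtr<const Int64> age = properties.at(age_key);
    if (age.is_present()) {
        // The property exists and really holds an Int64.
    }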
@ -104,13 +104,19 @@ public:
// Ordered on POINTERS to PropertyType.
// When compared with PropertyFamilyKey behaves as PropertyFamilyKey.
template <class T>
class PropertyTypeKey : public TotalOrdering<PropertyTypeKey<T>>
class PropertyTypeKey
: public TotalOrdering<PropertyTypeKey<T>>,
public TotalOrdering<PropertyFamilyKey, PropertyTypeKey<T>>,
public TotalOrdering<PropertyTypeKey<T>, PropertyFamilyKey>
{
friend class PropertyType;

PropertyTypeKey(const PropertyType &type) : type(type) {}
public:
PropertyFamilyKey family_key() { return PropertyFamilyKey(type); }
PropertyFamilyKey family_key() const
{
return PropertyFamilyKey(type);
}

Type const &prop_type() const { return type.type; }

@ -126,6 +132,30 @@ public:
return &(lhs.type) < &(rhs.type);
}

friend bool operator==(const PropertyFamilyKey &lhs,
const PropertyTypeKey &rhs)
{
return lhs == rhs.family_key();
}

friend bool operator<(const PropertyFamilyKey &lhs,
const PropertyTypeKey &rhs)
{
return lhs < rhs.family_key();
}

friend bool operator==(const PropertyTypeKey &lhs,
const PropertyFamilyKey &rhs)
{
return lhs.family_key() == rhs;
}

friend bool operator<(const PropertyTypeKey &lhs,
const PropertyFamilyKey &rhs)
{
return lhs.family_key() < rhs;
}

private:
const PropertyType &type;
};
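With the additional TotalOrdering bases and friend operators above, a typed key can be compared against a plain family key from either side, which is what "behaves as PropertyFamilyKey" means in practice. A sketch; how the two keys are obtained is an assumption for illustration:

    // PropertyFamilyKey fam = ...;          // key of the whole family
    // PropertyTypeKey<Int32> typed = ...;   // key of one type in that family
    // Both directions now compile and agree, since the typed key degrades to
    // its family key for the comparison:
    // bool same_family = (fam == typed) && (typed == fam);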
@ -190,12 +220,15 @@ public:
friend bool operator==(const PropertyFamily &lhs,
const PropertyFamily &rhs);

// Place for index of TG::elements which have property from this family.
IndexHolder<TG, std::nullptr_t> index;

private:
const std::string name_v;

// This is exclusivly for getNull method.
PropertyType *null_type{nullptr};

// TODO: Because types wont be removed this could be done with more efficent
// data structure.
ConcurrentMap<Type, std::unique_ptr<PropertyType>> types;
@ -68,22 +68,22 @@
// Generates field in a union with a given type and name
#define GENERATE_UNION_FIELD(type_name, union_name) type_name union_name

// Generates signatures GENERATE_define_generator(type_name, union_name );
// Generates signatures define_generator(type_name, union_name );
// for every pair type,property
#define GENERATE_FOR_ALL_PROPERTYS(define_generator) \
GENERATE_##define_generator(Null, null_v); \
define_generator(Null, null_v); \
GENERATE_##define_generator(Bool, bool_v); \
define_generator(Bool, bool_v); \
GENERATE_##define_generator(Int32, int32_v); \
define_generator(Int32, int32_v); \
GENERATE_##define_generator(Int64, int64_v); \
define_generator(Int64, int64_v); \
GENERATE_##define_generator(Float, float_V); \
define_generator(Float, float_V); \
GENERATE_##define_generator(Double, double_v); \
define_generator(Double, double_v); \
GENERATE_##define_generator(String, string_v); \
define_generator(String, string_v); \
GENERATE_##define_generator(ArrayBool, array_bool); \
define_generator(ArrayBool, array_bool); \
GENERATE_##define_generator(ArrayInt32, array_int32); \
define_generator(ArrayInt32, array_int32); \
GENERATE_##define_generator(ArrayInt64, array_int64); \
define_generator(ArrayInt64, array_int64); \
GENERATE_##define_generator(ArrayFloat, array_float); \
define_generator(ArrayFloat, array_float); \
GENERATE_##define_generator(ArrayDouble, array_double); \
define_generator(ArrayDouble, array_double); \
GENERATE_##define_generator(ArrayString, array_string);
define_generator(ArrayString, array_string);

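The practical difference in the list above is that the caller now passes the full generator macro name instead of a suffix that was glued onto GENERATE_ inside the list. Taking the union-field generator from this same file as the example:

    // Old call site:  GENERATE_FOR_ALL_PROPERTYS(UNION_FIELD)
    // New call site:  GENERATE_FOR_ALL_PROPERTYS(GENERATE_UNION_FIELD)
    // In both cases the first expanded line is:
    //     Null null_v;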
// Holds property and has some means of determining its type.
// T must have method get_type() const which returns Type.

@ -97,12 +97,13 @@ class PropertyHolder
public:
PropertyHolder() = delete;

GENERATE_FOR_ALL_PROPERTYS(CONSTRUCTOR_FOR_DATA);
GENERATE_FOR_ALL_PROPERTYS(GENERATE_CONSTRUCTOR_FOR_DATA);

PropertyHolder(PropertyHolder const &other) : key(other.key)
{
switch (other.key.get_type().flags()) {
GENERATE_FOR_ALL_PROPERTYS(CASE_CLAUSE_FOR_CONSTRUCTOR_COPY);
GENERATE_FOR_ALL_PROPERTYS(
GENERATE_CASE_CLAUSE_FOR_CONSTRUCTOR_COPY);
default:
assert(false);
}
@ -112,7 +113,8 @@ public:
PropertyHolder(PropertyHolder &&other) : key(other.key)
{
switch (other.key.get_type().flags()) {
GENERATE_FOR_ALL_PROPERTYS(CASE_CLAUSE_FOR_CONSTRUCTOR_MOVE);
GENERATE_FOR_ALL_PROPERTYS(
GENERATE_CASE_CLAUSE_FOR_CONSTRUCTOR_MOVE);
default:
assert(false);
}
@ -123,7 +125,8 @@ public:
{
assert(other.key.get_type() == key.get_type());
switch (key.get_type().flags()) {
GENERATE_FOR_ALL_PROPERTYS(CASE_CLAUSE_FOR_CONSTRUCTOR_MOVE);
GENERATE_FOR_ALL_PROPERTYS(
GENERATE_CASE_CLAUSE_FOR_CONSTRUCTOR_MOVE);
default:
assert(false);
}
@ -133,7 +136,7 @@ public:
~PropertyHolder()
{
switch (key.get_type().flags()) {
GENERATE_FOR_ALL_PROPERTYS(CASE_CLAUSE_FOR_DESTRUCTOR);
GENERATE_FOR_ALL_PROPERTYS(GENERATE_CASE_CLAUSE_FOR_DESTRUCTOR);
default:
assert(false);
}
@ -161,7 +164,7 @@ public:
void accept(Handler &h) const
{
switch (key.get_type().flags()) {
GENERATE_FOR_ALL_PROPERTYS(CASE_CLAUSE_FOR_HANDLER);
GENERATE_FOR_ALL_PROPERTYS(GENERATE_CASE_CLAUSE_FOR_HANDLER);
default:
assert(false);
}
@ -172,7 +175,8 @@ public:
void accept_primitive(Handler &h) const
{
switch (key.get_type().flags()) {
GENERATE_FOR_ALL_PROPERTYS(CASE_CLAUSE_FOR_HANDLER_PRIMITIVE);
GENERATE_FOR_ALL_PROPERTYS(
GENERATE_CASE_CLAUSE_FOR_HANDLER_PRIMITIVE);
default:
assert(false);
}
@ -181,7 +185,7 @@ public:
std::ostream &print(std::ostream &stream) const
{
switch (key.get_type().flags()) {
GENERATE_FOR_ALL_PROPERTYS(CASE_CLAUSE_FOR_PRINT);
GENERATE_FOR_ALL_PROPERTYS(GENERATE_CASE_CLAUSE_FOR_PRINT);
default:
assert(false);
}
@ -197,7 +201,7 @@ public:
{
if (key == other.key) {
switch (key.get_type().flags()) {
GENERATE_FOR_ALL_PROPERTYS(CASE_CLAUSE_FOR_COMPARISON);
GENERATE_FOR_ALL_PROPERTYS(GENERATE_CASE_CLAUSE_FOR_COMPARISON);
default:
assert(false);
}
@ -211,7 +215,7 @@ public:
{
if (key.get_type() == other.key.get_type()) {
switch (key.get_type().flags()) {
GENERATE_FOR_ALL_PROPERTYS(CASE_CLAUSE_FOR_COMPARISON);
GENERATE_FOR_ALL_PROPERTYS(GENERATE_CASE_CLAUSE_FOR_COMPARISON);
default:
assert(false);
}
@ -269,7 +273,7 @@ private:
// Stored data.
union
{
GENERATE_FOR_ALL_PROPERTYS(UNION_FIELD);
GENERATE_FOR_ALL_PROPERTYS(GENERATE_UNION_FIELD);
};
};

@ -8,7 +8,8 @@ template <class TG>
using property_key =
typename PropertyFamily<TG>::PropertyType::PropertyFamilyKey;

// Property Class designated for creation outside the database.
// Property Class designated for creation inside the database. Meant for
// storage.
template <class TG>
class StoredProperty : public PropertyHolder<property_key<TG>>
{
@ -16,7 +17,8 @@ class StoredProperty : public PropertyHolder<property_key<TG>>
const static class PropertyFamily<TG> null_family;

public:
// Needed for properties to return reference on stored property when they
// NOTE: Needed for properties to return reference on stored property when
// they
// don't cointain searched property.
const static class StoredProperty<TG> null;

@ -29,7 +29,7 @@ public:
// True if vertex isn't connected to any other vertex.
bool isolated() const;

// False if it's label with it already.
// False if it already has labe.
bool add_label(const Label &label);

// False if it doesn't have label.

@ -60,26 +60,6 @@ public:
return a > b || a == b;
}

// // true if no border or this > key or this >= key depends on border type.
// bool operator>(const T &other) const
// {
// return !key.is_present() || key.get() > other ||
// (type == Including && key.get() == other);
// }
//
// // true if this border is inclusive and key is present and key == other.
// bool operator==(const T &other) const
// {
// return type == Including && key.is_present() && key.get() == other;
// }
//
// // true if no border or this < key or this <= key depends on border type.
// bool operator<(const T &other) const
// {
// return !key.is_present() || key.get() < other ||
// (type == Including && key.get() == other);
// }

Option<T> key;
BorderType type;
};
@ -98,6 +98,12 @@ public:
});
}

// Calls update on values and returns resoult.
auto update()
{
return map([](auto ar) { return ar.update(); });
}

// Filters with property under given key
template <class KEY>
auto has_property(KEY &key)
@ -112,13 +118,6 @@ public:
return filter([&](auto &va) { return va.at(key) == prop; });
}

// Copy-s all pasing value to t before they are returned.
// auto clone_to(Option<T> &t)
// {
// return iter::make_inspect<decltype(std::move(*this))>(
// std::move(*this), [&](auto &v) { t = Option<T>(v); });
// }

// Copy-s pasing value to t before they are returned.
auto clone_to(Option<const T> &t)
{
@ -5,6 +5,7 @@

// Represents number of to be returned elements from iterator. Where acutal
// number is probably somwhere in [min,max].
// NOTE: Experimental
class Count : public TotalOrdering<Count>
{

@ -14,6 +14,7 @@ namespace iter
// I - iterator type
// J - iterator type returned from OP
// OP - type of mapper function
// TODO: Split into flat operation and map operation.
template <class T, class I, class J, class OP>
class FlatMap : public IteratorBase<T>,
public Composable<T, FlatMap<T, I, J, OP>>

@ -7,7 +7,7 @@ namespace iter
{

// Class which maps values returned by I iterator into value of type T with OP
// function and ends when op return empty optional.
// function and ends when op returns empty optional.
// T - type of return value
// I - iterator type
// OP - type of mapper function. OP: V -> Option<T>
@ -5,8 +5,7 @@
#include <ext/aligned_buffer.h>
#include <utility>

// Optional object storage
// Optional object storage. It maybe has and maybe dosent have objet of type T.

template <class T>
class Option
{
@ -86,6 +85,7 @@ public:
return *this;
}

// True if object i present.
bool is_present() const { return initialized; }

T &get() noexcept
@ -103,6 +103,7 @@ public:
}
}

// Returns ref to object if present else other.
T const &get_or(T const &other) const
{
if (is_present()) {
@ -203,3 +204,10 @@ auto make_option_const(const T &&data)
{
return Option<const T>(std::move(data));
}

// HELPER FUNCTIONS
template <class R>
bool option_fill(Option<R> &o)
{
return o.is_present() && o.get().fill();
}
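A short sketch of where the new helper is handy; the surrounding call is invented for illustration, only option_fill itself is part of the commit:

    // Instead of repeating o.is_present() && o.get().fill() at every call site:
    auto maybe_vertex = db.vertex_find(id);
    if (option_fill(maybe_vertex)) {
        // The vertex exists and its record is visible to this transaction.
    }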
@ -1,5 +1,6 @@
#pragma once

// Like option just for pointers. More efficent than option.
template <class T>
class OptionPtr
{

@ -2,6 +2,7 @@

#include "utils/total_ordering.hpp"

// Type which represents nothing.
class Void : public TotalOrdering<Void>
{
public:
@ -6,15 +6,18 @@
// Implementations should follow the form:
// border_return_type border_class::method_name(arguments){
// return
// CALL(method_name(trans(arguments)))/HALF_CALL(method_name(trans(arguments)));
// CALL(method_name(trans(arguments)))
// or
// HALF_CALL(method_name(trans(arguments)));
// }

// **************************** HELPER DEFINES *******************************//
// returns transformed pointer
// returns transformed this pointer
#define THIS (trans(this))
// Performs call x on transformed border class.
// In border class performs call x on original class.
#define HALF_CALL(x) (THIS->x)
// Performs call x on transformed border class and returns transformed output.
// In border class performs call x on original class and produces transformed
// output.
#define CALL(x) trans(HALF_CALL(x))

// Creates destructor for border type x which is original type y.
@ -58,24 +61,6 @@
namespace barrier
{

// ************************* EdgePropertyType
// #define FOR_ALL_PROPS_delete_EdgePropertyType(x) \
// template <> \
// EdgePropertyType<x>::~EdgePropertyType() \
// { \
// HALF_CALL(~PropertyTypeKey()); \
// }
// INSTANTIATE_FOR_PROPERTY(FOR_ALL_PROPS_delete_EdgePropertyType)

// ************************* VertexPropertyType
// #define FOR_ALL_PROPS_delete_VertexPropertyType(x) \
// template <> \
// VertexPropertyType<x>::~VertexPropertyType() \
// { \
// HALF_CALL(~PropertyTypeKey()); \
// }
// // INSTANTIATE_FOR_PROPERTY(FOR_ALL/_PROPS_delete_VertexPropertyType)

// ***************** Label
VertexIndex<std::nullptr_t> &Label::index() const { return CALL(index()); }

@ -542,12 +527,6 @@ void RecordStream<Stream>::write(const EdgeStoredProperty &prop)
HALF_CALL(write(trans(prop)));
}

// template <class Stream>
// void RecordStream<Stream>::write_null()
// {
// HALF_CALL(write_null());
// }

template <class Stream>
void RecordStream<Stream>::write(const Null &v)
{
@ -699,31 +678,31 @@ template class RecordStream<io::Socket>;
// **************************** ERROR EXAMPLES ****************************** //
// **************************** COMPILE TIME
/*
error:
### error:
../libmemgraph.a(barrier.cpp.o): In function `Option<barrier::VertexAccessor
const> Option<VertexAccessor const>::map<barrier::VertexAccessor const>()':
/home/ktf/Workspace/memgraph/include/utils/option.hpp:111: undefined reference
to `barrier::VertexAccessor::VertexAccessor<VertexAccessor const>(VertexAccessor
const&&)'

description:
# description:
Constructor VertexAccessor<::VertexAccessor const>(::VertexAccessor const&&)
isn't written.

error:
### error:
../libmemgraph.a(barrier.cpp.o): In function `barrier::EdgeAccessor::from()
const':
/home/ktf/Workspace/memgraph/src/barrier/barrier.cpp:501: undefined reference to
`barrier::VertexAccessor::VertexAccessor<barrier::VertexAccessor
const>(barrier::VertexAccessor const&&)'

description:
# description:
Move constructor VertexAccessor<VertexAccessor const>(VertexAccessor const&&)
isn't defined.

error:
### error:
/home/ktf/Workspace/memgraph/src/barrier/barrier.cpp:282:12: error: call to
'trans' is ambiguous
return CALL(at(trans(key)));
@ -769,7 +748,7 @@ from macro 'TRANSFORM_REF'
x const &trans(y const &l) { return ref_as<x const>(l); } \
...

description:
# description:
There is no valid transformation for types on which trans is called.

*/
@ -13,7 +13,6 @@ DbAccessor::DbAccessor(Db &db, tx::Transaction &t)
}

// VERTEX METHODS
// auto DbAccessor::vertex_access()

Option<const VertexAccessor> DbAccessor::vertex_find(const Id &id)
{
@ -36,8 +35,11 @@ EdgeAccessor DbAccessor::edge_insert(VertexAccessor &from, VertexAccessor &to)
{
auto edge_accessor = db_transaction.db.graph.edges.insert(
db_transaction, from.vlist, to.vlist);

// Connect edge with from,to vertices.
from->data.out.add(edge_accessor.vlist);
to->data.in.add(edge_accessor.vlist);

return edge_accessor;
}

@ -46,8 +48,11 @@ EdgeAccessor DbAccessor::edge_insert(VertexAccessor const &from,
{
auto edge_accessor = db_transaction.db.graph.edges.insert(
db_transaction, from.vlist, to.vlist);

// Connect edge with updated from,to vertices.
from.update()->data.out.add(edge_accessor.vlist);
to.update()->data.in.add(edge_accessor.vlist);

return edge_accessor;
}

@ -107,6 +112,7 @@ bool DbAccessor::commit()
db_transaction.trans.commit();
return true;
} else {
// Index update wasn't successfull so whe are aborting transaction.
db_transaction.trans.abort();
return false;
}
@ -12,6 +12,7 @@
Cleaning::Cleaning(ConcurrentMap<std::string, Db> &dbs, size_t cleaning_cycle)
    : dbms(dbs), cleaning_cycle(cleaning_cycle)
{
+    // Start the cleaning thread.
    cleaners.push_back(
        std::make_unique<Thread>([&, cleaning_cycle = cleaning_cycle ]() {
            Logger logger = logging::log->logger("Cleaner");

@ -22,8 +23,11 @@ Cleaning::Cleaning(ConcurrentMap<std::string, Db> &dbs, size_t cleaning_cycle)
            while (cleaning.load(std::memory_order_acquire)) {
                std::time_t now = std::time(nullptr);

+                // Maybe it's cleaning time.
                if (now >= last_clean + cleaning_cycle) {
                    logger.info("Started cleaning cyle");

+                    // Clean all databases.
                    for (auto &db : dbs.access()) {
                        logger.info("Cleaning database \"{}\"", db.first);
                        DbTransaction t(db.second);

@ -43,11 +47,15 @@ Cleaning::Cleaning(ConcurrentMap<std::string, Db> &dbs, size_t cleaning_cycle)
                                         db.first);
                            logger.error("{}", e.what());
                        }
+                        // NOTE: We should commit even if an error occurred.
                        t.trans.commit();
                    }
                    last_clean = now;
                    logger.info("Finished cleaning cyle");

                } else {

+                    // Cleaning isn't scheduled for now, so the thread should sleep.
                    std::this_thread::sleep_for(std::chrono::seconds(1));
                }
            }

@ -56,8 +64,10 @@ Cleaning::Cleaning(ConcurrentMap<std::string, Db> &dbs, size_t cleaning_cycle)

Cleaning::~Cleaning()
{
+    // Stop cleaning.
    cleaning.store(false, std::memory_order_release);
    for (auto &t : cleaners) {
+        // Join the cleaner thread.
        t.get()->join();
    }
}
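The constructor and destructor above bracket a background thread that wakes roughly once per second, runs a cleaning pass when the cycle has elapsed, and exits when the atomic flag is cleared. A minimal standalone sketch of the same timing and shutdown pattern follows; all names are illustrative and not taken from the codebase.

#include <atomic>
#include <chrono>
#include <ctime>
#include <thread>

// Minimal periodic-worker sketch: run a task roughly every `cycle` seconds,
// poll a stop flag once per second, and join the worker on destruction.
class PeriodicWorker
{
public:
    explicit PeriodicWorker(std::time_t cycle) : cycle(cycle)
    {
        worker = std::thread([this]() {
            std::time_t last_run = std::time(nullptr);
            while (running.load(std::memory_order_acquire)) {
                std::time_t now = std::time(nullptr);
                if (now >= last_run + cycle) {
                    do_work();
                    last_run = now;
                } else {
                    // Not yet time to run, so sleep briefly before rechecking.
                    std::this_thread::sleep_for(std::chrono::seconds(1));
                }
            }
        });
    }

    ~PeriodicWorker()
    {
        // Signal shutdown and wait for the worker to observe it.
        running.store(false, std::memory_order_release);
        worker.join();
    }

private:
    void do_work() { /* clean databases, make snapshots, ... */ }

    std::time_t cycle;
    std::atomic<bool> running{true};
    std::thread worker;
};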
@ -5,6 +5,7 @@ Db &Dbms::active()
{
    Db *active = active_db.load(std::memory_order_acquire);
    if (UNLIKELY(active == nullptr)) {
+        // There is no active database.
        return create_default();
    } else {
        return *active;

@ -19,6 +20,8 @@ Db &Dbms::active(const std::string &name)
    // create db if it doesn't exist
    auto it = acc.find(name);
    if (it == acc.end()) {

+        // It doesn't exist.
        Snapshoter &snap = snapshoter;
        it = acc.emplace(name, std::forward_as_tuple(name),
                         std::forward_as_tuple(name))
@ -21,8 +21,11 @@ bool SnapshotEngine::make_snapshot()
    std::lock_guard<std::mutex> lock(guard);
    std::time_t now = std::time(nullptr);
    if (make_snapshot(now, "full")) {
+        // The snapshot was created, so we should check whether some older
+        // snapshots should be deleted.
        clean_snapshots();
        return true;

    } else {
        return false;
    }

@ -31,6 +34,8 @@ bool SnapshotEngine::make_snapshot()
void SnapshotEngine::clean_snapshots()
{
    logger.info("Started cleaning commit_file");
+    // We first count the number of snapshots that we know about in the
+    // commit file.
    std::vector<std::string> lines;
    {
        std::ifstream commit_file(snapshot_commit_file());

@ -43,14 +48,19 @@ void SnapshotEngine::clean_snapshots()

    int n = lines.size() - max_retained_snapshots;
    if (n > 0) {
+        // We have too many snapshots, so we should delete some.
        std::ofstream commit_file(snapshot_commit_file(), std::fstream::trunc);

+        // First we rewrite the commit file to contain only the
+        // max_retained_snapshots newest snapshots.
        for (auto i = n; i < lines.size(); i++) {
            commit_file << lines[i] << std::endl;
        }

        auto res = sys::flush_file_to_disk(commit_file);
        if (res == 0) {
+            // The commit file was successfully changed, so we can now delete
+            // the snapshots that were evicted from it.
            commit_file.close();
            logger.info("Removed {} snapshot from commit_file", n);

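The branch above trims the commit file down to the max_retained_snapshots newest entries and then deletes the snapshot files that fell out of it. A compact sketch of just that pruning rule, separated from the file I/O and flushing that the real code also has to do (the helper name is made up):

#include <string>
#include <vector>

// Keep only the newest `max_retained` entries (the commit file is appended to,
// so older entries come first); return the evicted oldest ones so the caller
// can delete the corresponding snapshot files.
std::vector<std::string> prune_oldest(std::vector<std::string> &lines,
                                      size_t max_retained)
{
    std::vector<std::string> evicted;
    if (lines.size() > max_retained) {
        size_t n = lines.size() - max_retained;
        evicted.assign(lines.begin(), lines.begin() + n);
        lines.erase(lines.begin(), lines.begin() + n);
    }
    return evicted;
}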
@ -93,12 +103,16 @@ bool SnapshotEngine::make_snapshot(std::time_t now, const char *type)
    auto old_trans =
        tx::TransactionRead(db.tx_engine); // Overengineered for incremental
                                           // snapshot. Can be removed.

+    // Everything is ready for the creation of the snapshot.
    snapshot(t, snap, old_trans);

    auto res = sys::flush_file_to_disk(snapshot_file);
    if (res == 0) {
+        // The snapshot was successfully written to disk.
        t.trans.commit();
        success = true;

    } else {
        logger.error("Error {} occured while flushing snapshot file", res);
        t.trans.abort();

@ -112,6 +126,8 @@ bool SnapshotEngine::make_snapshot(std::time_t now, const char *type)
    }

    if (success) {
+        // The snapshot was successfully created, but for it to be reachable
+        // on import we must add it to the end of the commit file.
        std::ofstream commit_file(snapshot_commit_file(), std::fstream::app);

        commit_file << snapshot_file_name << std::endl;

@ -120,6 +136,8 @@ bool SnapshotEngine::make_snapshot(std::time_t now, const char *type)
        if (res == 0) {
            commit_file.close();
            snapshoted_no_v.fetch_add(1);
+            // The snapshot was successfully committed.

        } else {
            logger.error("Error {} occured while flushing commit file", res);
        }

@ -139,6 +157,7 @@ bool SnapshotEngine::import()

    std::ifstream commit_file(snapshot_commit_file());

+    // We first load all known snapshot file names from the commit file.
    std::vector<std::string> snapshots;
    std::string line;
    while (std::getline(commit_file, line)) {

@ -166,8 +185,7 @@ bool SnapshotEngine::import()

        } else {
            logger.info("Unuccesfully tryed to import snapshot "
-                       "\"{}\" because indexes where unuccesfully "
-                       "with updating",
+                       "\"{}\"",
                        snapshots.back());
        }

@ -179,6 +197,7 @@ bool SnapshotEngine::import()
        }

        snapshots.pop_back();
+        // We will try to import an older snapshot if one exists.
    }

} catch (const std::exception &e) {

@ -289,11 +308,13 @@ void SnapshotEngine::add_indexes(std::vector<IndexDefinition> &v)
std::string SnapshotEngine::snapshot_file(std::time_t const &now,
                                          const char *type)
{
+    // Nanosecond part of the current time (the remainder below one second).
    auto now_nano = std::chrono::time_point_cast<std::chrono::nanoseconds>(
                        std::chrono::high_resolution_clock::now())
                        .time_since_epoch()
                        .count() %
                    (1000 * 1000 * 1000);

    return snapshot_db_dir() + "/" + std::to_string(now) + "_" +
           std::to_string(now_nano) + "_" + type;
}
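snapshot_file combines the second-resolution timestamp with the sub-second nanosecond remainder, so two snapshots taken within the same second still get distinct names. A small usage sketch of the same arithmetic; the directory and type arguments here are placeholders, not values from the codebase.

#include <chrono>
#include <ctime>
#include <iostream>
#include <string>

std::string snapshot_name(const std::string &dir, std::time_t now,
                          const char *type)
{
    // Nanoseconds since the epoch, reduced to the part below one second.
    auto now_nano = std::chrono::time_point_cast<std::chrono::nanoseconds>(
                        std::chrono::high_resolution_clock::now())
                        .time_since_epoch()
                        .count() %
                    (1000 * 1000 * 1000);

    return dir + "/" + std::to_string(now) + "_" + std::to_string(now_nano) +
           "_" + type;
}

int main()
{
    // Prints something like /tmp/snapshots/1700000000_123456789_full
    std::cout << snapshot_name("/tmp/snapshots", std::time(nullptr), "full")
              << std::endl;
}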
@ -308,9 +329,11 @@ std::string SnapshotEngine::snapshot_db_dir()
    if (!sys::ensure_directory_exists(snapshot_folder)) {
        logger.error("Error while creating directory \"{}\"", snapshot_folder);
    }

    auto db_path = snapshot_folder + "/" + db.name();
    if (!sys::ensure_directory_exists(db_path)) {
        logger.error("Error while creating directory \"{}\"", db_path);
    }

    return db_path;
}
@ -12,6 +12,7 @@ Snapshoter::Snapshoter(ConcurrentMap<std::string, Db> &dbs,
                       size_t snapshot_cycle)
    : snapshot_cycle(snapshot_cycle), dbms(dbs)
{
+    // Start the snapshoter thread.
    thread = std::make_unique<Thread>([&]() {
        logger = logging::log->logger("Snapshoter");
        logger.info("Started with snapshoot cycle of {} sec",

@ -46,7 +47,9 @@ void Snapshoter::run()
            make_snapshots();

            last_snapshot = now;

        } else {
+            // It isn't time for a snapshot yet, so the thread should wait.
            std::this_thread::sleep_for(std::chrono::seconds(1));
        }
    }
@ -7,6 +7,7 @@
void EdgeAccessor::remove() const
{
    RecordAccessor::remove();

    auto from_v = from();
    bool f_from = from_v.fill();
    assert(f_from);

@ -15,6 +16,7 @@ void EdgeAccessor::remove() const
    bool f_to = to_v.fill();
    assert(f_to);

+    // Detach the edge from its vertices.
    from_v.update().record->data.out.remove(vlist);
    to_v.update().record->data.in.remove(vlist);
}
@ -10,6 +10,8 @@ void Garbage::clean()
{
    for (auto it = gar.begin(); it != gar.end(); it++) {
        if (it->first.all_finished(engine) && it.remove()) {
+            // All transactions that could have seen the data are finished and
+            // this thread successfully removed the item from the list.
            it->second->~DeleteSensitive();
        }
    }
@ -51,10 +51,14 @@ auto NonUniqueUnorderedIndex<T, K>::for_range_exact(DbAccessor &t_v,
        it = list.cbegin(), end = list.cend(), from = from_v, to = to_v,
        t = t_v
    ]() mutable->auto {
+        // NonUniqueUnorderedIndex has no ordering, so it must iterate through
+        // all index records to determine which ones are inside the borders.
        while (it != end) {
            const IndexRecord<T, K> &r = *it;
            if (from < r.key && to > r.key &&
                r.is_valid(t.db_transaction.trans)) {
+                // Record r is inside the borders and is valid for the current
+                // transaction.
                const typename T::accessor_t acc =
                    r.access(t.db_transaction);
                it++;
@ -57,9 +57,13 @@ auto UniqueOrderedIndex<T, K>::for_range_exact(DbAccessor &t_v,
    // Sorted order must be checked
    if (this->type().order == Ascending && from_v.key.is_present()) {
        begin = acc.cfind_or_larger(from_v);

    } else if (this->type().order == Descending && to_v.key.is_present()) {
+        // The order is descending, so we have to start from the end border
+        // and iterate towards the from border.
        begin = acc.cfind_or_larger(to_v);
        end = from_v;

    } else {
        assert(this->type().order != None);
    }

@ -71,9 +75,15 @@ auto UniqueOrderedIndex<T, K>::for_range_exact(DbAccessor &t_v,
        it = std::move(begin), b_end = std::move(end), t = t_v,
        hold_acc = std::move(acc)
    ]() mutable->auto {
+        // UniqueOrderedIndex is ordered, so it only has to iterate through
+        // records which are inside the borders. It starts with items larger
+        // than from_v, but it still needs to check whether it has reached
+        // the end border.
        while (b_end >= it->key) {
            const IndexRecord<T, K> &r = *it;
            if (r.is_valid(t.db_transaction.trans)) {
+                // Record r is inside the borders and is valid for the current
+                // transaction.
                const typename T::accessor_t acc =
                    r.access(t.db_transaction);
                it++;
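The two for_range_exact implementations above differ only in how much of the index they have to touch: the non-unique unordered index scans every record and filters by key, while the unique ordered index positions itself at the lower border and stops once the upper border is reached. The following standalone sketch shows that contrast over plain standard containers, with std::map standing in for the ordered index; it is illustrative only and uses the same exclusive borders as the code above.

#include <iostream>
#include <map>
#include <string>
#include <utility>
#include <vector>

// Unordered scan: visit every record and filter by key.
std::vector<std::string> range_unordered(
    const std::vector<std::pair<int, std::string>> &records, int from, int to)
{
    std::vector<std::string> out;
    for (const auto &r : records)
        if (from < r.first && r.first < to) out.push_back(r.second);
    return out;
}

// Ordered scan: jump to the first key after `from`, stop once `to` is reached.
std::vector<std::string> range_ordered(const std::map<int, std::string> &index,
                                       int from, int to)
{
    std::vector<std::string> out;
    for (auto it = index.upper_bound(from);
         it != index.end() && it->first < to; ++it)
        out.push_back(it->second);
    return out;
}

int main()
{
    std::map<int, std::string> index = {{1, "a"}, {5, "b"}, {9, "c"}};
    for (const auto &v : range_ordered(index, 2, 10)) std::cout << v << "\n";
}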
@ -12,6 +12,7 @@ bool IndexHolder<TG, K>::set_index(std::unique_ptr<IndexBase<TG, K>> inx)
    if (index.compare_exchange_strong(npr<TG, K>, inx.get())) {
        inx.release();
        return true;

    } else {
        return false;
    }

@ -23,6 +24,7 @@ OptionPtr<IndexBase<TG, K>> IndexHolder<TG, K>::get_read() const
    auto loaded = index.load(std::memory_order_acquire);
    if (loaded == nullptr || !loaded->can_read()) {
        return OptionPtr<IndexBase<TG, K>>();

    } else {
        return make_option_ptr(loaded);
    }

@ -35,6 +37,7 @@ IndexHolder<TG, K>::get_write(const tx::Transaction &t) const
    auto loaded = index.load(std::memory_order_acquire);
    if (loaded == nullptr || !loaded->is_obliged_to_insert(t)) {
        return OptionPtr<IndexBase<TG, K>>();

    } else {
        return make_option_ptr(loaded);
    }

@ -46,6 +49,7 @@ IndexHolder<TG, K>::remove_index(IndexBase<TG, K> *expected)
{
    if (index.compare_exchange_strong(expected, nullptr)) {
        return make_option(std::unique_ptr<IndexBase<TG, K>>(expected));

    } else {
        return make_option(std::unique_ptr<IndexBase<TG, K>>());
    }

@ -57,6 +61,7 @@ Option<std::unique_ptr<IndexBase<TG, K>>> IndexHolder<TG, K>::remove_index()
    auto removed = index.exchange(nullptr);
    if (removed == nullptr) {
        return make_option<std::unique_ptr<IndexBase<TG, K>>>();

    } else {
        return make_option(std::unique_ptr<IndexBase<TG, K>>(removed));
    }
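The IndexHolder methods above all revolve around a single atomic pointer: an index is installed at most once with compare_exchange_strong, read with an acquire load, and removed by swapping the pointer back to null so ownership returns to the caller. A simplified, self-contained sketch of that hand-off follows; std::optional stands in for the project's Option types, and the can_read / is_obliged_to_insert checks are omitted.

#include <atomic>
#include <iostream>
#include <memory>
#include <optional>

template <class Index>
class AtomicIndexHolder
{
public:
    ~AtomicIndexHolder()
    {
        // Destroy whatever index is still installed.
        delete holder.load();
    }

    // Install the index if none is present; ownership moves into the holder
    // only when the compare-exchange succeeds.
    bool set(std::unique_ptr<Index> idx)
    {
        Index *expected = nullptr;
        if (holder.compare_exchange_strong(expected, idx.get())) {
            idx.release();
            return true;
        }
        return false;
    }

    // Read access: may return nullptr if no index is installed.
    Index *get() const { return holder.load(std::memory_order_acquire); }

    // Remove the index and hand ownership back to the caller.
    std::optional<std::unique_ptr<Index>> remove()
    {
        Index *removed = holder.exchange(nullptr);
        if (removed == nullptr) return std::nullopt;
        return std::unique_ptr<Index>(removed);
    }

private:
    std::atomic<Index *> holder{nullptr};
};

int main()
{
    AtomicIndexHolder<int> holder; // `int` stands in for an index type
    holder.set(std::make_unique<int>(42));
    if (auto *idx = holder.get()) std::cout << *idx << "\n";
    auto owned = holder.remove();  // take ownership back
}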
@ -11,7 +11,7 @@ bool Indexes::add_index(IndexDefinition id)
    std::function<bool(DbTransaction &)> finish = [](auto &t) { return false; };

    // Creates transaction and during it's creation adds index into it's
-    // place. Also created finish closure which will add necessary elements
+    // place. Also creates finish closure which will add necessary elements
    // into index.
    DbTransaction t(db, db.tx_engine.begin([&](auto &t) mutable {
        size_t code = id.loc.location_code();

@ -35,8 +35,7 @@ void Properties<TG>::set(StoredProperty<TG> &&value)
    for (auto &prop : props) {
        if (prop.key == value.key) {
            // It is necessary to change key because the types from before and
-            // now
-            // could be different.
+            // now could be different.
            StoredProperty<TG> &sp = const_cast<StoredProperty<TG> &>(prop);
            sp = std::move(value);
            return;
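Properties<TG>::set above overwrites an existing stored property in place when one with the same key is already present; as the comment notes, the whole entry (including the key) is replaced because the stored type may have changed. A self-contained sketch of that replace-or-append rule follows; the append branch is an assumption here, since the hunk only shows the replacing case, and the names are illustrative.

#include <iostream>
#include <string>
#include <utility>
#include <vector>

struct Prop
{
    std::string key;
    int value;
};

// Replace the stored property with the same key (the whole entry is
// overwritten), otherwise append it.
void set_property(std::vector<Prop> &props, Prop &&value)
{
    for (auto &prop : props) {
        if (prop.key == value.key) {
            prop = std::move(value);
            return;
        }
    }
    props.push_back(std::move(value));
}

int main()
{
    std::vector<Prop> props;
    set_property(props, {"name", 1});
    set_property(props, {"name", 2}); // replaces the first entry
    std::cout << props.size() << " " << props[0].value << "\n"; // prints: 1 2
}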
@ -53,16 +53,27 @@ void VertexAccessor::remove() const
{
    RecordAccessor::remove();

+    // Detach all out edges.
    for (auto evr : record->data.out) {
        auto ea = EdgeAccessor(evr, db);

+        // Delete the edge.
        ea.vlist->remove(db.trans);

+        // Remove the edge from its to-vertex.
        auto to_v = ea.to();
        to_v.fill();
        to_v.update().record->data.in.remove(ea.vlist);
    }

+    // Detach all in edges.
    for (auto evr : record->data.in) {
        auto ea = EdgeAccessor(evr, db);

+        // Delete the edge.
        ea.vlist->remove(db.trans);

+        // Remove the edge from its from-vertex.
        auto from_v = ea.from();
        from_v.fill();
        from_v.update().record->data.out.remove(ea.vlist);
@ -61,7 +61,7 @@ void add_property(Db &db, StoredProperty<TypeGroupVertex> &prop)
{
    DbAccessor t(db);

-    t.vertex_access().fill().for_all([&](auto va) { va.set(prop); });
+    t.vertex_access().fill().update().for_all([&](auto va) { va.set(prop); });

    assert(t.commit());
}

@ -73,7 +73,7 @@ void add_vertex_property_serial_int(Db &db, PropertyFamily<TypeGroupVertex> &f)
    auto key = f.get(Int64::type).family_key();

    size_t i = 0;
-    t.vertex_access().fill().for_all([&](auto va) mutable {
+    t.vertex_access().fill().update().for_all([&](auto va) mutable {
        va.set(StoredProperty<TypeGroupVertex>(Int64(i), key));
        i++;
    });

@ -88,7 +88,7 @@ void add_edge_property_serial_int(Db &db, PropertyFamily<TypeGroupEdge> &f)
    auto key = f.get(Int64::type).family_key();

    size_t i = 0;
-    t.edge_access().fill().for_all([&](auto va) mutable {
+    t.edge_access().fill().update().for_all([&](auto va) mutable {
        va.set(StoredProperty<TypeGroupEdge>(Int64(i), key));
        i++;
    });
@ -186,7 +186,7 @@ int main(void)
    logging::init_async();
    logging::log->pipe(std::make_unique<Stdout>());

-    size_t cvl_n = 1000;
+    size_t cvl_n = 1;

    std::string create_vertex_label =
        "CREATE (n:LABEL {name: \"cleaner_test\"}) RETURN n";

@ -223,11 +223,22 @@ int main(void)
    Db db("index", false);
    assert(db.indexes().add_index(vertex_property_nonunique_unordered));
    assert(db.indexes().add_index(edge_property_nonunique_unordered));

    run(cvl_n, create_vertex_label, db);
-    add_edge(cvl_n, db);
+    auto sp = StoredProperty<TypeGroupVertex>(
+        Int64(0), db.graph.vertices.property_family_find_or_create("prop")
+                      .get(Int64::type)
+                      .family_key());
+    add_property(db, sp);
+
    assert(cvl_n ==
           size(db, db.graph.vertices.property_family_find_or_create("prop")
                        .index));
+
+    add_edge(cvl_n, db);
+    add_edge_property_serial_int(
+        db, db.graph.edges.property_family_find_or_create("prop"));
+
    assert(
        cvl_n ==
        size(db,