Added documentation.

Fixed test for index.
This commit is contained in:
Kruno Tomola Fabro 2016-09-18 23:22:36 +01:00
parent ee23b0204d
commit d806d635f9
78 changed files with 564 additions and 337 deletions

View File

@ -10,8 +10,8 @@
// for queries.
namespace barrier
{
// Every class which must be visible outside the barrier should have its
// barrier class defined here.
// Every class from the database which must be visible outside the barrier
// should have its barrier class defined here.
// ************ Here should be forward declarations of Sized barrier classes
// ACCESSORS
@ -298,7 +298,8 @@ public:
Count count();
};
// TODO: Find reasons for such a great size and try to decrease it.
// NOTE: The large size comes from the SkipList accessor which is embedded into the
// iterator. The accessor has 64 pointer fields, which is 512 B in total.
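// (Illustrative arithmetic, assuming 8 B pointers: 64 * 8 B = 512 B for the
// embedded accessor, and the remaining ~48 B up to the 560 B in Sized<560, 8>
// below is presumably the rest of the iterator's own state.)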
class VertexAccessIterator
: public Sized<560, 8>,
public iter::Composable<const VertexAccessor, VertexAccessIterator>
@ -318,7 +319,8 @@ public:
Count count();
};
// TODO: Find reasons for such a great size and try to decrease it.
// NOTE: The large size comes from the SkipList accessor which is embedded into the
// iterator. The accessor has 64 pointer fields, which is 512 B in total.
class EdgeAccessIterator
: public Sized<560, 8>,
public iter::Composable<const EdgeAccessor, EdgeAccessIterator>
@ -476,7 +478,6 @@ public:
void write(const EdgeAccessor &edge);
void write(const VertexStoredProperty &prop);
void write(const EdgeStoredProperty &prop);
void write_null();
void write(const Null &v);
void write(const Bool &prop);
void write(const Float &prop);

View File

@ -6,7 +6,7 @@
#include <utility>
#include <vector>
// THis shoul be the only place to include code from memgraph other than
// This should be the only place to include code from memgraph other than
// barrier.cpp
#include "mvcc/id.hpp"
#include "storage/indexes/index_definition.hpp"
@ -28,30 +28,35 @@ namespace barrier
x(ArrayInt64) x(ArrayFloat) x(ArrayDouble) x(ArrayBool) x(ArrayString)
// **************************** HELPER FUNCTIONS **************************** //
// CASTS FROM& -> TO&
template <class TO, class FROM>
TO &ref_as(FROM &ref)
{
return (*reinterpret_cast<TO *>(&ref));
}
// CASTS FROM const& -> TO const&
template <class TO, class FROM>
TO const &ref_as(FROM const &ref)
{
return (*reinterpret_cast<TO const *>(&ref));
}
// CASTS FROM* -> TO*
template <class TO, class FROM>
TO *ptr_as(FROM *ref)
{
return (reinterpret_cast<TO *>(ref));
}
// CASTS FROM const* -> TO const*
template <class TO, class FROM>
TO const *ptr_as(FROM const *ref)
{
return (reinterpret_cast<TO const *>(ref));
}
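// A usage sketch (illustrative; the real conversions are generated by the
// TRANSFORM_REF macros elsewhere in the barrier sources): given a database
// ::VertexAccessor va, the barrier-facing reference would be obtained roughly as
//     barrier::VertexAccessor &bva = ref_as<barrier::VertexAccessor>(va);
// and converted back with ref_as<::VertexAccessor>(bva).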
// CASTS FROM&& -> TO&&
template <class TO, class FROM>
TO &&value_as(FROM &&ref)
{
@ -61,6 +66,7 @@ TO &&value_as(FROM &&ref)
return (reinterpret_cast<TO &&>(std::move(ref)));
}
// CASTS FROM const&& -> TO const&&
template <class TO, class FROM>
const TO &&value_as(const FROM &&ref)
{
@ -71,7 +77,8 @@ const TO &&value_as(const FROM &&ref)
}
// Barrier classes which will be used only through reference/pointer should
// inherit this class.
// inherit this class. Outside the barrier, derived classes will be used only
// through references/pointers.
class Unsized
{
public:
@ -95,7 +102,7 @@ class Sized
{
protected:
// This will ensure that this/derived class can't be instantiated.
// This way side outside the barrier can't "accidentaly" create this/derived
// Something outside the barrier can't "accidentally" create this/derived
// type because that would be erroneous.
Sized() = delete;
@ -123,28 +130,9 @@ protected:
"Border class aligment mismatch");
}
public:
typename std::aligned_storage<size_B, alignment_B>::type &_data_ref()
{
return data;
}
typename std::aligned_storage<size_B, alignment_B>::type const &
_data_ref_const() const
{
return data;
}
private:
// Here is the aligned storage which imitates the size and alignment of an object
// of the original class from memgraph.
typename std::aligned_storage<size_B, alignment_B>::type data;
};
// HELPER FUNCTIONS
template <class R>
bool option_fill(Option<R> &o)
{
return o.is_present() && o.get().fill();
}
}

View File

@ -160,6 +160,10 @@ namespace barrier
// Blueprint for valid transformation of references:
// TRANSFORM_REF(, ::);
// template <class T> TRANSFORM_REF_TEMPLATED(<T>,::<T>);
// TODO: The strongest assurance that everything is correct would be for all of the
// following transformations to use DUP for defining their transformation. This
// would mean that the names of classes exported in the barrier and the names of
// the real classes in the database are equal.
// ***************** TRANSFORMS of reference and pointers
DUP(Label, TRANSFORM_REF);

View File

@ -5,6 +5,8 @@
using std::pair;
// Item stored in the skiplist. Used by ConcurrentMap and ConcurrentMultiMap to
// store key and value, while ordering is done on keys only.
template <typename K, typename T>
class Item : public TotalOrdering<Item<K, T>>,
public TotalOrdering<K, Item<K, T>>,
@ -45,6 +47,8 @@ public:
}
};
// Common base for accessors of all containers derived from SkipList
// (ConcurrentMap, ConcurrentSet, ...).
template <typename T>
class AccessorBase
{

View File

@ -5,7 +5,8 @@
#include <utility>
#include "utils/crtp.hpp"
// TODO: reimplement this
// TODO: reimplement this. It's correct but somewhat inefficient and could be
// done better.
template <class T>
class ConcurrentList
{
@ -24,7 +25,7 @@ private:
template <class V>
static bool cas(std::atomic<V> &atomic, V expected, V desired)
{ // Could be relaxed must be atleast Release.
{ // Could be relaxed but must be at least Release.
return atomic.compare_exchange_strong(expected, desired,
std::memory_order_seq_cst);
}
@ -35,18 +36,28 @@ private:
return atomic.exchange(desired, std::memory_order_seq_cst);
}
// Basic element in a ConcurrentList
class Node
{
public:
Node(const T &data) : data(data) {}
Node(T &&data) : data(std::move(data)) {}
// Carried data
T data;
// Next element in list or nullptr if end.
std::atomic<Node *> next{nullptr};
// Next removed element in list or nullptr if end.
std::atomic<Node *> next_rem{nullptr};
// True if the node has been logically removed from the list.
std::atomic<bool> removed{false};
};
// Base for mutable and immutable iterators. Also serves as an accessor to the
// list, used for safe garbage disposal.
template <class It>
class IteratorBase : public Crtp<It>
{
@ -58,7 +69,9 @@ private:
IteratorBase(ConcurrentList *list) : list(list)
{
assert(list != nullptr);
// Increment the number of iterators accessing the list.
list->count++;
// Start from the beginning of the list.
reset();
}
@ -80,14 +93,19 @@ private:
}
auto head_rem = load(list->removed);
// The next IF checks whether this thread is responsible for disposal of
// the collected garbage.
// Fetch could be relaxed
// There is a possibility that no one will delete the garbage at this
// time.
if (list->count.fetch_sub(1) == 1 && head_rem != nullptr &&
cas<Node *>(
list->removed, head_rem,
nullptr)) { // I am the last one and there is garbage to be
// removed.
// time but it will be deleted at some other time.
if (list->count.fetch_sub(1) == 1 && // I am the last one accessing
head_rem != nullptr && // There is some garbage
cas<Node *>(list->removed, head_rem,
nullptr) // No new garbage was added.
) {
// Delete all removed nodes by following the chain of next_rem starting
// from head_rem.
auto now = head_rem;
do {
auto next = load(now->next_rem);
@ -120,7 +138,9 @@ private:
do {
prev = curr;
curr = load(curr->next);
} while (valid() && is_removed());
} while (valid() && is_removed()); // Loop ends if end of list is
// found or if not removed
// element is found.
return this->derived();
}
It &operator++(int) { return operator++(); }
@ -136,7 +156,7 @@ private:
{
prev = nullptr;
curr = load(list->head);
while (valid() && is_removed()) {
if (valid() && is_removed()) {
operator++();
}
}
@ -150,9 +170,12 @@ private:
// leak is less dangerous.
auto node = new Node(data);
Node *next = nullptr;
// Insert at the beginning of the list. Retries on failure.
do {
next = load(list->head);
// First connect to next.
store(node->next, next);
// Then try to set as head.
} while (!cas(list->head, next, node));
}
@ -165,11 +188,17 @@ private:
bool remove()
{
assert(valid());
// Try to logically remove it.
if (cas(curr->removed, false, true)) {
// I removed it!!!
// Try to disconnect it from list.
if (!disconnect()) {
// Disconnection failed because the node's relative location in
// the list changed. We must first find it again and then try
// to disconnect it again.
find_and_disconnect();
}
// Add it to the list of nodes to be garbage collected.
store(curr->next_rem, swap(list->removed, curr));
return true;
}
@ -184,6 +213,8 @@ private:
friend bool operator!=(const It &a, const It &b) { return !(a == b); }
private:
// Finds the current element starting from the beginning of the list. Retries
// until it successfully disconnects it.
void find_and_disconnect()
{
Node *bef = nullptr;
@ -191,28 +222,34 @@ private:
auto next = load(curr->next);
while (now != nullptr) {
if (now == curr) {
prev = bef;
// Found it.
prev = bef; // Set the correct previous node in list.
if (disconnect()) {
// Successfully disconnected it.
return;
}
// Let's try again from the beginning.
bef = nullptr;
now = load(list->head);
} else if (now == next) { // Comparison with next is an
// optimization for early return.
return;
} else {
// `now` isn't the one we are looking for; let's try the next one.
bef = now;
now = load(now->next);
}
}
}
// Tries to disconnect the current element from the list.
bool disconnect()
{
auto next = load(curr->next);
if (prev != nullptr) {
store(prev->next, next);
if (load(prev->removed)) {
// prev isn't the previous node any more.
return false;
}
} else if (!cas(list->head, curr, next)) {
@ -278,14 +315,10 @@ public:
Iterator begin() { return Iterator(this); }
// ConstIterator begin() { return ConstIterator(this); }
ConstIterator cbegin() { return ConstIterator(this); }
Iterator end() { return Iterator(); }
// ConstIterator end() { return ConstIterator(); }
ConstIterator cend() { return ConstIterator(); }
std::size_t size() { return count.load(std::memory_order_consume); }

View File

@ -5,6 +5,9 @@
using std::pair;
// Multi-thread-safe map based on a skiplist.
// K - type of key.
// T - type of data.
template <typename K, typename T>
class ConcurrentMap
{
@ -57,11 +60,13 @@ public:
list_it find(const K &key) { return accessor.find(key); }
// Returns iterator to item or first larger if it doesn't exist.
list_it_con find_or_larger(const T &item) const
{
return accessor.find_or_larger(item);
}
// Returns iterator to item or first larger if it doesn't exist.
list_it find_or_larger(const T &item)
{
return accessor.find_or_larger(item);

View File

@ -5,6 +5,9 @@
using std::pair;
// Multi-thread-safe multimap based on a skiplist.
// K - type of key.
// T - type of data.
template <typename K, typename T>
class ConcurrentMultiMap
{
@ -53,11 +56,13 @@ public:
list_it find(const K &key) { return accessor.find(key); }
// Returns iterator to item or first larger if it doesn't exist.
list_it_con find_or_larger(const T &item) const
{
return accessor.find_or_larger(item);
}
// Returns iterator to item or first larger if it doesn't exist.
list_it find_or_larger(const T &item)
{
return accessor.find_or_larger(item);

View File

@ -2,6 +2,8 @@
#include "data_structures/concurrent/skiplist.hpp"
// Multi-thread-safe multiset based on a skiplist.
// T - type of data.
template <class T>
class ConcurrentMultiSet
{
@ -36,11 +38,13 @@ public:
list_it find(const T &item) { return accessor.find(item); }
// Returns iterator to item or first larger if it doesn't exist.
list_it_con find_or_larger(const T &item) const
{
return accessor.find_or_larger(item);
}
// Returns iterator to item or first larger if it doesn't exist.
list_it find_or_larger(const T &item)
{
return accessor.find_or_larger(item);

View File

@ -3,6 +3,8 @@
#include "data_structures/concurrent/common.hpp"
#include "data_structures/concurrent/skiplist.hpp"
// Multi-thread-safe set based on a skiplist.
// T - type of data.
template <class T>
class ConcurrentSet
{
@ -37,18 +39,21 @@ public:
list_it find(const T &item) { return accessor.find(item); }
// Returns iterator to item or first larger if it doesn't exist.
template <class K>
list_it_con find_or_larger(const K &item) const
{
return accessor.find_or_larger(item);
}
// Returns iterator to item or first larger if it doesn't exist.
template <class K>
list_it find_or_larger(const K &item)
{
return accessor.find_or_larger(item);
}
// Returns iterator to item or first larger if it doesn't exist.
template <class K>
list_it_con cfind_or_larger(const K &item)
{

View File

@ -334,6 +334,8 @@ public:
MultiIterator(SkipList *skiplist, const K &data)
: data(data), skiplist(skiplist)
{
// We must find the first element with key K.
// All of the logic in this loop was taken from the insert method.
while (true) {
auto level = find_path(skiplist, H - 1, data, preds, succs);
if (level == -1) {
@ -368,32 +370,36 @@ public:
return succs[0]->value();
}
// WRONG: THIS MAY NOT BE TRUE IF SOMEONE REMOVES THE ELEMENT AFTER THIS
// ONE JUST AFTER THIS CHECK.
// bool has_next()
// {
// assert(succs[0] != nullptr);
// return succs[0].forward(0) != nullptr;
// }
bool has_value() { return succs[0] != nullptr; }
MultiIterator &operator++()
{
assert(succs[0] != nullptr);
// This whole method can be optimized if it's valid to expect height
// NOTE: This whole method can be optimized if it's valid to expect
// height
// of 1 on same key elements.
// First update preds and succs.
for (int i = succs[0]->height - 1; i >= 0; i--) {
preds[i] = succs[i];
succs[i] = preds[i]->forward(i);
}
// If a current value exists then check whether it is equal to our
// data.
if (succs[0] != nullptr) {
if (succs[0]->value() != data) {
// Current data isn't equal to our data, which means that this
// is the end of the list of same values.
succs[0] = nullptr;
} else {
// Current value is the same as our data but we must check that
// it is valid data, and if not we must wait for it to
// become valid.
while (succs[0] != succs[succs[0]->height - 1] ||
!succs[0]->flags.is_fully_linked()) {
usleep(250);
usleep(250); // Wait for the node to become linked.
// Re-read succs.
for (int i = succs[0]->height - 1; i >= 0; i--) {
succs[i] = preds[i]->forward(i);
}
@ -427,6 +433,8 @@ public:
bool remove()
{
assert(succs[0] != nullptr);
// Calls skiplist remove method.
return skiplist->template remove<K>(
data, preds, succs,
SkipList<T>::template MultiIterator<K>::update_path);
@ -436,14 +444,18 @@ public:
static int update_path(SkipList *skiplist, int start, const K &item,
Node *preds[], Node *succs[])
{
// NOTE: This could be done more efficiently than searching for the item
// again. We just need to use the information already present
// in preds and succs, because we know that this method is used
// exclusively by being passed into the skiplist remove method from
// the MultiIterator remove method.
// One optimization here would be to wait for is_fully_linked to be
// true. That way it doesn't have to be done in the constructor and
// the ++ operator.
int level_found = succs[0]->height - 1;
assert(succs[0] == succs[level_found]);
// for (int i = level_found; i >= 0; i--) {
// // Someone has done something
// if (preds[i]->forward(i) != succs[i]) {
for (auto it = MultiIterator<K>(skiplist, item); it.has_value();
it++) {
if (it.succs[0] == succs[0]) { // Found it
@ -453,12 +465,7 @@ public:
}
}
// Someone removed it
// assert(succs[0]->flags.is_marked());
return -1;
// }
// }
// // Everything is fine
// return level_found;
}
const K &data;
@ -751,10 +758,12 @@ private:
return valid;
}
// Inserts non-unique data into the list.
// NOTE: Uses modified logic from the insert method.
Iterator insert_non_unique(T &&data, Node *preds[], Node *succs[])
{
while (true) {
// TODO: before here was data.first
auto level = find_path(this, H - 1, data, preds, succs);
auto height = 1;
@ -849,7 +858,7 @@ private:
}
// Insert unique data
// TODO: This is almost all duplicate code from insert
// NOTE: This is almost all duplicate code from insert.
template <class K, class... Args>
std::pair<Iterator, bool> emplace(Node *preds[], Node *succs[], K &key,
Args &&... args)

View File

@ -20,11 +20,7 @@ protected:
public:
Combined() : data(0) {}
Combined(D *data, size_t off)
{
// assert((((size_t)data) & 0x7) == 0 && off < 8);
this->data = ((size_t)data) | off;
}
Combined(D *data, size_t off) { this->data = ((size_t)data) | off; }
bool valid() const { return data != 0; }
@ -72,6 +68,7 @@ protected:
size_t data;
};
// Base for all iterators. It can start from any point in map.
template <class It>
class IteratorBase : public Crtp<It>
{
@ -97,7 +94,11 @@ protected:
}
const RhBase *map;
// How many times we have advanced.
size_t advanced;
// Current position in array
size_t index;
public:
@ -123,12 +124,15 @@ protected:
do {
advanced++;
if (advanced >= map->capacity) {
// We have advanced more than the capacity of the map, so we
// are done.
map = nullptr;
advanced = index = ~((size_t)0);
break;
}
index = (index + 1) & mask;
} while (!map->array[index].valid());
} while (!map->array[index].valid()); // Check if there is element
// at current position.
return this->derived();
}
@ -221,6 +225,7 @@ public:
ConstIterator cend() const { return ConstIterator(); }
protected:
// Copies RAW BYTE data from the other RhBase.
void copy_from(const RhBase &other)
{
capacity = other.capacity;
@ -235,6 +240,7 @@ protected:
}
}
// Takes data from other RhBase.
void take_from(RhBase &&other)
{
capacity = other.capacity;
@ -245,16 +251,17 @@ protected:
other.capacity = 0;
}
void init_array(size_t size)
// Initializes the array with the given capacity.
void init_array(size_t capacity)
{
size_t bytes = sizeof(Combined) * size;
size_t bytes = sizeof(Combined) * capacity;
array = (Combined *)malloc(bytes);
std::memset(array, 0, bytes);
capacity = size;
this->capacity = capacity;
}
// True if the previous array has some values.
// Before array has to be released also.
// The previous array must be released by the caller.
bool increase_size()
{
if (capacity == 0) {
@ -276,6 +283,7 @@ protected:
}
public:
// Clears all data.
void clear()
{
free(array);
@ -297,7 +305,7 @@ protected:
return hash(std::hash<K>()(key)) & mask;
}
// This is rather expensive but offers good distribution.
// NOTE: This is rather expensive but offers good distribution.
size_t hash(size_t x) const
{
x = (x ^ (x >> 30)) * UINT64_C(0xbf58476d1ce4e5b9);

View File

@ -1,7 +1,7 @@
#include <functional>
#include "utils/crtp.hpp"
#include "data_structures/map/rh_common.hpp"
#include "utils/crtp.hpp"
#include "utils/option_ptr.hpp"
// HashMap with RobinHood collision resolution policy.
@ -47,17 +47,20 @@ public:
size_t now = index(key, mask);
size_t off = 0;
size_t border = 8 <= capacity ? 8 : capacity;
while (off < border) {
Combined other = array[now];
if (other.valid()) {
auto other_off = other.off();
if (other_off == off && key == other.ptr()->get_key()) {
// Found data.
return OptionPtr<D>(other.ptr());
} else if (other_off < off) { // Other is rich
break;
} // Else other has equal or greater offset, so he is poor.
} else {
// An empty slot means the searched data is not present.
break;
}
@ -76,16 +79,20 @@ public:
size_t now = index(key, mask);
size_t off = 0;
size_t border = 8 <= capacity ? 8 : capacity;
while (off < border) {
Combined other = array[now];
if (other.valid()) {
auto other_off = other.off();
if (other_off == off && key == other.ptr()->get_key()) {
// Element already exists.
return false;
} else if (other_off < off) { // Other is rich
// Set data.
array[now] = Combined(data, off);
// Move other data to higher indexes.
while (other.increment_off()) {
now = (now + 1) & mask;
auto tmp = array[now];
@ -97,9 +104,11 @@ public:
}
}
data = other.ptr();
break; // Cant insert removed element
break; // Can't insert the removed element because it would
// be too far from its real place.
} // Else other has equal or greater offset, so he is poor.
} else {
// Data can be placed in this empty slot.
array[now] = Combined(data, off);
count++;
return true;
@ -110,6 +119,8 @@ public:
}
}
// There isn't enough space for the element pointed to by data, so we must
// increase the array.
increase_size();
return insert(data);
}
@ -121,6 +132,7 @@ public:
size_t now = index(key, mask);
size_t off = 0;
size_t border = 8 <= capacity ? 8 : capacity;
while (off < border) {
Combined other = array[now];
if (other.valid()) {
@ -130,6 +142,7 @@ public:
key == other_ptr->get_key()) { // Found it
auto before = now;
// We must move other elements one slot lower.
do {
// This is alright even for off=0 on the found element
// because it won't be seen.
@ -139,7 +152,10 @@ public:
before = now;
now = (now + 1) & mask;
other = array[now];
} while (other.valid() && other.off() > 0);
} while (other.valid() &&
other.off() > 0); // Exit if whe encounter empty
// slot or data which is exactly
// in slot which it want's to be.
array[before] = Combined();
count--;
@ -149,6 +165,7 @@ public:
break;
} // Else other has equal or greater offset, so he is poor.
} else {
// If the element to be removed existed in the map, it would be here.
break;
}

View File

@ -15,6 +15,27 @@
// D must have method K& get_key()
// K must be comparable with ==.
// HashMap behaves as if it isn't the owner of the entries.
//
// The main idea of this MultiMap is a tweak of the RobinHood logic.
// In RobinHood, the offset from the preferred slot is equal to the number of
// slots in [current slot, preferred slot>.
// In this flavour of "multi RobinHood", the offset from the preferred slot is
// equal to the number of differently keyed elements between the current slot and
// the preferred slot.
// In the following examples slots will have keys as characters. So something
// like this: |a| will mean that this slot holds data with key 'a',
// like this: | | will mean an empty slot,
// like this: |...| will mean an arbitrary number of slots,
// like this: |b:a| will mean that 'a' wants to be in this slot but 'b' is in it.
//
// Examples:
// |...|a:a|...| => off(a) = 0
// |...|a:a|a|...|a|...| => off(a) = 0
// |...|b:a|a|...| => off(a) = 1
// |...|b:a|b|...|b|a|...| => off(a) = 1
// |...|c:a|b|a|...| => off(a) = 2
// |...|c:a|c|...|c|b|...|b||a|...|a|...| => off(a) = 2
// ...
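// (An illustrative walk-through with a hypothetical layout: if key 'a' hashes to
// slot 2 but slots 2 and 3 already hold elements keyed 'b', then 'a' is stored at
// slot 4 with off(a) = 1, because only one distinct key ('b') lies between its
// slot and its preferred slot; this is the |b:a|b|a| case above.)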
template <class K, class D, size_t init_size_pow2 = 2>
class RhHashMultiMap : public RhBase<K, D, init_size_pow2>
{
@ -124,14 +145,18 @@ public:
bool multi = false;
if (other_off == off && other.ptr()->get_key() == key) {
// Found the same
// Must skip same-keyed values to insert the new value at the
// end.
do {
now = (now + 1) & mask;
other = array[now];
if (!other.valid()) {
// Found an empty slot in which data can be added.
set(now, data, off);
return;
}
} while (other.equal(key, off));
// There is no empty slot after the same-keyed values.
multi = true;
} else if (other_off > off ||
other_poor(other, mask, start,
@ -142,6 +167,8 @@ public:
continue;
}
// Data will be inserted at the current slot and all other data
// will be displaced by one slot.
array[now] = Combined(data, off);
auto start_insert = now;
while (is_off_adjusted(other, mask, start_insert, now,
@ -152,6 +179,7 @@ public:
array[now] = other;
other = tmp;
if (!other.valid()) {
// Found an empty slot, which means we can finish now.
count++;
return;
}
@ -159,12 +187,14 @@ public:
data = other.ptr();
break; // Can't insert the removed element
} else {
// Found an empty slot for data.
set(now, data, off);
return;
}
}
}
// There isn't enough space for data.
increase_size();
add(data);
}
@ -179,14 +209,18 @@ public:
size_t off = 0;
size_t border = 8 <= capacity ? 8 : capacity;
Combined other = array[now];
while (other.valid() && off < border) {
const size_t other_off = other.off();
if (other_off == off && key == other.ptr()->get_key()) {
// Found data with the same key.
auto founded = capacity;
size_t started = now;
bool multi = false;
// Must find the slot with the searched data.
do {
if (other.ptr() == data) {
// Found it.
founded = now;
}
now = (now + 1) & mask;
@ -196,11 +230,14 @@ public:
break;
}
} while (other.equal(key, off) && (multi = true));
// multi = true is correct
if (founded == capacity) {
// Didn't find the data.
return false;
}
// Data will be removed by moving other data one slot
// back.
auto bef = before_index(now, mask);
array[founded] = array[bef];
@ -223,6 +260,8 @@ public:
break;
} else { // Else other has equal or greater off, so he is poor.
// Must skip values that share a key with each other but have a
// different key than data.
if (UNLIKELY(skip(now, other, other_off, mask))) {
break;
}
@ -317,54 +356,3 @@ private:
(end < start && p <= start && p > end);
}
};
// Unnecessary
// // Removes element. Returns removed element if it existed. It doesn't
// // specify which element from same key group will be removed.
// OptionPtr<D> remove(const K &key_in)
// {
//
// if (count > 0) {
// auto key = std::ref(key_in);
// size_t mask = this->mask();
// size_t now = index(key, mask);
// size_t off = 0;
// size_t checked = 0;
// size_t border = 8 <= capacity ? 8 : capacity;
// Combined other = array[now];
// while (other.valid() && off < border) {
// auto other_off = other.off();
// bool multi = false;
// if (other_off == off && key == other.ptr()->get_key()) {
// do {
// now = (now + 1) & mask;
// other = array[now];
// if (!other.valid()) {
// break;
// }
// other_off = other.off();
// } while (other_off == off &&
// other.ptr()->get_key() == key &&
// (multi = true)); // multi = true is correct
//
// auto bef = before_index(now, mask);
// auto ret = OptionPtr<D>(array[bef].ptr());
//
// move_before(now, bef, other, mask, multi);
// return ret;
//
// } else if (other_off < off) { // Other is rich
// break;
//
// } else { // Else other has equal or greater off, so he is
// poor.
// if (UNLIKELY(skip(now, other, other_off, mask))) {
// break;
// }
// off++;
// }
// }
// }
//
// return OptionPtr<D>();
// }

View File

@ -20,9 +20,17 @@ class Db
public:
using sptr = std::shared_ptr<Db>;
// import_snapshot will, in the constructor, import the latest snapshot into the db.
// NOTE: explicit is here to prevent the compiler from converting a const char *
// into a bool.
explicit Db(bool import_snapshot = true);
// import_snapshot will, in the constructor, import the latest snapshot into the db.
Db(const char *name, bool import_snapshot = true);
// import_snapshot will, in the constructor, import the latest snapshot into the db.
Db(const std::string &name, bool import_snapshot = true);
Db(const Db &db) = delete;
private:

View File

@ -53,7 +53,9 @@ public:
DbAccessor(Db &db, tx::Transaction &t);
//*******************VERTEX METHODS
// Returns an iterator of VertexAccessor over all vertices.
// TODO: Implement a class specially for this return type.
// NOTE: This implementation must be here to be able to infer the return type.
auto vertex_access()
{
return iter::make_map(
@ -63,12 +65,16 @@ public:
});
}
// Optionally returns the vertex with the given internal Id.
Option<const VertexAccessor> vertex_find(const Id &id);
// Creates new Vertex and returns filled VertexAccessor.
VertexAccessor vertex_insert();
// ******************* EDGE METHODS
// Returns an iterator of EdgeAccessor over all edges.
// TODO: Implement a class specially for this return type.
// NOTE: This implementation must be here to be able to infer the return type.
auto edge_access()
{
return iter::make_map(
@ -78,6 +84,7 @@ public:
});
}
// Optionally returns the Edge with the given internal Id.
Option<const EdgeAccessor> edge_find(const Id &id);
// Creates new Edge and returns filled EdgeAccessor.
@ -89,15 +96,17 @@ public:
VertexAccessor const &to);
// ******************* LABEL METHODS
// Finds or creates a label with the given name.
const Label &label_find_or_create(const char *name);
// True if a label with the given name exists.
bool label_contains(const char *name);
// ******************** TYPE METHODS
// Finds or creates edge_type with given name.
const EdgeType &type_find_or_create(const char *name);
// True if edge_type with given name exists.
bool type_contains(const char *name);
// ******************** PROPERTY METHODS
@ -135,6 +144,8 @@ public:
// True if commit was successful, or false if transaction was aborted.
bool commit();
// Aborts transaction.
void abort();
private:
@ -146,11 +157,3 @@ private:
DbTransaction db_transaction;
};
// ********************** CONVENIENT FUNCTIONS
template <class R>
bool option_fill(Option<R> &o)
{
return o.is_present() && o.get().fill();
}

View File

@ -28,14 +28,20 @@ public:
// Cleans edge part of database. MUST be called by one cleaner thread at
// one time.
// TODO: Should be extracted to a separate class which can enforce one thread
// at a time.
void clean_edge_section();
// Cleans vertex part of database. MUST be called by one cleaner thread at
// one time.
// TODO: Should be extracted to a separate class which can enforce one thread
// at a time.
void clean_vertex_section();
// Updates indexes of Vertex/Edges in index_updates. True if indexes are
// updated successfully. False means that transaction failed.
// TODO: Should be moved to the Indexes class where it will take this
// DbTransaction as an argument.
bool update_indexes();
// Will update indexes for given element TG::record_t. Actual update happens

View File

@ -10,9 +10,10 @@ class Cleaning
public:
// How many seconds a cleaning_cycle is; the cleaner will clean at most
// once.
// once. Starts the cleaner thread.
Cleaning(ConcurrentMap<std::string, Db> &dbs, size_t cleaning_cycle);
// Destroys this object after this thread joins cleaning thread.
~Cleaning();
private:
@ -22,5 +23,6 @@ private:
std::vector<std::unique_ptr<Thread>> cleaners;
// Should the cleaner continue cleaning.
std::atomic<bool> cleaning = {true};
};

View File

@ -30,7 +30,9 @@ private:
// currently active database
std::atomic<Db *> active_db;
// Cleaning thread.
Cleaning cleaning = {dbs, CONFIG_INTEGER(config::CLEANING_CYCLE_SEC)};
// Snapshotting thread.
Snapshoter snapshoter = {dbs, CONFIG_INTEGER(config::SNAPSHOT_CYCLE_SEC)};
};

View File

@ -98,7 +98,8 @@ public:
return true;
}
// Extracts parts while stripping data of array chars and qutation marks.
// Extracts parts of str while stripping parts of array chars and quotation
// marks. Parts are separated by the delimiter.
void extract(char *str, const char delimiter, vector<char *> &sub_str)
{
int head = 0;
@ -148,12 +149,9 @@ public:
}
sub_str.push_back(&str[head]);
//
// for (auto s : sub_str) {
// cout << "#" << s;
// }
}
// Optionally returns the vertex with the given import-local id if it exists.
Option<VertexAccessor> const &get_vertex(size_t id)
{
if (vertices.size() > id) {
@ -168,6 +166,8 @@ public:
DbAccessor &db;
Logger logger;
// Various marks and delimiters. They can be freely changed here and
// everything will still work.
char parts_mark = ',';
char parts_array_mark = ',';
char type_mark = ':';
@ -176,6 +176,6 @@ public:
char closed_bracket = ']';
protected:
// All created vertices which have import local id
// All created vertices which have an import-local id.
vector<Option<VertexAccessor>> vertices;
};

View File

@ -46,7 +46,19 @@ bool equal_str(const char *a, const char *b) { return strcasecmp(a, b) == 0; }
// CSV importer for importing multiple files regarding the same graph.
// The CSV format of a file should be the following:
// header
// line of data
// line of data
// ...
//
// The header should be composed of parts split by parts_mark. The number of
// parts should be the same as the number of parts in every line of data. Parts
// should be of the format name:type where name is an alphanumeric identifier of
// the data in that column and type should be one of: id, from, to, label, type,
// bool, int, long, float, double, string, bool[], int[], long[], float[],
// double[], string[].
// If the name is missing the column data won't be saved into the elements.
// If the type is missing the column will be interpreted as type string. If
// neither name nor type is present the column will be skipped.
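// An illustrative (made-up) vertex file under this format might look like:
//   id:id,name:string,age:int
//   1,Alice,30
//   2,Bob,25
// (the column names here are hypothetical and not taken from a real dataset).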
class CSVImporter : public BaseImporter
{
@ -70,6 +82,8 @@ public:
private:
// Loads data from the file and returns the number of loaded elements.
// TG - TypeGroup
// F - function which will create an element from a filled element skeleton.
template <class TG, class F>
size_t import(std::fstream &file, F f, bool vertex)
{
@ -104,10 +118,6 @@ private:
size_t line_no = 1;
ElementSkeleton es(db);
while (std::getline(file, line)) {
// if (line_no % 1000 == 0) {
// cout << line_no << endl;
// }
// cout << line << endl;
sub_str.clear();
es.clear();
@ -196,6 +206,7 @@ private:
if (tmp_vec.size() > 2) {
logger.error("To much sub parts in header part");
return make_option<unique_ptr<Filler>>();
} else if (tmp_vec.size() < 2) {
if (tmp_vec.size() == 1) {
logger.warn("Column: {} doesn't have specified type so string "
@ -203,16 +214,19 @@ private:
tmp_vec[0]);
name = tmp_vec[0];
type = _string;
} else {
logger.warn("Empty colum definition, skiping column.");
std::unique_ptr<Filler> f(new SkipFiller());
return make_option(std::move(f));
}
} else {
name = tmp_vec[0];
type = tmp_vec[1];
}
// Create an adequate filler.
if (equal_str(type, "id")) {
std::unique_ptr<Filler> f(
name[0] == '\0' ? new IdFiller<TG>()
@ -245,8 +259,6 @@ private:
// *********************** PROPERTIES
} else if (equal_str(type, "bool")) {
// return make_filler_property<BoolFiller>(vertex, name,
// Flags::Bool);
std::unique_ptr<Filler> f(
new BoolFiller<TG>(property_key<TG>(name, Flags::Bool)));
return make_option(std::move(f));

View File

@ -6,7 +6,7 @@
#include "storage/vertex_accessor.hpp"
// Holder for element data which can then be inserted as a vertex or edge into the
// database depending on the available data.
// database depending on the available data and the called add_* method.
class ElementSkeleton
{
@ -99,15 +99,6 @@ public:
Option<size_t> element_id() { return el_id; }
private:
// template <class A>
// void add_propreties(A &ra)
// {
// for (auto prop : properties) {
// assert(prop.prop.is_present());
// ra.set(prop.key, prop.prop.take());
// }
// }
DbAccessor &db;
Option<size_t> el_id;

View File

@ -5,6 +5,10 @@
#include "import/fillings/filler.hpp"
#include "utils/array_store.hpp"
// Parses an array of elements of type T.
// TG - Type group
// T - type of element in the array.
// A - property type in the database for holding arrays.
template <class TG, class T, class A>
class ArrayFiller : public Filler
{

View File

@ -6,6 +6,8 @@
#include "storage/model/properties/flags.hpp"
#include "storage/model/properties/property_family.hpp"
// Parses boolean.
// TG - Type group
template <class TG>
class BoolFiller : public Filler
{

View File

@ -6,6 +6,8 @@
#include "storage/model/properties/flags.hpp"
#include "storage/model/properties/property_family.hpp"
// Parses double.
// TG - Type group
template <class TG>
class DoubleFiller : public Filler
{

View File

@ -3,6 +3,8 @@
#include "import/element_skeleton.hpp"
#include "utils/option.hpp"
// Common base class for various classes which accept one part of a data line in
// the import, parse it and add it into the element skeleton.
class Filler
{
public:

View File

@ -6,6 +6,8 @@
#include "storage/model/properties/flags.hpp"
#include "storage/model/properties/property_family.hpp"
// Parses float.
// TG - Type group
template <class TG>
class FloatFiller : public Filler
{

View File

@ -6,6 +6,7 @@
#include "storage/model/properties/flags.hpp"
#include "storage/model/properties/property_family.hpp"
// Parses the 'from' vertex id for an edge.
class FromFiller : public Filler
{

View File

@ -2,6 +2,8 @@
#include "import/fillings/filler.hpp"
// Parses import local Id.
// TG - Type group
template <class TG>
class IdFiller : public Filler
{

View File

@ -6,6 +6,8 @@
#include "storage/model/properties/flags.hpp"
#include "storage/model/properties/property_family.hpp"
// Parses int32.
// TG - Type group
template <class TG>
class Int32Filler : public Filler
{

View File

@ -6,6 +6,8 @@
#include "storage/model/properties/flags.hpp"
#include "storage/model/properties/property_family.hpp"
// Parses int64.
// TG - Type group
template <class TG>
class Int64Filler : public Filler
{

View File

@ -3,6 +3,7 @@
#include "database/db_accessor.hpp"
#include "import/fillings/filler.hpp"
// Parses array of labels.
class LabelFiller : public Filler
{

View File

@ -6,6 +6,7 @@
#include "storage/model/properties/flags.hpp"
#include "storage/model/properties/property_family.hpp"
// Skips column.
class SkipFiller : public Filler
{

View File

@ -6,6 +6,8 @@
#include "storage/model/properties/flags.hpp"
#include "storage/model/properties/property_family.hpp"
// Parses string.
// TG - Type group
template <class TG>
class StringFiller : public Filler
{

View File

@ -6,6 +6,7 @@
#include "storage/model/properties/flags.hpp"
#include "storage/model/properties/property_family.hpp"
// Parses the import-local id of the 'to' vertex for an edge.
class ToFiller : public Filler
{

View File

@ -3,6 +3,7 @@
#include "database/db_accessor.hpp"
#include "import/fillings/filler.hpp"
// Parses type of edge.
class TypeFiller : public Filler
{

View File

@ -336,6 +336,21 @@ auto load_queries(Db &db)
.has_property(prop_name, args[0])
.clone_to(n) // Savepoint
.replace(r); // Load savepoint
// The above statements + .to().for_all([&](auto m) {}) will unroll into:
// for(auto edge:type.index.for_range(t)){
// auto from_vertex=edge.from();
// if(from_vertex.fill()){
// auto &prop=from_vertex.at(prop_name);
// if(prop==args[0]){
// auto to_vertex=edge.to();
// if(to_vertex.fill()){
// // Here you have all data.
// // n == from_vertex
// // m == to_vertex
// }
// }
// }
// }
auto it_vertex = t.vertex_access()
.fill()
@ -343,6 +358,24 @@ auto load_queries(Db &db)
.clone_to(n) // Savepoint
.out()
.type(type);
// The above statements + .to().for_all([&](auto m) {}) will unroll into:
// for(auto from_vertex:t.vertex_access(t)){
// if(from_vertex.fill()){
// auto &prop=from_vertex.at(prop_name);
// if(prop==args[0]){
// for(auto edge:from_vertex.out()){
// if(edge.edge_type() == type){
// auto to_vertex=edge.to();
// if(to_vertex.fill()){
// // Here you have all data.
// // n == from_vertex
// // m == to_vertex
// }
// }
// }
// }
// }
// }
if (it_type.count() > it_vertex.count()) {
// Going through vertices will probably be faster

View File

@ -13,14 +13,17 @@ namespace serialization
template <class W>
void serialize_vertex(VertexAccessor const &v, W &writer)
{
// Serialize vertex id
writer.start_vertex(v.id());
// Serialize labels
auto const &labels = v.labels();
writer.label_count(labels.size());
for (auto &label : labels) {
writer.label(label.get().str());
}
// Serialize properties
auto const &propertys = v.properties();
writer.property_count(propertys.size());
for (auto &prop : propertys) {
@ -35,10 +38,13 @@ void serialize_vertex(VertexAccessor const &v, W &writer)
template <class W>
void serialize_edge(EdgeAccessor const &e, W &writer)
{
// Serialize the ids of the from and to vertices.
writer.start_edge(e.from().id(), e.to().id());
// Serialize type
writer.edge_type(e.edge_type().str());
// Serialize properties
auto const &propertys = e.properties();
writer.property_count(propertys.size());
for (auto &prop : propertys) {
@ -57,12 +63,14 @@ std::pair<Id, VertexAccessor> deserialize_vertex(DbAccessor &db, D &reader)
auto v = db.vertex_insert();
auto old_id = reader.vertex_start();
// Deserialize labels
std::string s;
for (auto i = reader.label_count(); i > 0; i--) {
auto &label_key = db.label_find_or_create(reader.label().c_str());
v.add_label(label_key);
}
// Deserialize properties
for (auto i = reader.property_count(); i > 0; i--) {
auto &family =
db.vertex_property_family_get(reader.property_name().c_str());
@ -81,14 +89,17 @@ template <class D, class S>
EdgeAccessor deserialize_edge(DbAccessor &db, D &reader, S &store)
{
auto ids = reader.edge_start();
// Deserialize the ids of the from and to vertices.
VertexAccessor &from = store.at(ids.first);
VertexAccessor &to = store.at(ids.second);
auto e = db.edge_insert(from, to);
// Deserialize type
auto &edge_type_key = db.type_find_or_create(reader.edge_type().c_str());
e.edge_type(edge_type_key);
// Deserialize properties
for (auto i = reader.property_count(); i > 0; i--) {
auto &family =
db.edge_property_family_get(reader.property_name().c_str());

View File

@ -12,6 +12,9 @@
// Decodes stored snapshot.
// The caller must ensure the loading order is the same as the order stored by
// SnapshotEncoder.
// The main idea for knowing when something starts and ends is, at certain points,
// to try to deserialize a string and compare it with the logically expected string
// set by the SnapshotEncoder.
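// (Illustrative sketch with a hypothetical marker name: the encoder could write
// the literal string "label_name" before the label-name section; the decoder
// then tries to read a string at that point and compares it with "label_name"
// to detect where that section starts.)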
class SnapshotDecoder : public GraphDecoder
{
public:
@ -68,6 +71,8 @@ public:
T property()
{
if (decoder.is_list()) {
// We are deserializing an array.
auto size = decoder.list_header();
if (decoder.is_bool()) {
ArrayStore<bool> store;
@ -100,6 +105,8 @@ public:
return T::handle(std::move(store));
}
} else {
// We are deserializing a primitive.
if (decoder.is_bool()) {
return T::handle(decoder.read_bool());

View File

@ -11,8 +11,7 @@
#include "utils/stream_wrapper.hpp"
// Represents the creation of a snapshot. Contains all necessary information
// for
// write. Caller is responisble to structure his calls as following:
// for writing. The caller is responsible for structuring his calls as follows:
// * property_name_init
// * label_name_init
// * edge_type_name_init

View File

@ -53,6 +53,7 @@ private:
// Will return different name on every call.
std::string snapshot_file(std::time_t const &now, const char *type);
// Returns name of snapshot commit file.
std::string snapshot_commit_file();
// Path to the directory of the database. Ensures that all necessary directories

View File

@ -2,7 +2,6 @@
#include "mvcc/record.hpp"
#include "storage/model/edge_model.hpp"
// #include "storage/model/properties/traversers/jsonwriter.hpp"
class Edge : public mvcc::Record<Edge>
{

View File

@ -39,6 +39,7 @@ public:
CharStr char_str() { return CharStr(&id[0]); }
// Index of edges which have this type.
type_index_t &index() const;
private:

View File

@ -18,6 +18,7 @@ class Garbage
public:
Garbage(tx::Engine &e) : engine(e) {}
// Will safely dispose of data.
void dispose(tx::Snapshot<Id> &&snapshot, DeleteSensitive *data);
// The cleaner thread should call this method periodically. Removes data

View File

@ -49,8 +49,10 @@ public:
// Will change ordering of record to descending.
void set_descending();
// true if record is nullptr.
bool empty() const;
// True if this index record is valid for the given Transaction.
bool is_valid(tx::Transaction &t) const;
// True if it can be removed.
@ -62,7 +64,7 @@ public:
const K key;
private:
bool descending = false;
bool descending = false; // TODO: this can be passed as template argument.
typename TG::record_t *const record{nullptr};
typename TG::vlist_t *const vlist{nullptr};
};

View File

@ -1,40 +0,0 @@
// #pragma once
//
// TODO: DEPRICATED
//
// #include <memory>
//
// #include "data_structures/concurrent/concurrent_set.hpp"
// #include "storage/indexes/index_record.hpp"
//
// template <class T>
// class IndexRecordCollection
// {
// public:
// using index_record_t = IndexRecord<T>;
// using index_record_collection_t = ConcurrentSet<index_record_t>;
//
// IndexRecordCollection()
// : records(std::make_unique<index_record_collection_t>())
// {
// }
//
// void add(index_record_t &&record)
// {
// auto accessor = records->access();
// accessor.insert(std::forward<index_record_t>(record));
// }
//
// auto access()
// {
// return records->access();
// }
//
// // TODO: iterator and proxy
//
// private:
// std::unique_ptr<index_record_collection_t> records;
// };
//
// using VertexIndexRecordCollection = IndexRecordCollection<Vertex>;
// using EdgeIndexRecordCollection = IndexRecordCollection<Edge>;

View File

@ -4,12 +4,14 @@
#include "storage/type_group_edge.hpp"
#include "storage/type_group_vertex.hpp"
// Record for updating indexes of edge
struct IndexUpdateEdge
{
EdgeRecord *vlist;
Edge *record;
};
// Record for updating indexes of a vertex
struct IndexUpdateVertex
{
VertexRecord *vlist;

View File

@ -23,7 +23,7 @@ public:
// currently caller has to get index through object that contains
// the index
// TODO: redesign
//
// this was a nice try
@ -252,15 +252,20 @@ private:
auto oindex = holder.get_write(t.trans);
if (oindex.is_present()) {
// The index into which we must insert is present. This wouldn't be the
// case if someone had removed it.
auto index = oindex.get();
// Iterate over all elements and add them into the index. Fail if
// some insert fails.
bool res = iter.all([&](auto elem) {
// Try to insert record.
if (!index->insert(elem.first.create_index_record(
std::move(elem.second)))) {
// Index is probably unique.
// The index wasn't successfully filled so we should remove it
// and safely dispose of it.
auto owned_maybe = holder.remove_index(index);
if (owned_maybe.is_present()) {
db.garbage.dispose(db.tx_engine.snapshot(),
@ -272,6 +277,8 @@ private:
return true;
});
if (res) {
// The index has been updated accordingly and we can activate it
// for reading.
index->activate();
return true;
}
@ -286,6 +293,8 @@ private:
{
auto owned_maybe = ih.remove_index();
if (owned_maybe.is_present()) {
// The index was successfully removed so we are responsible for
// disposing of it safely.
db.garbage.dispose(db.tx_engine.snapshot(),
owned_maybe.get().release());
return true;

View File

@ -1,21 +0,0 @@
#pragma once
// TODO: DEPRICATED
template <class T>
struct Ascending
{
constexpr bool operator()(const T &lhs, const T &rhs) const
{
return lhs < rhs;
}
};
template <class T>
struct Descending
{
constexpr bool operator()(const T &lhs, const T &rhs) const
{
return lhs > rhs;
}
};

View File

@ -42,6 +42,7 @@ public:
CharStr char_str() const { return CharStr(name.c_str()); }
// Index of vertices with current label.
label_index_t &index() const;
private:

View File

@ -2,7 +2,6 @@
#include "data_structures/map/rh_hashmultimap.hpp"
#include "storage/edge_record.hpp"
// #include "storage/vertex_record.hpp"
class EdgeMap
{

View File

@ -15,6 +15,9 @@ template <class TG, class T>
using type_key_t =
typename PropertyFamily<TG>::PropertyType::template PropertyTypeKey<T>;
// Collection of stored properties.
// NOTE: Currently the underlying structure is a vector, which is fine for a smaller
// number of properties.
template <class TG>
class Properties
{
@ -41,10 +44,12 @@ public:
template <class T>
OptionPtr<const T> at(type_key_t<T> &key) const
{
auto f_key = key.family_key();
for (auto &prop : props) {
if (prop.key == f_key) {
return OptionPtr<const T>(&(prop.template as<T>()));
if (prop.key == key) {
if (prop.template is<T>()) {
return OptionPtr<const T>(&(prop.template as<T>()));
}
break;
}
}

View File

@ -104,13 +104,19 @@ public:
// Ordered on POINTERS to PropertyType.
// When compared with PropertyFamilyKey behaves as PropertyFamilyKey.
template <class T>
class PropertyTypeKey : public TotalOrdering<PropertyTypeKey<T>>
class PropertyTypeKey
: public TotalOrdering<PropertyTypeKey<T>>,
public TotalOrdering<PropertyFamilyKey, PropertyTypeKey<T>>,
public TotalOrdering<PropertyTypeKey<T>, PropertyFamilyKey>
{
friend class PropertyType;
PropertyTypeKey(const PropertyType &type) : type(type) {}
public:
PropertyFamilyKey family_key() { return PropertyFamilyKey(type); }
PropertyFamilyKey family_key() const
{
return PropertyFamilyKey(type);
}
Type const &prop_type() const { return type.type; }
@ -126,6 +132,30 @@ public:
return &(lhs.type) < &(rhs.type);
}
friend bool operator==(const PropertyFamilyKey &lhs,
const PropertyTypeKey &rhs)
{
return lhs == rhs.family_key();
}
friend bool operator<(const PropertyFamilyKey &lhs,
const PropertyTypeKey &rhs)
{
return lhs < rhs.family_key();
}
friend bool operator==(const PropertyTypeKey &lhs,
const PropertyFamilyKey &rhs)
{
return lhs.family_key() == rhs;
}
friend bool operator<(const PropertyTypeKey &lhs,
const PropertyFamilyKey &rhs)
{
return lhs.family_key() < rhs;
}
private:
const PropertyType &type;
};
@ -190,12 +220,15 @@ public:
friend bool operator==(const PropertyFamily &lhs,
const PropertyFamily &rhs);
// Place for index of TG::elements which have property from this family.
IndexHolder<TG, std::nullptr_t> index;
private:
const std::string name_v;
// This is exclusively for the getNull method.
PropertyType *null_type{nullptr};
// TODO: Because types won't be removed this could be done with a more efficient
// data structure.
ConcurrentMap<Type, std::unique_ptr<PropertyType>> types;

View File

@ -68,22 +68,22 @@
// Generates field in a union with a given type and name
#define GENERATE_UNION_FIELD(type_name, union_name) type_name union_name
// Generates signatures GENERATE_define_generator(type_name, union_name );
// Generates signatures define_generator(type_name, union_name );
// for every pair type,property
#define GENERATE_FOR_ALL_PROPERTYS(define_generator) \
GENERATE_##define_generator(Null, null_v); \
GENERATE_##define_generator(Bool, bool_v); \
GENERATE_##define_generator(Int32, int32_v); \
GENERATE_##define_generator(Int64, int64_v); \
GENERATE_##define_generator(Float, float_V); \
GENERATE_##define_generator(Double, double_v); \
GENERATE_##define_generator(String, string_v); \
GENERATE_##define_generator(ArrayBool, array_bool); \
GENERATE_##define_generator(ArrayInt32, array_int32); \
GENERATE_##define_generator(ArrayInt64, array_int64); \
GENERATE_##define_generator(ArrayFloat, array_float); \
GENERATE_##define_generator(ArrayDouble, array_double); \
GENERATE_##define_generator(ArrayString, array_string);
define_generator(Null, null_v); \
define_generator(Bool, bool_v); \
define_generator(Int32, int32_v); \
define_generator(Int64, int64_v); \
define_generator(Float, float_V); \
define_generator(Double, double_v); \
define_generator(String, string_v); \
define_generator(ArrayBool, array_bool); \
define_generator(ArrayInt32, array_int32); \
define_generator(ArrayInt64, array_int64); \
define_generator(ArrayFloat, array_float); \
define_generator(ArrayDouble, array_double); \
define_generator(ArrayString, array_string);
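// For example, GENERATE_FOR_ALL_PROPERTYS(GENERATE_UNION_FIELD) expands to
// "Null null_v; Bool bool_v; Int32 int32_v; ...", which is how the union
// members below are declared.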
// Holds property and has some means of determining its type.
// T must have method get_type() const which returns Type.
@ -97,12 +97,13 @@ class PropertyHolder
public:
PropertyHolder() = delete;
GENERATE_FOR_ALL_PROPERTYS(CONSTRUCTOR_FOR_DATA);
GENERATE_FOR_ALL_PROPERTYS(GENERATE_CONSTRUCTOR_FOR_DATA);
PropertyHolder(PropertyHolder const &other) : key(other.key)
{
switch (other.key.get_type().flags()) {
GENERATE_FOR_ALL_PROPERTYS(CASE_CLAUSE_FOR_CONSTRUCTOR_COPY);
GENERATE_FOR_ALL_PROPERTYS(
GENERATE_CASE_CLAUSE_FOR_CONSTRUCTOR_COPY);
default:
assert(false);
}
@ -112,7 +113,8 @@ public:
PropertyHolder(PropertyHolder &&other) : key(other.key)
{
switch (other.key.get_type().flags()) {
GENERATE_FOR_ALL_PROPERTYS(CASE_CLAUSE_FOR_CONSTRUCTOR_MOVE);
GENERATE_FOR_ALL_PROPERTYS(
GENERATE_CASE_CLAUSE_FOR_CONSTRUCTOR_MOVE);
default:
assert(false);
}
@ -123,7 +125,8 @@ public:
{
assert(other.key.get_type() == key.get_type());
switch (key.get_type().flags()) {
GENERATE_FOR_ALL_PROPERTYS(CASE_CLAUSE_FOR_CONSTRUCTOR_MOVE);
GENERATE_FOR_ALL_PROPERTYS(
GENERATE_CASE_CLAUSE_FOR_CONSTRUCTOR_MOVE);
default:
assert(false);
}
@ -133,7 +136,7 @@ public:
~PropertyHolder()
{
switch (key.get_type().flags()) {
GENERATE_FOR_ALL_PROPERTYS(CASE_CLAUSE_FOR_DESTRUCTOR);
GENERATE_FOR_ALL_PROPERTYS(GENERATE_CASE_CLAUSE_FOR_DESTRUCTOR);
default:
assert(false);
}
@ -161,7 +164,7 @@ public:
void accept(Handler &h) const
{
switch (key.get_type().flags()) {
GENERATE_FOR_ALL_PROPERTYS(CASE_CLAUSE_FOR_HANDLER);
GENERATE_FOR_ALL_PROPERTYS(GENERATE_CASE_CLAUSE_FOR_HANDLER);
default:
assert(false);
}
@ -172,7 +175,8 @@ public:
void accept_primitive(Handler &h) const
{
switch (key.get_type().flags()) {
GENERATE_FOR_ALL_PROPERTYS(CASE_CLAUSE_FOR_HANDLER_PRIMITIVE);
GENERATE_FOR_ALL_PROPERTYS(
GENERATE_CASE_CLAUSE_FOR_HANDLER_PRIMITIVE);
default:
assert(false);
}
@ -181,7 +185,7 @@ public:
std::ostream &print(std::ostream &stream) const
{
switch (key.get_type().flags()) {
GENERATE_FOR_ALL_PROPERTYS(CASE_CLAUSE_FOR_PRINT);
GENERATE_FOR_ALL_PROPERTYS(GENERATE_CASE_CLAUSE_FOR_PRINT);
default:
assert(false);
}
@ -197,7 +201,7 @@ public:
{
if (key == other.key) {
switch (key.get_type().flags()) {
GENERATE_FOR_ALL_PROPERTYS(CASE_CLAUSE_FOR_COMPARISON);
GENERATE_FOR_ALL_PROPERTYS(GENERATE_CASE_CLAUSE_FOR_COMPARISON);
default:
assert(false);
}
@ -211,7 +215,7 @@ public:
{
if (key.get_type() == other.key.get_type()) {
switch (key.get_type().flags()) {
GENERATE_FOR_ALL_PROPERTYS(CASE_CLAUSE_FOR_COMPARISON);
GENERATE_FOR_ALL_PROPERTYS(GENERATE_CASE_CLAUSE_FOR_COMPARISON);
default:
assert(false);
}
@ -269,7 +273,7 @@ private:
// Stored data.
union
{
GENERATE_FOR_ALL_PROPERTYS(UNION_FIELD);
GENERATE_FOR_ALL_PROPERTYS(GENERATE_UNION_FIELD);
};
};

View File

@ -8,7 +8,8 @@ template <class TG>
using property_key =
typename PropertyFamily<TG>::PropertyType::PropertyFamilyKey;
// Property Class designated for creation outside the database.
// Property Class designated for creation inside the database. Meant for
// storage.
template <class TG>
class StoredProperty : public PropertyHolder<property_key<TG>>
{
@ -16,7 +17,8 @@ class StoredProperty : public PropertyHolder<property_key<TG>>
const static class PropertyFamily<TG> null_family;
public:
// Needed for properties to return reference on stored property when they
// NOTE: Needed for properties to return a reference to the stored property
// when they
// don't contain the searched property.
const static class StoredProperty<TG> null;

View File

@ -29,7 +29,7 @@ public:
// True if vertex isn't connected to any other vertex.
bool isolated() const;
// False if it's label with it already.
// False if it already has the label.
bool add_label(const Label &label);
// False if it doesn't have label.

View File

@ -60,26 +60,6 @@ public:
return a > b || a == b;
}
// // true if no border or this > key or this >= key depends on border type.
// bool operator>(const T &other) const
// {
// return !key.is_present() || key.get() > other ||
// (type == Including && key.get() == other);
// }
//
// // true if this border is inclusive and key is present and key == other.
// bool operator==(const T &other) const
// {
// return type == Including && key.is_present() && key.get() == other;
// }
//
// // true if no border or this < key or this <= key depends on border type.
// bool operator<(const T &other) const
// {
// return !key.is_present() || key.get() < other ||
// (type == Including && key.get() == other);
// }
Option<T> key;
BorderType type;
};

View File

@ -98,6 +98,12 @@ public:
});
}
// Calls update on values and returns the result.
auto update()
{
return map([](auto ar) { return ar.update(); });
}
// Filters by the property under the given key.
template <class KEY>
auto has_property(KEY &key)
@ -112,13 +118,6 @@ public:
return filter([&](auto &va) { return va.at(key) == prop; });
}
// Copy-s all pasing value to t before they are returned.
// auto clone_to(Option<T> &t)
// {
// return iter::make_inspect<decltype(std::move(*this))>(
// std::move(*this), [&](auto &v) { t = Option<T>(v); });
// }
// Copies passing values to t before they are returned.
auto clone_to(Option<const T> &t)
{

View File

@ -5,6 +5,7 @@
// Represents the number of elements to be returned from an iterator, where the
// actual number is probably somewhere in [min, max].
// NOTE: Experimental
class Count : public TotalOrdering<Count>
{

View File

@ -14,6 +14,7 @@ namespace iter
// I - iterator type
// J - iterator type returned from OP
// OP - type of mapper function
// TODO: Split into flat operation and map operation.
template <class T, class I, class J, class OP>
class FlatMap : public IteratorBase<T>,
public Composable<T, FlatMap<T, I, J, OP>>

View File

@ -7,7 +7,7 @@ namespace iter
{
// Class which maps values returned by I iterator into value of type T with OP
// function and ends when op return empty optional.
// function and ends when op returns empty optional.
// T - type of return value
// I - iterator type
// OP - type of mapper function. OP: V -> Option<T>

View File

@ -5,8 +5,7 @@
#include <ext/aligned_buffer.h>
#include <utility>
// Optional object storage
// Optional object storage. It may or may not hold an object of type T.
template <class T>
class Option
{
@ -86,6 +85,7 @@ public:
return *this;
}
// True if the object is present.
bool is_present() const { return initialized; }
T &get() noexcept
@ -103,6 +103,7 @@ public:
}
}
// Returns a ref to the object if present, else other.
T const &get_or(T const &other) const
{
if (is_present()) {
@ -203,3 +204,10 @@ auto make_option_const(const T &&data)
{
return Option<const T>(std::move(data));
}
// HELPER FUNCTIONS
template <class R>
bool option_fill(Option<R> &o)
{
return o.is_present() && o.get().fill();
}
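// An illustrative use (a sketch; db and id are assumed from the caller's context):
//   auto maybe_vertex = db.vertex_find(id);
//   if (option_fill(maybe_vertex)) {
//       // maybe_vertex.get() is present and filled for this transaction.
//   }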

View File

@ -1,5 +1,6 @@
#pragma once
// Like Option, just for pointers. More efficient than Option.
template <class T>
class OptionPtr
{

View File

@ -2,6 +2,7 @@
#include "utils/total_ordering.hpp"
// Type which represents nothing.
class Void : public TotalOrdering<Void>
{
public:

View File

@ -6,15 +6,18 @@
// Implementations should follow the form:
// border_return_type border_class::method_name(arguments){
// return
// CALL(method_name(trans(arguments)))/HALF_CALL(method_name(trans(arguments)));
// CALL(method_name(trans(arguments)))
// or
// HALF_CALL(method_name(trans(arguments)));
// }
// **************************** HELPER DEFINES *******************************//
// returns transformed pointer
// returns transformed this pointer
#define THIS (trans(this))
// Performs call x on transformed border class.
// In a border class, performs call x on the original class.
#define HALF_CALL(x) (THIS->x)
// Performs call x on transformed border class and returns transformed output.
// In a border class, performs call x on the original class and produces the
// transformed output.
#define CALL(x) trans(HALF_CALL(x))
// Creates destructor for border type x which is original type y.
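As a hedged illustration of the pattern these defines support (BorderThing, OriginalArg and method are made-up names; the real instances follow below):

// A border method written with the helpers:
//     ReturnBorder BorderThing::method(OriginalArg &a) { return CALL(method(trans(a))); }
// expands roughly to:
//     ReturnBorder BorderThing::method(OriginalArg &a)
//     {
//         return trans(trans(this)->method(trans(a))); // THIS + HALF_CALL + outer trans
//     }
// The HALF_CALL form is used when the return value needs no translation:
//     void BorderThing::method(OriginalArg &a) { HALF_CALL(method(trans(a))); }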
@ -58,24 +61,6 @@
namespace barrier
{
// ************************* EdgePropertyType
// #define FOR_ALL_PROPS_delete_EdgePropertyType(x) \
// template <> \
// EdgePropertyType<x>::~EdgePropertyType() \
// { \
// HALF_CALL(~PropertyTypeKey()); \
// }
// INSTANTIATE_FOR_PROPERTY(FOR_ALL_PROPS_delete_EdgePropertyType)
// ************************* VertexPropertyType
// #define FOR_ALL_PROPS_delete_VertexPropertyType(x) \
// template <> \
// VertexPropertyType<x>::~VertexPropertyType() \
// { \
// HALF_CALL(~PropertyTypeKey()); \
// }
// // INSTANTIATE_FOR_PROPERTY(FOR_ALL/_PROPS_delete_VertexPropertyType)
// ***************** Label
VertexIndex<std::nullptr_t> &Label::index() const { return CALL(index()); }
@ -542,12 +527,6 @@ void RecordStream<Stream>::write(const EdgeStoredProperty &prop)
HALF_CALL(write(trans(prop)));
}
// template <class Stream>
// void RecordStream<Stream>::write_null()
// {
// HALF_CALL(write_null());
// }
template <class Stream>
void RecordStream<Stream>::write(const Null &v)
{
@ -699,31 +678,31 @@ template class RecordStream<io::Socket>;
// **************************** ERROR EXAMPLES ****************************** //
// **************************** COMPILE TIME
/*
error:
### error:
../libmemgraph.a(barrier.cpp.o): In function `Option<barrier::VertexAccessor
const> Option<VertexAccessor const>::map<barrier::VertexAccessor const>()':
/home/ktf/Workspace/memgraph/include/utils/option.hpp:111: undefined reference
to `barrier::VertexAccessor::VertexAccessor<VertexAccessor const>(VertexAccessor
const&&)'
description:
# description:
Constructor VertexAccessor<::VertexAccessor const>(::VertexAccessor const&&)
isn't written.
error:
### error:
../libmemgraph.a(barrier.cpp.o): In function `barrier::EdgeAccessor::from()
const':
/home/ktf/Workspace/memgraph/src/barrier/barrier.cpp:501: undefined reference to
`barrier::VertexAccessor::VertexAccessor<barrier::VertexAccessor
const>(barrier::VertexAccessor const&&)'
description:
# description:
Move constructor VertexAccessor<VertexAccessor const>(VertexAccessor const&&)
isn't defined.
error:
### error:
/home/ktf/Workspace/memgraph/src/barrier/barrier.cpp:282:12: error: call to
'trans' is ambiguous
return CALL(at(trans(key)));
@ -769,7 +748,7 @@ from macro 'TRANSFORM_REF'
x const &trans(y const &l) { return ref_as<x const>(l); } \
...
description:
# description:
There is no valid transformation for types on which trans is called.
*/

View File

@ -13,7 +13,6 @@ DbAccessor::DbAccessor(Db &db, tx::Transaction &t)
}
// VERTEX METHODS
// auto DbAccessor::vertex_access()
Option<const VertexAccessor> DbAccessor::vertex_find(const Id &id)
{
@ -36,8 +35,11 @@ EdgeAccessor DbAccessor::edge_insert(VertexAccessor &from, VertexAccessor &to)
{
auto edge_accessor = db_transaction.db.graph.edges.insert(
db_transaction, from.vlist, to.vlist);
// Connect the edge with the from and to vertices.
from->data.out.add(edge_accessor.vlist);
to->data.in.add(edge_accessor.vlist);
return edge_accessor;
}
@ -46,8 +48,11 @@ EdgeAccessor DbAccessor::edge_insert(VertexAccessor const &from,
{
auto edge_accessor = db_transaction.db.graph.edges.insert(
db_transaction, from.vlist, to.vlist);
// Connect the edge with the updated from and to vertices.
from.update()->data.out.add(edge_accessor.vlist);
to.update()->data.in.add(edge_accessor.vlist);
return edge_accessor;
}
@ -107,6 +112,7 @@ bool DbAccessor::commit()
db_transaction.trans.commit();
return true;
} else {
// Index update wasn't successful so we are aborting the transaction.
db_transaction.trans.abort();
return false;
}
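Putting the accessor methods above together, a rough usage sketch (illustrative only; db is assumed to exist, Id is assumed to be constructible from an integer, and error handling is reduced to asserts):

DbAccessor t(db);
auto from = t.vertex_find(Id(1));
auto to = t.vertex_find(Id(2));
assert(option_fill(from) && option_fill(to));
// edge_insert wires the new edge into the out/in lists of both vertices.
t.edge_insert(from.get(), to.get());
// commit() first tries to update indexes; if that fails the transaction is
// aborted and false is returned.
assert(t.commit());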

View File

@ -12,6 +12,7 @@
Cleaning::Cleaning(ConcurrentMap<std::string, Db> &dbs, size_t cleaning_cycle)
: dbms(dbs), cleaning_cycle(cleaning_cycle)
{
// Start the cleaning thread
cleaners.push_back(
std::make_unique<Thread>([&, cleaning_cycle = cleaning_cycle ]() {
Logger logger = logging::log->logger("Cleaner");
@ -22,8 +23,11 @@ Cleaning::Cleaning(ConcurrentMap<std::string, Db> &dbs, size_t cleaning_cycle)
while (cleaning.load(std::memory_order_acquire)) {
std::time_t now = std::time(nullptr);
// Maybe it's cleaning time.
if (now >= last_clean + cleaning_cycle) {
logger.info("Started cleaning cyle");
// Clean all databases
for (auto &db : dbs.access()) {
logger.info("Cleaning database \"{}\"", db.first);
DbTransaction t(db.second);
@ -43,11 +47,15 @@ Cleaning::Cleaning(ConcurrentMap<std::string, Db> &dbs, size_t cleaning_cycle)
db.first);
logger.error("{}", e.what());
}
// NOTE: We should commit even if an error occurred.
t.trans.commit();
}
last_clean = now;
logger.info("Finished cleaning cyle");
} else {
// Cleaning isn't scheduled for now so we should sleep.
std::this_thread::sleep_for(std::chrono::seconds(1));
}
}
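For example, with cleaning_cycle set to 300 the loop wakes up roughly once per second, starts a cleaning pass at the first wake-up that is at least 300 seconds after last_clean, and otherwise just sleeps again, so between passes the cost is one comparison per second.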
@ -56,8 +64,10 @@ Cleaning::Cleaning(ConcurrentMap<std::string, Db> &dbs, size_t cleaning_cycle)
Cleaning::~Cleaning()
{
// Stop cleaning
cleaning.store(false, std::memory_order_release);
for (auto &t : cleaners) {
// Join with cleaners
t.get()->join();
}
}

View File

@ -5,6 +5,7 @@ Db &Dbms::active()
{
Db *active = active_db.load(std::memory_order_acquire);
if (UNLIKELY(active == nullptr)) {
// There is no active database.
return create_default();
} else {
return *active;
@ -19,6 +20,8 @@ Db &Dbms::active(const std::string &name)
// create db if it doesn't exist
auto it = acc.find(name);
if (it == acc.end()) {
// It doesn't exist.
Snapshoter &snap = snapshoter;
it = acc.emplace(name, std::forward_as_tuple(name),
std::forward_as_tuple(name))

View File

@ -21,8 +21,11 @@ bool SnapshotEngine::make_snapshot()
std::lock_guard<std::mutex> lock(guard);
std::time_t now = std::time(nullptr);
if (make_snapshot(now, "full")) {
// Snapshot was created so we should check if some older snapshots
// should be deleted.
clean_snapshots();
return true;
} else {
return false;
}
@ -31,6 +34,8 @@ bool SnapshotEngine::make_snapshot()
void SnapshotEngine::clean_snapshots()
{
logger.info("Started cleaning commit_file");
// We first count the number of snapshots that we know about in the commit
// file.
std::vector<std::string> lines;
{
std::ifstream commit_file(snapshot_commit_file());
@ -43,14 +48,19 @@ void SnapshotEngine::clean_snapshots()
int n = lines.size() - max_retained_snapshots;
if (n > 0) {
// We have too many snapshots so we should delete some.
std::ofstream commit_file(snapshot_commit_file(), std::fstream::trunc);
// First we will rewrite the commit file to contain only the
// max_retained_snapshots newest snapshots.
for (auto i = n; i < lines.size(); i++) {
commit_file << lines[i] << std::endl;
}
auto res = sys::flush_file_to_disk(commit_file);
if (res == 0) {
// Commit file was successfully changed so we can now delete the
// snapshots which we evicted from the commit file.
commit_file.close();
logger.info("Removed {} snapshot from commit_file", n);
@ -93,12 +103,16 @@ bool SnapshotEngine::make_snapshot(std::time_t now, const char *type)
auto old_trans =
tx::TransactionRead(db.tx_engine); // Overengineered for incremental
// snapshot. Can be removed.
// Everything is ready for creation of snapshot.
snapshot(t, snap, old_trans);
auto res = sys::flush_file_to_disk(snapshot_file);
if (res == 0) {
// Snapshot was successfully written to disk.
t.trans.commit();
success = true;
} else {
logger.error("Error {} occured while flushing snapshot file", res);
t.trans.abort();
@ -112,6 +126,8 @@ bool SnapshotEngine::make_snapshot(std::time_t now, const char *type)
}
if (success) {
// Snapshot was successfully created, but for it to be reachable for
// import we must add it to the end of the commit file.
std::ofstream commit_file(snapshot_commit_file(), std::fstream::app);
commit_file << snapshot_file_name << std::endl;
@ -120,6 +136,8 @@ bool SnapshotEngine::make_snapshot(std::time_t now, const char *type)
if (res == 0) {
commit_file.close();
snapshoted_no_v.fetch_add(1);
// Snapshot was successfully committed.
} else {
logger.error("Error {} occured while flushing commit file", res);
}
@ -139,6 +157,7 @@ bool SnapshotEngine::import()
std::ifstream commit_file(snapshot_commit_file());
// We first load all known snapshot file names from the commit file.
std::vector<std::string> snapshots;
std::string line;
while (std::getline(commit_file, line)) {
@ -166,8 +185,7 @@ bool SnapshotEngine::import()
} else {
logger.info("Unuccesfully tryed to import snapshot "
"\"{}\" because indexes where unuccesfully "
"with updating",
"\"{}\"",
snapshots.back());
}
@ -179,6 +197,7 @@ bool SnapshotEngine::import()
}
snapshots.pop_back();
// We will try to import an older snapshot if one exists.
}
} catch (const std::exception &e) {
@ -289,11 +308,13 @@ void SnapshotEngine::add_indexes(std::vector<IndexDefinition> &v)
std::string SnapshotEngine::snapshot_file(std::time_t const &now,
const char *type)
{
// Nanosecond part of the current time (always less than one second).
auto now_nano = std::chrono::time_point_cast<std::chrono::nanoseconds>(
std::chrono::high_resolution_clock::now())
.time_since_epoch()
.count() %
(1000 * 1000 * 1000);
return snapshot_db_dir() + "/" + std::to_string(now) + "_" +
std::to_string(now_nano) + "_" + type;
}
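So, assuming snapshot_db_dir() resolves to something like snapshot/<db-name> (illustrative, the actual folder is configured elsewhere), a full snapshot taken at UNIX time 1474237356 ends up with a name of the form snapshot/<db-name>/1474237356_123456789_full, where the middle component is the sub-second nanosecond remainder computed above.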
@ -308,9 +329,11 @@ std::string SnapshotEngine::snapshot_db_dir()
if (!sys::ensure_directory_exists(snapshot_folder)) {
logger.error("Error while creating directory \"{}\"", snapshot_folder);
}
auto db_path = snapshot_folder + "/" + db.name();
if (!sys::ensure_directory_exists(db_path)) {
logger.error("Error while creating directory \"{}\"", db_path);
}
return db_path;
}

View File

@ -12,6 +12,7 @@ Snapshoter::Snapshoter(ConcurrentMap<std::string, Db> &dbs,
size_t snapshot_cycle)
: snapshot_cycle(snapshot_cycle), dbms(dbs)
{
// Start snapshoter thread.
thread = std::make_unique<Thread>([&]() {
logger = logging::log->logger("Snapshoter");
logger.info("Started with snapshoot cycle of {} sec",
@ -46,7 +47,9 @@ void Snapshoter::run()
make_snapshots();
last_snapshot = now;
} else {
// It isn't time for a snapshot so we should wait.
std::this_thread::sleep_for(std::chrono::seconds(1));
}
}

View File

@ -7,6 +7,7 @@
void EdgeAccessor::remove() const
{
RecordAccessor::remove();
auto from_v = from();
bool f_from = from_v.fill();
assert(f_from);
@ -15,6 +16,7 @@ void EdgeAccessor::remove() const
bool f_to = to_v.fill();
assert(f_to);
// Detach edge from vertices.
from_v.update().record->data.out.remove(vlist);
to_v.update().record->data.in.remove(vlist);
}

View File

@ -10,6 +10,8 @@ void Garbage::clean()
{
for (auto it = gar.begin(); it != gar.end(); it++) {
if (it->first.all_finished(engine) && it.remove()) {
// All transactions which could have seen the data are finished and this
// thread successfully removed the item from the list.
it->second->~DeleteSensitive();
}
}

View File

@ -51,10 +51,14 @@ auto NonUniqueUnorderedIndex<T, K>::for_range_exact(DbAccessor &t_v,
it = list.cbegin(), end = list.cend(), from = from_v, to = to_v,
t = t_v
]() mutable->auto {
// NonUniqueUnorderedIndex is stupid so it must iterate through all
// index records to determine which are inside the borders.
while (it != end) {
const IndexRecord<T, K> &r = *it;
if (from < r.key && to > r.key &&
r.is_valid(t.db_transaction.trans)) {
// Record r is inside the borders and is valid for the current
// transaction.
const typename T::accessor_t acc =
r.access(t.db_transaction);
it++;

View File

@ -57,9 +57,13 @@ auto UniqueOrderedIndex<T, K>::for_range_exact(DbAccessor &t_v,
// Sorted order must be checked
if (this->type().order == Ascending && from_v.key.is_present()) {
begin = acc.cfind_or_larger(from_v);
} else if (this->type().order == Descending && to_v.key.is_present()) {
// Order is descending so we have to start from the end border and
// iterate towards the from border.
begin = acc.cfind_or_larger(to_v);
end = from_v;
} else {
assert(this->type().order != None);
}
@ -71,9 +75,15 @@ auto UniqueOrderedIndex<T, K>::for_range_exact(DbAccessor &t_v,
it = std::move(begin), b_end = std::move(end), t = t_v,
hold_acc = std::move(acc)
]() mutable->auto {
// UniqueOrderedIndex is smart so it only has to iterate through
// records which are inside the borders. It knows that it will start
// with items at or above from_v, but it needs to check whether it
// has reached the end border.
while (b_end >= it->key) {
const IndexRecord<T, K> &r = *it;
if (r.is_valid(t.db_transaction.trans)) {
// Record r is inside the borders and is valid for the current
// transaction.
const typename T::accessor_t acc =
r.access(t.db_transaction);
it++;

View File

@ -12,6 +12,7 @@ bool IndexHolder<TG, K>::set_index(std::unique_ptr<IndexBase<TG, K>> inx)
if (index.compare_exchange_strong(npr<TG, K>, inx.get())) {
inx.release();
return true;
} else {
return false;
}
@ -23,6 +24,7 @@ OptionPtr<IndexBase<TG, K>> IndexHolder<TG, K>::get_read() const
auto loaded = index.load(std::memory_order_acquire);
if (loaded == nullptr || !loaded->can_read()) {
return OptionPtr<IndexBase<TG, K>>();
} else {
return make_option_ptr(loaded);
}
@ -35,6 +37,7 @@ IndexHolder<TG, K>::get_write(const tx::Transaction &t) const
auto loaded = index.load(std::memory_order_acquire);
if (loaded == nullptr || !loaded->is_obliged_to_insert(t)) {
return OptionPtr<IndexBase<TG, K>>();
} else {
return make_option_ptr(loaded);
}
@ -46,6 +49,7 @@ IndexHolder<TG, K>::remove_index(IndexBase<TG, K> *expected)
{
if (index.compare_exchange_strong(expected, nullptr)) {
return make_option(std::unique_ptr<IndexBase<TG, K>>(expected));
} else {
return make_option(std::unique_ptr<IndexBase<TG, K>>());
}
@ -57,6 +61,7 @@ Option<std::unique_ptr<IndexBase<TG, K>>> IndexHolder<TG, K>::remove_index()
auto removed = index.exchange(nullptr);
if (removed == nullptr) {
return make_option<std::unique_ptr<IndexBase<TG, K>>>();
} else {
return make_option(std::unique_ptr<IndexBase<TG, K>>(removed));
}
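Note on the pattern above: set_index only succeeds while the holder is still empty, because the compare_exchange against nullptr fails otherwise; a losing caller gets false back, and the index it passed in is destroyed when the by-value unique_ptr parameter goes out of scope, since release() is only called on success. Symmetrically, remove_index() hands ownership back out wrapped in an Option, so exactly one owner is ever responsible for deleting the index.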

View File

@ -11,7 +11,7 @@ bool Indexes::add_index(IndexDefinition id)
std::function<bool(DbTransaction &)> finish = [](auto &t) { return false; };
// Creates transaction and during it's creation adds index into it's
// place. Also created finish closure which will add necessary elements
// place. Also creates finish closure which will add necessary elements
// into index.
DbTransaction t(db, db.tx_engine.begin([&](auto &t) mutable {
size_t code = id.loc.location_code();

View File

@ -35,8 +35,7 @@ void Properties<TG>::set(StoredProperty<TG> &&value)
for (auto &prop : props) {
if (prop.key == value.key) {
// It is necessary to change key because the types from before and
// now
// could be different.
// now could be different.
StoredProperty<TG> &sp = const_cast<StoredProperty<TG> &>(prop);
sp = std::move(value);
return;

View File

@ -53,16 +53,27 @@ void VertexAccessor::remove() const
{
RecordAccessor::remove();
// Detach all out edges.
for (auto evr : record->data.out) {
auto ea = EdgeAccessor(evr, db);
// Delete edge
ea.vlist->remove(db.trans);
// Remove the edge from its to vertex.
auto to_v = ea.to();
to_v.fill();
to_v.update().record->data.in.remove(ea.vlist);
}
// Detach all in edges.
for (auto evr : record->data.in) {
auto ea = EdgeAccessor(evr, db);
// Delete edge
ea.vlist->remove(db.trans);
// Remove the edge from its from vertex.
auto from_v = ea.from();
from_v.fill();
from_v.update().record->data.out.remove(ea.vlist);

View File

@ -61,7 +61,7 @@ void add_property(Db &db, StoredProperty<TypeGroupVertex> &prop)
{
DbAccessor t(db);
t.vertex_access().fill().for_all([&](auto va) { va.set(prop); });
t.vertex_access().fill().update().for_all([&](auto va) { va.set(prop); });
assert(t.commit());
}
@ -73,7 +73,7 @@ void add_vertex_property_serial_int(Db &db, PropertyFamily<TypeGroupVertex> &f)
auto key = f.get(Int64::type).family_key();
size_t i = 0;
t.vertex_access().fill().for_all([&](auto va) mutable {
t.vertex_access().fill().update().for_all([&](auto va) mutable {
va.set(StoredProperty<TypeGroupVertex>(Int64(i), key));
i++;
});
@ -88,7 +88,7 @@ void add_edge_property_serial_int(Db &db, PropertyFamily<TypeGroupEdge> &f)
auto key = f.get(Int64::type).family_key();
size_t i = 0;
t.edge_access().fill().for_all([&](auto va) mutable {
t.edge_access().fill().update().for_all([&](auto va) mutable {
va.set(StoredProperty<TypeGroupEdge>(Int64(i), key));
i++;
});
@ -186,7 +186,7 @@ int main(void)
logging::init_async();
logging::log->pipe(std::make_unique<Stdout>());
size_t cvl_n = 1000;
size_t cvl_n = 1;
std::string create_vertex_label =
"CREATE (n:LABEL {name: \"cleaner_test\"}) RETURN n";
@ -223,11 +223,22 @@ int main(void)
Db db("index", false);
assert(db.indexes().add_index(vertex_property_nonunique_unordered));
assert(db.indexes().add_index(edge_property_nonunique_unordered));
run(cvl_n, create_vertex_label, db);
add_edge(cvl_n, db);
auto sp = StoredProperty<TypeGroupVertex>(
Int64(0), db.graph.vertices.property_family_find_or_create("prop")
.get(Int64::type)
.family_key());
add_property(db, sp);
assert(cvl_n ==
size(db, db.graph.vertices.property_family_find_or_create("prop")
.index));
add_edge(cvl_n, db);
add_edge_property_serial_int(
db, db.graph.edges.property_family_find_or_create("prop"));
assert(
cvl_n ==
size(db,