diff --git a/data_structures/skiplist/skiplist.hpp b/data_structures/skiplist/skiplist.hpp
index aa6ece728..0d6c8f218 100644
--- a/data_structures/skiplist/skiplist.hpp
+++ b/data_structures/skiplist/skiplist.hpp
@@ -1,82 +1,82 @@
-#ifndef MEMGRAPH_DATA_STRUCTURES_SKIPLIST_HPP
-#define MEMGRAPH_DATA_STRUCTURES_SKIPLIST_HPP
+#ifndef MEMGRAPH_DATA_STRUCTURES_SKIPLIST_SKIPLIST_HPP
+#define MEMGRAPH_DATA_STRUCTURES_SKIPLIST_SKIPLIST_HPP
 
 #include <algorithm>
 #include <cstdlib>
 #include <array>
 
-#include "utils/random/xorshift.hpp"
-
-size_t new_height(int max_height)
-{
-    uint64_t rand = xorshift::next();
-    size_t height = 0;
-
-    while(max_height-- && (rand >>= 1) & 1)
-        height++;
-
-    return height;
-}
-
+#include "new_height.hpp"
+#include "skipnode.hpp"
 
 template <class K, class T>
-struct SkipNode
-{
-    SkipNode(K* key, T* item)
-        : key(key), item(item) {}
-
-    K* key;
-    T* item;
-
-    SkipNode* up;
-    SkipNode* forward;
-};
-
-template <class K,
-          class T,
-          size_t height = 16>
 class SkipList
 {
     using Node = SkipNode<K, T>;
-    using Tower = std::array<Node, height>;
 
 public:
-    SkipList()
-    {
-        head.fill(nullptr);
-    }
+    SkipList(size_t max_height);
 
-    T* get(const K* const key)
-    {
-        size_t h = height;
-
-        while(h--)
-        {
-
-        }
-    }
-
-    void put(const K* key, T* item)
-    {
-        auto* node = new SkipNode<T, K>(key, item);
-
-        Tower trace;
-
-        size_t h = height - 1;
-
-        while(h--)
-        {
-
-        }
-    }
-
-    void del(const K* const key)
-    {
-
-    }
+    T* get(const K* const key);
+    void put(const K* key, T* item);
+    void del(const K* const key);
 
 private:
-    Tower head;
+    size_t level;
+    Node* header;
 };
+
+template <class K, class T>
+SkipList<K, T>::SkipList(size_t level)
+    : level(level)
+{
+    header = new Node(level);
+    auto sentinel = new Node();
+
+    for(int i = 0; i < level; ++i)
+        header->forward[i] = sentinel;
+}
+
+template <class K, class T>
+T* SkipList<K, T>::get(const K* const key)
+{
+    Node* current = header;
+
+    for(int i = level - 1; i >= 0; --i)
+    {
+        Node* next = current->forward[i];
+
+        // advance while the next node's key is still smaller than the
+        // one we're looking for (a null key marks the sentinel)
+        while(next->key != nullptr && *next->key < *key)
+        {
+            current = next;
+            next = current->forward[i];
+        }
+    }
+
+    // current is now the rightmost node with a key smaller than the
+    // searched one, so its level 0 successor either holds the key
+    // or the key is not in the list
+    Node* candidate = current->forward[0];
+
+    if(candidate->key != nullptr && *candidate->key == *key)
+        return candidate->item;
+
+    return nullptr;
+}
+
+template <class K, class T>
+void SkipList<K, T>::put(const K* key, T* item)
+{
+    auto height = new_height(level);
+    auto node = new Node(key, item, height);
+
+    // for every level, remember the last node before the insertion point
+    // so its forward pointers can be updated when linking the new node
+    Node* trace[level];
+
+    Node* current = header;
+
+    for(int i = level - 1; i >= 0; --i)
+    {
+        Node* next = current->forward[i];
+
+        while(next->key != nullptr && *next->key < *key)
+        {
+            current = next;
+            next = current->forward[i];
+        }
+
+        trace[i] = current;
+    }
+
+    // TODO: splice node into the first `height` levels using trace
+}
+
+template <class K, class T>
+void SkipList<K, T>::del(const K* const key)
+{
+
+}
+
 #endif
diff --git a/data_structures/skiplist/skipnode.hpp b/data_structures/skiplist/skipnode.hpp
new file mode 100644
index 000000000..9c8a0adc0
--- /dev/null
+++ b/data_structures/skiplist/skipnode.hpp
@@ -0,0 +1,67 @@
+#ifndef MEMGRAPH_DATA_STRUCTURES_SKIPLIST_SKIPNODE_HPP
+#define MEMGRAPH_DATA_STRUCTURES_SKIPLIST_SKIPNODE_HPP
+
+#include <cstdlib>
+#include <cstdint>
+#include <atomic>
+
+#include "utils/sync/spinlock.hpp"
+
+// concurrent skiplist node based on the implementation described in
+// "A Provably Correct Scalable Concurrent Skip List"
+// https://www.cs.tau.ac.il/~shanir/nir-pubs-web/Papers/OPODIS2006-BA.pdf
+
+template <class K, class T>
+struct SkipNode
+{
+    enum Flags {
+        MARKED_FOR_REMOVAL = 1,
+        FULLY_LINKED = 1 << 1
+    };
+
+    static SkipNode* create(const K* key, T* item, size_t level);
+    static void destroy(SkipNode* node);
+
+private:
+    SkipNode(size_t level = 0);
+    SkipNode(const K* key, T* item, size_t level);
+    ~SkipNode();
+
+    const K* key;
+    T* item;
+
+    std::atomic<uint8_t> flags;
+    const uint8_t level;
+    SpinLock lock;
+
+    // this declares an array of size zero locally inside the SkipNode
+    // struct. we can't put any sensible size here since we don't know
+    // what it will be until the skipnode is allocated. we could make it
+    // a SkipNode** but then we would need two memory allocations, one for
+    // the SkipNode and one for the forward list, and malloc calls are
+    // expensive!
+
+    // so we're going to cheat. we keep this a zero length array and
+    // allocate enough memory for the SkipNode struct to store more than
+    // zero elements (precisely *level* elements). c++ does not check
+    // bounds, so forward[i] works for any i < level.
+    std::atomic<SkipNode<K, T>*> forward[0];
+};
+
+template <class K, class T>
+SkipNode<K, T>::SkipNode(size_t level)
+    : key(nullptr), item(nullptr), flags(0), level(level)
+{
+    // the forward pointers live in the extra memory allocated right after
+    // this struct (see the comment above), so nothing is allocated here
+}
+
+template <class K, class T>
+SkipNode<K, T>::SkipNode(const K* key, T* item, size_t level)
+    : key(key), item(item), flags(0), level(level)
+{
+}
+
+template <class K, class T>
+SkipNode<K, T>::~SkipNode()
+{
+}
+
+#endif
diff --git a/utils/sync/caslock.hpp b/utils/sync/caslock.hpp
new file mode 100644
index 000000000..3f6f94691
--- /dev/null
+++ b/utils/sync/caslock.hpp
@@ -0,0 +1,20 @@
+#ifndef MEMGRAPH_UTILS_SYNC_CASLOCK_HPP
+#define MEMGRAPH_UTILS_SYNC_CASLOCK_HPP
+
+#include <atomic>
+
+struct CasLock
+{
+    std::atomic<bool> lock_flag;
+
+    void lock()
+    {
+    }
+
+    void unlock()
+    {
+    }
+};
+
+#endif
diff --git a/utils/sync/spinlock.hpp b/utils/sync/spinlock.hpp
index 3c7a686fd..27e6abd6c 100644
--- a/utils/sync/spinlock.hpp
+++ b/utils/sync/spinlock.hpp
@@ -7,22 +7,23 @@
 class SpinLock
 {
 public:
-    void acquire();
-    void release();
+    void lock();
+    void unlock();
 
 private:
-    std::atomic_flag lock = ATOMIC_FLAG_INIT;
+    // guaranteed by the standard to be lock-free!
+    std::atomic_flag lock_flag = ATOMIC_FLAG_INIT;
 };
 
-void SpinLock::acquire()
+void SpinLock::lock()
 {
-    while(lock.test_and_set(std::memory_order_acquire))
+    while(lock_flag.test_and_set(std::memory_order_acquire))
         usleep(250);
 }
 
-void SpinLock::release()
+void SpinLock::unlock()
 {
-    lock.clear(std::memory_order_release);
+    lock_flag.clear(std::memory_order_release);
 }
 
 #endif