made some progress on lock-free skip lists

Dominik Tomičević 2015-06-25 03:06:00 +02:00
parent adc80bf29a
commit 255079fc84
4 changed files with 159 additions and 71 deletions
data_structures/skiplist
utils/sync

data_structures/skiplist/skiplist.hpp

@@ -1,82 +1,82 @@
#ifndef MEMGRAPH_DATA_STRUCTURES_SKIPLIST_HPP
#define MEMGRAPH_DATA_STRUCTURES_SKIPLIST_HPP
#ifndef MEMGRAPH_DATA_STRUCTURES_SKIPLIST_SKIPLIST_HPP
#define MEMGRAPH_DATA_STRUCTURES_SKIPLIST_SKIPLIST_HPP
#include <algorithm>
#include <cstdlib>
#include <array>
#include "utils/random/xorshift.hpp"
// flip coins using the random bits: each consecutive set bit extends
// the tower by one level, giving the geometric height distribution
size_t new_height(int max_height)
{
    uint64_t rand = xorshift::next();
    size_t height = 0;

    while(max_height-- && (rand >>= 1) & 1)
        height++;

    return height;
}
#include "new_height.hpp"
#include "skipnode.hpp"
template <class K, class T>
struct SkipNode
{
SkipNode(K* key, T* item)
: key(key), item(item) {}
K* key;
T* item;
SkipNode* up;
SkipNode* forward;
};
template <class K,
class T,
size_t height = 16>
class SkipList
{
using Node = SkipNode<K, T>;
using Tower = std::array<Node*, height>;
public:
SkipList()
{
head.fill(nullptr);
}
SkipList(size_t max_height);
T* get(const K* const key)
{
size_t h = height;
while(h--)
{
}
}
void put(const K* key, T* item)
{
auto* node = new SkipNode<K, T>(key, item);
Tower trace;
size_t h = height - 1;
while(h--)
{
}
}
void del(const K* const key)
{
}
T* get(const K* const key);
void put(const K* key, T* item);
void del(const K* const key);
private:
Tower head;
size_t level;
Node* header;
};
template <class K, class T>
SkipList<K, T>::SkipList(size_t level)
: level(level)
{
    header = new Node(level);

    // every level of the header initially points to the same right
    // sentinel, whose null key acts as +infinity
    auto sentinel = new Node(level);

    for(size_t i = 0; i < level; ++i)
        header->forward[i] = sentinel;
}
template <class K, class T>
T* SkipList<K, T>::get(const K* const key)
{
    Node* current = header;

    for(int i = level - 1; i >= 0; --i)
    {
        Node* next = current->forward[i];

        // advance along this level while the next key is smaller,
        // then drop down a level and continue from the same node
        while(next->key != nullptr && *next->key < *key)
        {
            current = next;
            next = current->forward[i];
        }
    }

    // the right sentinel has a null key, so the null check guards it
    Node* candidate = current->forward[0];

    return candidate->key != nullptr && !(*key < *candidate->key)
           ? candidate->item : nullptr;
}
template <class K, class T>
void SkipList<K, T>::put(const K* key, T* item)
{
    auto height = new_height(level);
    auto node = new Node(key, item, height);

    // trace remembers the rightmost node visited on each level so the
    // higher level forward pointers can be spliced in afterwards
    Node* trace[level];
    Node* current = header;

    for(int i = level - 1; i >= 0; --i)
    {
        Node* next = current->forward[i];

        while(next->key != nullptr && *next->key < *key)
        {
            current = next;
            next = current->forward[i];
        }
        trace[i] = current;
    }

    // splice the new node in on every level it occupies
    for(size_t i = 0; i < height; ++i)
    {
        node->forward[i] = trace[i]->forward[i];
        trace[i]->forward[i] = node;
    }
}
template <class K, class T>
void SkipList<K, T>::del(const K* const key)
{
}
#endif
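
A hypothetical usage sketch of the interface above (the names are invented for illustration, not part of the commit); keys and items are passed as raw pointers, so the caller keeps ownership:

#include <string>

int main()
{
    SkipList<int, std::string> skiplist(16);

    int key = 42;
    std::string value = "answer";

    skiplist.put(&key, &value);

    std::string* found = skiplist.get(&key); // points at value
    skiplist.del(&key);                      // still a stub in this commit
}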

data_structures/skiplist/skipnode.hpp

@@ -0,0 +1,67 @@
#ifndef MEMGRAPH_DATA_STRUCTURES_SKIPLIST_SKIPNODE_HPP
#define MEMGRAPH_DATA_STRUCTURES_SKIPLIST_SKIPNODE_HPP
#include <cstdlib>
#include <atomic>
#include "utils/sync/spinlock.hpp"
// concurrent skiplist node based on the implementation described in
// "A Provably Correct Scalable Concurrent Skip List"
// https://www.cs.tau.ac.il/~shanir/nir-pubs-web/Papers/OPODIS2006-BA.pdf
template <class K, class T>
struct SkipNode
{
enum flags {
MARKED_FOR_REMOVAL = 1,
FULLY_LINKED = 1 << 1
};
static SkipNode* create(K* key, T* item, uint8_t level);
static void destroy(SkipNode* node);
private:
SkipNode(uint8_t level);
SkipNode(K* key, T* item, uint8_t level);
~SkipNode();
K* key;
T* item;
std::atomic<uint8_t> flags;
const uint8_t level;
SpinLock lock;
// this creates an array of size zero locally inside the SkipNode
// struct. we can't put any sensible size here because we don't know
// how large it needs to be until the node is allocated. we could make
// it a SkipNode** instead, but then we would need two memory
// allocations, one for the SkipNode and one for the forward list, and
// malloc calls are expensive! so we're going to cheat: keep this a
// zero length array and allocate enough memory for the SkipNode to
// store more than zero elements (precisely *level* elements). C++
// does not check array bounds, so we can access anything we want!
std::atomic<SkipNode<K, T>*> forward[0];
};
template <class K, class T>
SkipNode<K, T>::SkipNode(uint8_t level)
    : key(nullptr), item(nullptr), flags(0), level(level)
{
    // the forward pointers live in the memory allocated right after
    // this struct, see the zero length array comment above
    for(uint8_t i = 0; i < level; ++i)
        forward[i].store(nullptr, std::memory_order_relaxed);
}

template <class K, class T>
SkipNode<K, T>::SkipNode(K* key, T* item, uint8_t level)
    : key(key), item(item), flags(0), level(level)
{
    for(uint8_t i = 0; i < level; ++i)
        forward[i].store(nullptr, std::memory_order_relaxed);
}

template <class K, class T>
SkipNode<K, T>::~SkipNode()
{
    // the node's memory is released by destroy(), nothing to do here
}
#endif
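
create() and destroy() are declared but not yet defined in this commit; a sketch of how they could pair with the zero length array trick above, assuming <new> is included for placement new (the allocation scheme is my assumption, not the author's):

template <class K, class T>
SkipNode<K, T>* SkipNode<K, T>::create(K* key, T* item, uint8_t level)
{
    // over-allocate so the zero length forward array can really hold
    // *level* atomic pointers right behind the struct
    auto bytes = sizeof(SkipNode) + level * sizeof(std::atomic<SkipNode*>);
    auto memory = std::malloc(bytes);

    // construct the node in place; the constructors initialize the
    // forward pointers to nullptr
    return new (memory) SkipNode(key, item, level);
}

template <class K, class T>
void SkipNode<K, T>::destroy(SkipNode* node)
{
    node->~SkipNode();
    std::free(node);
}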

utils/sync/caslock.hpp

@@ -0,0 +1,20 @@
#ifndef MEMGRAPH_UTILS_SYNC_CASLOCK_HPP
#define MEMGRAPH_UTILS_SYNC_CASLOCK_HPP
#include <atomic>
struct CasLock
{
    void lock()
    {
        bool locked = false;
        // spin until we atomically swap false -> true; a failed CAS
        // writes the current value into `locked`, so reset it
        while(!lock_flag.compare_exchange_weak(locked, true,
                                               std::memory_order_acquire))
            locked = false;
    }

    void unlock()
    {
        lock_flag.store(false, std::memory_order_release);
    }

private:
    std::atomic<bool> lock_flag { false };
};
#endif
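
A hypothetical usage sketch (the thread and counter names are invented here, not part of the commit): two threads contend on the lock and every increment is serialized:

#include <thread>

CasLock counter_lock;
int counter = 0;

void work()
{
    for(int i = 0; i < 1000; ++i)
    {
        counter_lock.lock();
        ++counter;
        counter_lock.unlock();
    }
}

int main()
{
    std::thread a(work), b(work);
    a.join();
    b.join();
    // counter is now exactly 2000
}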

utils/sync/spinlock.hpp

@@ -7,22 +7,23 @@
class SpinLock
{
public:
void acquire();
void release();
void lock();
void unlock();
private:
std::atomic_flag lock = ATOMIC_FLAG_INIT;
// std::atomic_flag is the only type the standard guarantees
// to be lock free!
std::atomic_flag lock_flag = ATOMIC_FLAG_INIT;
};
void SpinLock::acquire()
void SpinLock::lock()
{
while(lock.test_and_set(std::memory_order_acquire))
while(lock_flag.test_and_set(std::memory_order_acquire))
    usleep(250); // back off briefly instead of burning the CPU
}
void SpinLock::release()
void SpinLock::unlock()
{
lock.clear(std::memory_order_release);
lock_flag.clear(std::memory_order_release);
}
#endif
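
Renaming acquire/release to lock/unlock means both SpinLock and CasLock now satisfy the standard BasicLockable requirements, so RAII guards work with them; a usage sketch (the guard pattern is standard C++, the names here are illustrative):

#include <mutex>

SpinLock spinlock;

void critical_section()
{
    // the guard unlocks automatically on scope exit,
    // even if an exception is thrown
    std::lock_guard<SpinLock> guard(spinlock);
    // ... touch shared state safely here ...
}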