implemented a lock-free and (almost) wait-free bitset

Dominik Tomičević 2015-09-24 18:36:16 +02:00
parent 43e975d3cc
commit 2727c26eb5
2 changed files with 120 additions and 110 deletions

bitblock.hpp

@@ -1,79 +0,0 @@
-#ifndef MEMGRAPH_DATA_STRUCTURES_BITBLOCK_HPP
-#define MEMGRAPH_DATA_STRUCTURES_BITBLOCK_HPP
-
-#include <cstdlib>
-#include <cassert>
-#include <atomic>
-#include <unistd.h>
-
-template<class block_t = uint8_t,
-         size_t N = 1>
-struct BitBlock
-{
-    BitBlock() : block(0) {}
-
-    static constexpr size_t bits = sizeof(block_t) * 8;
-    static constexpr size_t size = bits / N;
-
-    // e.g. if N = 2,
-    // mask = 11111111 >> 6 = 00000011
-    static constexpr block_t mask = (block_t)(-1) >> (bits - N);
-
-    block_t at(size_t n)
-    {
-        assert(n < size);
-        block_t b = block.load(std::memory_order_relaxed);
-        return (b >> n * N) & mask;
-    }
-
-    // caution! this method assumes that the value on the sub-block n is 0..0!
-    void set(size_t n, block_t value)
-    {
-        assert(n < size);
-        assert(value < (1UL << N));
-        block_t b, new_value;
-
-        while(true)
-        {
-            b = block.load(std::memory_order_relaxed);
-            new_value = b | (value << n * N);
-
-            if(block.compare_exchange_weak(b, new_value,
-                   std::memory_order_release,
-                   std::memory_order_relaxed))
-            {
-                break;
-            }
-
-            // reduces contention and works better than pure while
-            usleep(250);
-        }
-    }
-
-    void clear(size_t n)
-    {
-        assert(n < size);
-        block_t b, new_value;
-
-        while(true)
-        {
-            b = block.load(std::memory_order_relaxed);
-            new_value = b & ~(mask << n * N);
-
-            if(block.compare_exchange_weak(b, new_value,
-                   std::memory_order_release,
-                   std::memory_order_relaxed))
-            {
-                break;
-            }
-
-            // reduces contention and works better than pure while
-            usleep(250);
-        }
-    }
-
-    std::atomic<block_t> block;
-};
-
-#endif
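For context (not part of the commit): the removed BitBlock is only lock-free, because its compare_exchange loop can retry an unbounded number of times while other threads keep modifying the same block. The replacement below uses fetch_or/fetch_and instead, a single atomic read-modify-write that finishes in one step on common hardware (e.g. x86's lock-prefixed or), which is what the commit message's "(almost) wait free" refers to. A minimal standalone sketch of the difference:

#include <atomic>
#include <cstddef>
#include <cstdint>

std::atomic<uint8_t> word {0};

// lock-free only: the CAS can fail and be retried indefinitely if
// other threads keep changing `word` between the load and the CAS
void set_bit_cas(size_t n)
{
    uint8_t old = word.load(std::memory_order_relaxed);

    // on failure, compare_exchange_weak reloads `old` with the current
    // value, so the desired value is recomputed on every iteration
    while(!word.compare_exchange_weak(old, uint8_t(old | (1u << n)),
                                      std::memory_order_release,
                                      std::memory_order_relaxed))
        ;
}

// wait-free on typical hardware: one atomic read-modify-write, no retry loop
void set_bit_rmw(size_t n)
{
    word.fetch_or(1u << n, std::memory_order_seq_cst);
}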

dynamic_bitset.hpp

@@ -1,59 +1,148 @@
 #ifndef MEMGRAPH_DATA_STRUCTURES_BITSET_DYNAMIC_BITSET_HPP
 #define MEMGRAPH_DATA_STRUCTURES_BITSET_DYNAMIC_BITSET_HPP
 
 #include <cassert>
-#include <vector>
 #include <atomic>
-#include <mutex>
+#include <condition_variable>
+#include <unistd.h>
 
-#include "sync/spinlock.hpp"
-#include "bitblock.hpp"
+#include "threading/sync/lockable.hpp"
+#include "threading/sync/spinlock.hpp"
 
-template <class block_t,
-          size_t N,
-          class lock_t>
-class DynamicBitset
+template <class block_t = uint8_t, size_t chunk_size = 32768>
+class DynamicBitset : Lockable<SpinLock>
 {
-    using Block = BitBlock<block_t, N>;
+    struct Block
+    {
+        // the deleted copy ctor below counts as a user-declared
+        // constructor, so the default ctor must be declared explicitly
+        Block() = default;
+
+        Block(Block&) = delete;
+        Block(Block&&) = delete;
+
+        static constexpr size_t size = sizeof(block_t) * 8;
+
+        // e.g. bitmask(3) == 00000111
+        static constexpr block_t bitmask(size_t group_size)
+        {
+            return (block_t)(-1) >> (size - group_size);
+        }
+
+        block_t at(size_t k, size_t n, std::memory_order order)
+        {
+            assert(k + n - 1 < size);
+            return (block.load(order) >> k) & bitmask(n);
+        }
+
+        void set(size_t k, size_t n, std::memory_order order)
+        {
+            assert(k + n - 1 < size);
+            block.fetch_or(bitmask(n) << k, order);
+        }
+
+        void clear(size_t k, size_t n, std::memory_order order)
+        {
+            assert(k + n - 1 < size);
+            block.fetch_and(~(bitmask(n) << k), order);
+        }
+
+        std::atomic<block_t> block {0};
+    };
+    struct Chunk
+    {
+        Chunk() : next(nullptr)
+        {
+            static_assert(chunk_size % sizeof(block_t) == 0,
+                          "chunk size not divisible by block size");
+        }
+
+        Chunk(Chunk&) = delete;
+        Chunk(Chunk&&) = delete;
+
+        ~Chunk()
+        {
+            delete next;
+        }
+
+        static constexpr size_t n_blocks = chunk_size / sizeof(block_t);
+        // total number of bits in this chunk (blocks * bits per block)
+        static constexpr size_t size = n_blocks * Block::size;
+
+        block_t at(size_t k, size_t n, std::memory_order order)
+        {
+            return blocks[k / Block::size].at(k % Block::size, n, order);
+        }
+
+        void set(size_t k, size_t n, std::memory_order order)
+        {
+            blocks[k / Block::size].set(k % Block::size, n, order);
+        }
+
+        void clear(size_t k, size_t n, std::memory_order order)
+        {
+            blocks[k / Block::size].clear(k % Block::size, n, order);
+        }
+
+        Block blocks[n_blocks];
+        std::atomic<Chunk*> next;
+    };
 public:
-    DynamicBitset(size_t n) : data(container_size(n)) {}
-
-    void resize(size_t n)
+    block_t at(size_t k, size_t n = 1)
     {
-        auto guard = acquire();
-        data.resize(container_size(n));
+        auto& chunk = find_chunk(k);
+        return chunk.at(k, n, std::memory_order_seq_cst);
     }
 
-    size_t size() const
+    void set(size_t k, size_t n = 1)
     {
-        return data.size();
+        auto& chunk = find_chunk(k);
+        chunk.set(k, n, std::memory_order_seq_cst);
     }
 
-    block_t at(size_t n)
+    void clear(size_t k, size_t n = 1)
     {
-        return data[n / Block::size].at(n % Block::size);
-    }
-
-    void set(size_t n, block_t value)
-    {
-        data[n / Block::size].set(n % Block::size, value);
+        auto& chunk = find_chunk(k);
+        chunk.clear(k, n, std::memory_order_seq_cst);
     }
 private:
-    std::unique_lock<lock_t> acquire()
+    Chunk& find_chunk(size_t& k)
     {
-        return std::unique_lock<lock_t>(lock);
+        Chunk *chunk = head.load(), *next = nullptr;
+
+        // while i'm not in the right chunk
+        // (my index is bigger than the size of this chunk)
+        while(k >= Chunk::size)
+        {
+            next = chunk->next.load();
+
+            // if a next chunk exists, switch to it and decrement my
+            // index by the size of the current chunk
+            if(next != nullptr)
+            {
+                chunk = next;
+                k -= Chunk::size;
+                continue;
+            }
+
+            // the next chunk does not exist and we need it. take an
+            // exclusive lock to prevent others that also want to create
+            // a new chunk from creating it
+            auto guard = acquire();
+
+            // double-checked locking. if the chunk exists now, some other
+            // thread has just created it, continue searching for my chunk
+            if(chunk->next.load() != nullptr)
+                continue;
+
+            chunk->next.store(new Chunk());
+        }
+
+        assert(chunk != nullptr);
+        return *chunk;
+    }
-    size_t container_size(size_t num_elems)
-    {
-        return (num_elems + N - 1) / N;
-    }
-
-    std::vector<Block> data;
-    lock_t lock;
+    // the list is never empty; the head chunk is allocated up front so
+    // find_chunk can start walking without a null check
+    std::atomic<Chunk*> head {new Chunk()};
 };
 
 #endif
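A hypothetical usage sketch (the include path is assumed, not taken from the commit, and it relies on the head chunk being allocated up front as in the member initializer above). With the default parameters one chunk covers 32768 * 8 bits, and indices past the end of the list grow it on demand:

#include <cstdint>
#include "dynamic_bitset.hpp"  // assumed path

int main()
{
    DynamicBitset<uint8_t, 32768> bitset;

    bitset.set(1000000);                    // allocates chunks as needed
    bool one = bitset.at(1000000) != 0;     // reads the bit back
    bitset.clear(1000000);

    return one ? 0 : 1;
}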