From 2727c26eb59fa9ea38b496d3752db2610ec53171 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Dominik=20Tomic=CC=8Cevic=CC=81?=
Date: Thu, 24 Sep 2015 18:36:16 +0200
Subject: [PATCH] implemented a lock-free and (almost) wait-free bitset

---
 data_structures/bitset/bitblock.hpp       |  79 -----------
 data_structures/bitset/dynamic_bitset.hpp | 151 +++++++++++++++++-----
 2 files changed, 120 insertions(+), 110 deletions(-)
 delete mode 100644 data_structures/bitset/bitblock.hpp

diff --git a/data_structures/bitset/bitblock.hpp b/data_structures/bitset/bitblock.hpp
deleted file mode 100644
index 52adc1f1c..000000000
--- a/data_structures/bitset/bitblock.hpp
+++ /dev/null
@@ -1,79 +0,0 @@
-#ifndef MEMGRAPH_DATA_STRUCTURES_BITBLOCK_HPP
-#define MEMGRAPH_DATA_STRUCTURES_BITBLOCK_HPP
-
-#include <atomic>
-#include <cassert>
-#include <cstdlib>
-#include <unistd.h>
-
-template <class block_t, size_t N>
-struct BitBlock
-{
-    BitBlock() : block(0) {}
-
-    static constexpr size_t bits = sizeof(block_t) * 8;
-    static constexpr size_t size = bits / N;
-
-    // e.g. if N = 2,
-    // mask = 11111111 >> 6 = 00000011
-    static constexpr block_t mask = (block_t)(-1) >> (bits - N);
-
-    block_t at(size_t n)
-    {
-        assert(n < size);
-
-        block_t b = block.load(std::memory_order_relaxed);
-        return (b >> n * N) & mask;
-    }
-
-    // caution! this method assumes that sub-block n currently holds
-    // all zeros!
-    void set(size_t n, block_t value)
-    {
-        assert(n < size);
-        assert(value < (1UL << N));
-        block_t b, new_value;
-
-        while(true)
-        {
-            b = block.load(std::memory_order_relaxed);
-            new_value = b | (value << n * N);
-
-            if(block.compare_exchange_weak(b, new_value,
-                std::memory_order_release,
-                std::memory_order_relaxed))
-            {
-                break;
-            }
-
-            // back off briefly to reduce contention; this performs
-            // better than a tight retry loop
-            usleep(250);
-        }
-    }
-
-    void clear(size_t n)
-    {
-        assert(n < size);
-        block_t b, new_value;
-
-        while(true)
-        {
-            b = block.load(std::memory_order_relaxed);
-            new_value = b & ~(mask << n * N);
-
-            if(block.compare_exchange_weak(b, new_value,
-                std::memory_order_release,
-                std::memory_order_relaxed))
-            {
-                break;
-            }
-
-            // back off briefly to reduce contention; this performs
-            // better than a tight retry loop
-            usleep(250);
-        }
-    }
-
-    std::atomic<block_t> block;
-};
-
-#endif
diff --git a/data_structures/bitset/dynamic_bitset.hpp b/data_structures/bitset/dynamic_bitset.hpp
index cc2cee77d..b41fc4b75 100644
--- a/data_structures/bitset/dynamic_bitset.hpp
+++ b/data_structures/bitset/dynamic_bitset.hpp
@@ -1,59 +1,148 @@
 #ifndef MEMGRAPH_DATA_STRUCTURES_BITSET_DYNAMIC_BITSET_HPP
 #define MEMGRAPH_DATA_STRUCTURES_BITSET_DYNAMIC_BITSET_HPP
 
+#include <atomic>
 #include <cassert>
+#include <cstdlib>
 #include <mutex>
+#include <unistd.h>
 #include <vector>
 
-#include "sync/spinlock.hpp"
-#include "bitblock.hpp"
+#include "threading/sync/lockable.hpp"
+#include "threading/sync/spinlock.hpp"
 
-template <class block_t, size_t N, class lock_t>
-class DynamicBitset
+template <class block_t, size_t chunk_size>
+class DynamicBitset : Lockable<SpinLock>
 {
-    using Block = BitBlock<block_t, N>;
+    struct Block
+    {
+        Block() = default;
+        Block(Block&) = delete;
+        Block(Block&&) = delete;
+
+        static constexpr size_t size = sizeof(block_t) * 8;
+
+        constexpr block_t bitmask(size_t group_size)
+        {
+            return (block_t)(-1) >> (size - group_size);
+        }
+
+        block_t at(size_t k, size_t n, std::memory_order order)
+        {
+            assert(k + n - 1 < size);
+            return (block.load(order) >> k) & bitmask(n);
+        }
+
+        void set(size_t k, size_t n, std::memory_order order)
+        {
+            assert(k + n - 1 < size);
+            block.fetch_or(bitmask(n) << k, order);
+        }
+
+        void clear(size_t k, size_t n, std::memory_order order)
+        {
+            assert(k + n - 1 < size);
+            block.fetch_and(~(bitmask(n) << k), order);
+        }
+
+        std::atomic<block_t> block {0};
+    };
+
+    struct Chunk
+    {
+        Chunk() : next(nullptr)
+        {
+            static_assert(chunk_size % sizeof(block_t) == 0,
+                "chunk size not divisible by block size");
+        }
+
+        Chunk(Chunk&) = delete;
+        Chunk(Chunk&&) = delete;
+
+        ~Chunk()
+        {
+            delete next.load();
+        }
+
+        static constexpr size_t size = chunk_size * Block::size;
+        static constexpr size_t n_blocks = chunk_size / sizeof(block_t);
+
+        block_t at(size_t k, size_t n, std::memory_order order)
+        {
+            return blocks[k / Block::size].at(k % Block::size, n, order);
+        }
+
+        void set(size_t k, size_t n, std::memory_order order)
+        {
+            blocks[k / Block::size].set(k % Block::size, n, order);
+        }
+
+        void clear(size_t k, size_t n, std::memory_order order)
+        {
+            blocks[k / Block::size].clear(k % Block::size, n, order);
+        }
+
+        Block blocks[n_blocks];
+        std::atomic<Chunk*> next;
+    };
 
 public:
-    DynamicBitset(size_t n) : data(container_size(n)) {}
-
-    void resize(size_t n)
+    ~DynamicBitset()
     {
-        auto guard = acquire();
-        data.resize(container_size(n));
+        // chunks delete their successors recursively
+        delete head.load();
     }
 
-    size_t size() const
+    block_t at(size_t k, size_t n = 1)
     {
-        return data.size();
+        auto& chunk = find_chunk(k);
+        return chunk.at(k, n, std::memory_order_seq_cst);
     }
 
-    block_t at(size_t n)
+    void set(size_t k, size_t n = 1)
     {
-        return data[n / Block::size].at(n % Block::size);
-    }
-
-    void set(size_t n, block_t value)
-    {
-        data[n / Block::size].set(n % Block::size, value);
+        auto& chunk = find_chunk(k);
+        chunk.set(k, n, std::memory_order_seq_cst);
     }
 
-private:
+    void clear(size_t k, size_t n = 1)
+    {
+        auto& chunk = find_chunk(k);
+        chunk.clear(k, n, std::memory_order_seq_cst);
+    }
 
-    std::unique_lock<lock_t> acquire()
+private:
+    Chunk& find_chunk(size_t& k)
     {
-        return std::unique_lock<lock_t>(lock);
+        Chunk *chunk = head.load(), *next = nullptr;
+
+        // walk the chunk list while we're not in the right chunk
+        // (the index is bigger than the size of this chunk)
+        while(k >= Chunk::size)
+        {
+            next = chunk->next.load();
+
+            // if a next chunk exists, switch to it and decrement the
+            // index by the size of the current chunk
+            if(next != nullptr)
+            {
+                chunk = next;
+                k -= Chunk::size;
+                continue;
+            }
+
+            // the next chunk does not exist and we need it. take an
+            // exclusive lock to prevent others that also want to create
+            // a new chunk from creating it
+            auto guard = acquire();
+
+            // double-checked locking. if the chunk exists now, some other
+            // thread has just created it, continue searching for my chunk
+            if(chunk->next.load() != nullptr)
+                continue;
+
+            chunk->next.store(new Chunk());
+        }
+
+        assert(chunk != nullptr);
+        return *chunk;
     }
 
-    size_t container_size(size_t num_elems)
-    {
-        return (num_elems + N - 1) / N;
-    }
-
-    std::vector<Block> data;
-
-    lock_t lock;
+    // the list starts with a single eagerly allocated chunk
+    std::atomic<Chunk*> head {new Chunk()};
 };
 
 #endif
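
Note, not part of the patch: a minimal usage sketch of the new interface.
The include path, the uint64_t block type and the 4096-byte chunk size are
illustrative assumptions, and the sketch relies on SpinLock and Lockable
being available from the threading/sync headers referenced above.

    #include <cstdint>
    #include <cstdio>

    #include "data_structures/bitset/dynamic_bitset.hpp"

    int main()
    {
        // 64-bit atomic blocks, 4096-byte chunks (32768 bits per chunk);
        // further chunks are allocated on demand by find_chunk()
        DynamicBitset<uint64_t, 4096> bitset;

        bitset.set(3);       // set a single bit
        bitset.set(70000);   // an index past the first chunk grows the list

        std::printf("%d\n", (int) bitset.at(3));   // prints 1
        bitset.clear(3);
        std::printf("%d\n", (int) bitset.at(3));   // prints 0
    }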
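
A second sketch, under the same assumptions, exercising the claim in the
subject line: once a chunk exists, every set() is a single fetch_or, so
writers never wait on one another; the spinlock in find_chunk() is taken
only by a thread that has to append a brand new chunk.

    #include <cassert>
    #include <cstdint>
    #include <thread>
    #include <vector>

    #include "data_structures/bitset/dynamic_bitset.hpp"

    int main()
    {
        DynamicBitset<uint64_t, 4096> visited;
        std::vector<std::thread> threads;

        // four writers mark disjoint indices concurrently
        for (int t = 0; t < 4; ++t)
            threads.emplace_back([&visited, t] {
                for (size_t i = t; i < 100000; i += 4)
                    visited.set(i);
            });

        for (auto& worker : threads)
            worker.join();

        // every index should now be marked
        for (size_t i = 0; i < 100000; ++i)
            assert(visited.at(i) == 1);
    }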