Implement a PoolResource for allocations
Reviewers: mtomic, mferencevic

Reviewed By: mtomic

Subscribers: pullbot

Differential Revision: https://phabricator.memgraph.io/D2147
@@ -1,6 +1,7 @@
#pragma once

#include <cstdint>
#include <limits>
#include <type_traits>

#include <glog/logging.h>
@@ -23,4 +24,18 @@ inline uint64_t Log2(uint64_t val) {
  int ret = __builtin_clzl(val);
  return 64UL - static_cast<uint64_t>(ret) - 1UL;
}

/// Return `true` if `val` is a power of 2.
inline bool IsPow2(uint64_t val) noexcept {
  return val != 0ULL && (val & (val - 1ULL)) == 0ULL;
}

/// Return `val` if it is a power of 2, otherwise get the next power of 2
/// value. If `val` is sufficiently large, the next power of 2 value will not
/// fit into the result type and the returned value wraps around to 1ULL.
inline uint64_t Ceil2(uint64_t val) noexcept {
  if (val == 0ULL || val == 1ULL) return 1ULL;
  return 1ULL << (Log2(val - 1ULL) + 1ULL);
}

} // namespace utils
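
As quick orientation for the two new helpers, a small hypothetical usage sketch (not part of the commit; it assumes only `utils::IsPow2` and `utils::Ceil2` as declared above):

  #include <cassert>

  #include "utils/math.hpp"

  int main() {
    assert(utils::IsPow2(64U) && !utils::IsPow2(48U));
    assert(!utils::IsPow2(0U));        // 0 is not a power of 2
    assert(utils::Ceil2(48U) == 64U);  // rounds up to the next power of 2
    assert(utils::Ceil2(64U) == 64U);  // powers of 2 are returned unchanged
    assert(utils::Ceil2(0U) == 1U);    // 2^0 is the smallest power of 2
  }
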
@@ -1,28 +1,19 @@
#include "utils/memory.hpp"

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <limits>
#include <new>
#include <type_traits>

#include <glog/logging.h>

namespace utils {

// MonotonicBufferResource

namespace {

-// NOTE: std::bad_alloc has no constructor accepting a message, so we wrap our
-// exceptions in this class.
-class BadAlloc final : public std::bad_alloc {
-  std::string msg_;
-
- public:
-  explicit BadAlloc(const std::string &msg) : msg_(msg) {}
-
-  const char *what() const noexcept override { return msg_.c_str(); }
-};
-
size_t GrowMonotonicBuffer(size_t current_size, size_t max_size) {
  double next_size = current_size * 1.34;
  if (next_size >= static_cast<double>(max_size)) {
@@ -78,8 +69,9 @@ MonotonicBufferResource &MonotonicBufferResource::operator=(
void MonotonicBufferResource::Release() {
  for (auto *b = current_buffer_; b;) {
    auto *next = b->next;
+    auto capacity = b->capacity;
    b->~Buffer();
-    memory_->Deallocate(b, sizeof(b) + b->capacity);
+    memory_->Deallocate(b, sizeof(*b) + capacity);
    b = next;
  }
  current_buffer_ = nullptr;
@@ -88,10 +80,9 @@ void MonotonicBufferResource::Release() {

void *MonotonicBufferResource::DoAllocate(size_t bytes, size_t alignment) {
  static_assert(std::is_same_v<size_t, uintptr_t>);
-  const bool is_pow2 = alignment != 0U && (alignment & (alignment - 1U)) == 0U;
-  if (bytes == 0U || !is_pow2) throw BadAlloc("Invalid allocation request");
  if (alignment > alignof(std::max_align_t))
-    alignment = alignof(std::max_align_t);
+    throw BadAlloc(
+        "Alignment greater than alignof(std::max_align_t) is unsupported");

  auto push_current_buffer = [this, bytes](size_t next_size) {
    // Set capacity so that the bytes fit.
@@ -141,4 +132,221 @@ void *MonotonicBufferResource::DoAllocate(size_t bytes, size_t alignment) {

// MonotonicBufferResource END

// PoolResource
//
// The implementation is partially based on the "Small Object Allocation"
// implementation from "Modern C++ Design" by Andrei Alexandrescu, while some
// other parts follow the `libstdc++-9.1` implementation.

namespace impl {

Pool::Pool(size_t block_size, unsigned char blocks_per_chunk,
           MemoryResource *memory)
    : blocks_per_chunk_(blocks_per_chunk),
      block_size_(block_size),
      chunks_(memory) {}

Pool::~Pool() {
  CHECK(chunks_.empty()) << "You need to call Release before destruction!";
}

void *Pool::Allocate() {
  auto allocate_block_from_chunk = [this](Chunk *chunk) {
    unsigned char *available_block =
        chunk->data + (chunk->first_available_block_ix * block_size_);
    // Update the free-list pointer (an index in our case) by reading "next"
    // from the available_block.
    chunk->first_available_block_ix = *available_block;
    --chunk->blocks_available;
    return available_block;
  };
  if (last_alloc_chunk_ && last_alloc_chunk_->blocks_available > 0U)
    return allocate_block_from_chunk(last_alloc_chunk_);
  // Find a Chunk with available memory.
  for (auto &chunk : chunks_) {
    if (chunk.blocks_available > 0U) {
      last_alloc_chunk_ = &chunk;
      return allocate_block_from_chunk(last_alloc_chunk_);
    }
  }
  // We haven't found a Chunk with available memory, so allocate a new one.
  if (block_size_ > std::numeric_limits<size_t>::max() / blocks_per_chunk_)
    throw BadAlloc("Allocation size overflow");
  size_t data_size = blocks_per_chunk_ * block_size_;
  // Use the next power of 2 of block_size_ as the alignment, so that we cover
  // alignment requests between 1 and block_size_. Users of this class should
  // make sure that the requested alignment of a particular block is never
  // greater than the block itself.
  size_t alignment = Ceil2(block_size_);
  if (alignment < block_size_) throw BadAlloc("Allocation alignment overflow");
  auto *data = reinterpret_cast<unsigned char *>(
      GetUpstreamResource()->Allocate(data_size, alignment));
  // Form a free list of blocks in data.
  for (unsigned char i = 0U; i < blocks_per_chunk_; ++i) {
    *(data + (i * block_size_)) = i + 1U;
  }
  try {
    chunks_.push_back(Chunk{data, 0, blocks_per_chunk_});
  } catch (...) {
    GetUpstreamResource()->Deallocate(data, data_size, alignment);
    throw;
  }
  last_alloc_chunk_ = &chunks_.back();
  last_dealloc_chunk_ = &chunks_.back();
  return allocate_block_from_chunk(last_alloc_chunk_);
}
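
The embedded-index free list used above (in the spirit of the `FixedAllocator` from "Modern C++ Design") keeps the index of the next free block in the first byte of each free block, so the bookkeeping needs no extra memory. A minimal, hypothetical sketch of the same idea in isolation (not part of the commit):

  #include <cstddef>

  int main() {
    constexpr std::size_t kBlockSize = 8U, kBlocks = 4U;
    unsigned char data[kBlocks * kBlockSize];
    // Thread the free list: block i's first byte holds the index i + 1.
    for (unsigned char i = 0U; i < kBlocks; ++i) data[i * kBlockSize] = i + 1U;
    unsigned char first_free = 0U;  // head of the free list

    // Pop a block, as allocate_block_from_chunk does above.
    unsigned char *block = data + first_free * kBlockSize;
    first_free = *block;  // follow the embedded "next" index

    // Push the block back, as Pool::Deallocate does below.
    *block = first_free;
    first_free = (block - data) / kBlockSize;
  }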

void Pool::Deallocate(void *p) {
  CHECK(last_dealloc_chunk_);
  CHECK(!chunks_.empty()) << "Expected a call to Deallocate after at least a "
                             "single Allocate has been done.";
  auto is_in_chunk = [this, p](const Chunk &chunk) {
    auto ptr = reinterpret_cast<uintptr_t>(p);
    size_t data_size = blocks_per_chunk_ * block_size_;
    return reinterpret_cast<uintptr_t>(chunk.data) <= ptr &&
           ptr < reinterpret_cast<uintptr_t>(chunk.data + data_size);
  };
  auto deallocate_block_from_chunk = [this, p](Chunk *chunk) {
    // Link the block back into the free list.
    auto *block = reinterpret_cast<unsigned char *>(p);
    *block = chunk->first_available_block_ix;
    chunk->first_available_block_ix = (block - chunk->data) / block_size_;
    // Mark the block as available again (the counterpart of the decrement in
    // Allocate).
    ++chunk->blocks_available;
  };
  if (is_in_chunk(*last_dealloc_chunk_)) {
    deallocate_block_from_chunk(last_dealloc_chunk_);
    return;
  }
  // Find the chunk which served this allocation.
  for (auto &chunk : chunks_) {
    if (is_in_chunk(chunk)) {
      // Update last_alloc_chunk_ as well, because it now has a free block.
      // Additionally, this corresponds to the common C++ pattern of
      // allocations and deallocations being done in reverse order.
      last_alloc_chunk_ = &chunk;
      last_dealloc_chunk_ = &chunk;
      deallocate_block_from_chunk(&chunk);
      return;
    }
  }
  // TODO: We could release the Chunk to upstream memory.
}

void Pool::Release() {
  for (auto &chunk : chunks_) {
    size_t data_size = blocks_per_chunk_ * block_size_;
    size_t alignment = Ceil2(block_size_);
    GetUpstreamResource()->Deallocate(chunk.data, data_size, alignment);
  }
  chunks_.clear();
}

} // namespace impl

PoolResource::PoolResource(size_t max_blocks_per_chunk, size_t max_block_size,
                           MemoryResource *memory)
    : pools_(memory),
      unpooled_(memory),
      max_blocks_per_chunk_(
          std::min(max_blocks_per_chunk,
                   static_cast<size_t>(impl::Pool::MaxBlocksInChunk()))),
      max_block_size_(max_block_size) {
  CHECK(max_blocks_per_chunk_ > 0U);
  CHECK(max_block_size_ > 0U);
}

void *PoolResource::DoAllocate(size_t bytes, size_t alignment) {
  // Take the max of `bytes` and `alignment` so that we simplify handling
  // alignment requests.
  size_t block_size = std::max(bytes, alignment);
  // Check that we have received a regular allocation request with non-padded
  // structs/classes in play. These will always have
  // `sizeof(T) % alignof(T) == 0`. Special requests which don't have that
  // property can never be correctly handled with contiguous blocks; we would
  // have to write a general-purpose allocator which would need to be as
  // complex as malloc/free.
  if (block_size % alignment != 0)
    throw BadAlloc("Requested bytes must be a multiple of alignment");
  if (block_size > max_block_size_) {
    // Allocate a big block.
    BigBlock big_block{bytes, alignment,
                       GetUpstreamResource()->Allocate(bytes, alignment)};
    // Insert the big block at the sorted position.
    auto it = std::lower_bound(
        unpooled_.begin(), unpooled_.end(), big_block,
        [](const auto &a, const auto &b) { return a.data < b.data; });
    try {
      unpooled_.insert(it, big_block);
    } catch (...) {
      GetUpstreamResource()->Deallocate(big_block.data, bytes, alignment);
      throw;
    }
    return big_block.data;
  }
  // Allocate a regular block; first check if last_alloc_pool_ is suitable.
  if (last_alloc_pool_ && last_alloc_pool_->GetBlockSize() == block_size) {
    return last_alloc_pool_->Allocate();
  }
  // Find the first pool with a greater or equal block_size.
  impl::Pool pool(block_size, max_blocks_per_chunk_, GetUpstreamResource());
  auto it = std::lower_bound(pools_.begin(), pools_.end(), pool,
                             [](const auto &a, const auto &b) {
                               return a.GetBlockSize() < b.GetBlockSize();
                             });
  if (it != pools_.end() && it->GetBlockSize() == block_size) {
    last_alloc_pool_ = &*it;
    return it->Allocate();
  }
  // We don't have a pool for this block_size, so insert one at the sorted
  // position.
  it = pools_.emplace(it, std::move(pool));
  last_alloc_pool_ = &*it;
  last_dealloc_pool_ = &*it;
  return it->Allocate();
}
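
To make the block-size rule concrete, a hypothetical caller-side illustration of how requests map to pools under the code above (not part of the commit):

  utils::PoolResource mem(128U, 64U);  // up to 128 blocks/chunk, 64-byte max block
  void *a = mem.Allocate(24U, 8U);  // block_size = max(24, 8) = 24 -> 24-byte pool
  void *b = mem.Allocate(4U, 8U);   // block_size = max(4, 8) = 8 -> 8-byte pool
  void *c = mem.Allocate(65U, 1U);  // 65 > max_block_size, stored in unpooled_
  // mem.Allocate(10U, 4U) would throw BadAlloc: 10 is not a multiple of 4.
  mem.Deallocate(a, 24U, 8U);
  mem.Deallocate(b, 4U, 8U);
  mem.Deallocate(c, 65U, 1U);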

void PoolResource::DoDeallocate(void *p, size_t bytes, size_t alignment) {
  size_t block_size = std::max(bytes, alignment);
  CHECK(block_size % alignment == 0)
      << "PoolResource shouldn't serve allocation requests where bytes aren't "
         "a multiple of alignment";
  if (block_size > max_block_size_) {
    // Deallocate a big block.
    BigBlock big_block{bytes, alignment, p};
    auto it = std::lower_bound(
        unpooled_.begin(), unpooled_.end(), big_block,
        [](const auto &a, const auto &b) { return a.data < b.data; });
    CHECK(it != unpooled_.end());
    CHECK(it->data == p && it->bytes == bytes && it->alignment == alignment);
    unpooled_.erase(it);
    GetUpstreamResource()->Deallocate(p, bytes, alignment);
    return;
  }
  // Deallocate a regular block; first check if last_dealloc_pool_ is
  // suitable.
  CHECK(last_dealloc_pool_);
  if (last_dealloc_pool_->GetBlockSize() == block_size)
    return last_dealloc_pool_->Deallocate(p);
  // Find the pool with a matching block_size.
  impl::Pool pool(block_size, max_blocks_per_chunk_, GetUpstreamResource());
  auto it = std::lower_bound(pools_.begin(), pools_.end(), pool,
                             [](const auto &a, const auto &b) {
                               return a.GetBlockSize() < b.GetBlockSize();
                             });
  CHECK(it != pools_.end());
  CHECK(it->GetBlockSize() == block_size);
  last_alloc_pool_ = &*it;
  last_dealloc_pool_ = &*it;
  return it->Deallocate(p);
}

void PoolResource::Release() {
  for (auto &pool : pools_) pool.Release();
  pools_.clear();
  for (auto &big_block : unpooled_)
    GetUpstreamResource()->Deallocate(big_block.data, big_block.bytes,
                                      big_block.alignment);
  unpooled_.clear();
}

// PoolResource END

} // namespace utils

@@ -6,6 +6,8 @@

#include <cstddef>
#include <memory>
#include <mutex>
#include <new>
#include <tuple>
#include <type_traits>
#include <utility>
@@ -15,8 +17,22 @@
// version, i.e. gcc 9.x.
#include <experimental/memory_resource>

#include "utils/math.hpp"
#include "utils/spin_lock.hpp"

namespace utils {

/// std::bad_alloc has no constructor accepting a message, so we wrap our
/// exceptions in this class.
class BadAlloc final : public std::bad_alloc {
  std::string msg_;

 public:
  explicit BadAlloc(const std::string &msg) : msg_(msg) {}

  const char *what() const noexcept override { return msg_.c_str(); }
};

/// Abstract class for writing custom memory management, i.e. allocators.
class MemoryResource {
 public:
@@ -24,17 +40,17 @@ class MemoryResource {

  /// Allocate storage with a size of at least `bytes` bytes.
  ///
  /// The returned storage is aligned to `alignment` clamped to
  /// `alignof(std::max_align_t)`. This means that it is valid to request
  /// larger alignment, but the storage will actually be aligned to
  /// `alignof(std::max_align_t)`.
-  //
-  /// Additionaly, `alignment` must be a power of 2, if it is not
-  /// `std::bad_alloc` is thrown.
+  ///
+  /// `bytes` must be greater than 0, while `alignment` must be a power of 2;
+  /// if they are not, `std::bad_alloc` is thrown.
  ///
  /// Some concrete implementations may have stricter requirements on `bytes`
  /// and `alignment` values.
  ///
  /// @throw std::bad_alloc if the requested storage and alignment combination
  /// cannot be obtained.
  void *Allocate(size_t bytes, size_t alignment = alignof(std::max_align_t)) {
    if (bytes == 0U || !IsPow2(alignment))
      throw BadAlloc("Invalid allocation request");
    return DoAllocate(bytes, alignment);
  }
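
A hypothetical caller-side illustration of this contract (not part of the commit; it assumes Deallocate mirrors Allocate's default alignment):

  utils::MemoryResource *mem = utils::NewDeleteResource();
  void *p = mem->Allocate(24U);      // aligned to alignof(std::max_align_t)
  void *q = mem->Allocate(24U, 8U);  // 8 is a power of 2, so this is valid
  mem->Deallocate(p, 24U);
  mem->Deallocate(q, 24U, 8U);
  // Both of the following throw BadAlloc (which is-a std::bad_alloc):
  //   mem->Allocate(0U);       // zero bytes
  //   mem->Allocate(24U, 3U);  // alignment is not a power of 2
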
@@ -231,6 +247,15 @@ class StdMemoryResource final : public MemoryResource {

 private:
  void *DoAllocate(size_t bytes, size_t alignment) override {
    // In the current implementation of libstdc++-8.3, the standard
    // memory_resource implementations don't check for alignment overflow.
    // Below is a copy of the _S_aligned_size implementation, except that we
    // throw if it overflows. Currently, this only concerns
    // new_delete_resource, as there are no other memory_resource
    // implementations available. The issue appears to persist in newer
    // implementations; additionally, pool_resource does no alignment of
    // allocated pointers whatsoever.
    size_t aligned_size = ((bytes - 1) | (alignment - 1)) + 1;
    if (aligned_size < bytes) throw BadAlloc("Allocation alignment overflow");
    return memory_->allocate(bytes, alignment);
  }
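
A worked example of the `((bytes - 1) | (alignment - 1)) + 1` rounding used above (illustrative only): for a power-of-2 alignment, OR-ing with `alignment - 1` sets all of the low bits, and adding 1 then rounds up to the next multiple of the alignment.

  // bytes = 10, alignment = 8:
  //   (10 - 1) | (8 - 1)  =  0b1001 | 0b0111  =  0b1111  =  15
  //   15 + 1              =  16, the first multiple of 8 that fits 10 bytes
  // Overflow case: bytes = SIZE_MAX, alignment = 8:
  //   (SIZE_MAX - 1) | 7  =  SIZE_MAX, and +1 wraps around to 0, so
  //   `aligned_size < bytes` detects the overflow and BadAlloc is thrown.
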
@@ -258,6 +283,9 @@ inline MemoryResource *NewDeleteResource() noexcept {
///
/// MonotonicBufferResource is not thread-safe!
///
+/// MonotonicBufferResource cannot handle alignment requests greater than
+/// `alignof(std::max_align_t)`!
+///
/// It's meant to be used for very fast allocations in situations where memory
/// is used to build objects and release them all at once. The class is
/// constructed with an initial buffer size for storing allocated objects. When
@@ -324,4 +352,171 @@ class MonotonicBufferResource final : public MemoryResource {
  }
};
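
A small, hypothetical usage sketch of the pattern described above (not part of the commit):

  utils::MonotonicBufferResource monotonic(1024);  // 1 KiB initial buffer
  for (int i = 0; i < 100; ++i) {
    // Each call just bumps a pointer in the current buffer; when the buffer
    // is exhausted, a larger one is requested from the upstream resource.
    void *slot = monotonic.Allocate(16U, 8U);
    static_cast<void>(slot);  // construct short-lived objects here
  }
  monotonic.Release();  // frees all buffers at once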

namespace impl {

/// Holds a number of Chunks, each serving blocks of a particular size. When a
/// Chunk runs out of available blocks, a new Chunk is allocated. The naming is
/// taken from the `libstdc++` implementation, but the implementation details
/// are more similar to the `FixedAllocator` described in "Small Object
/// Allocation" from "Modern C++ Design".
class Pool final {
  /// Holds a pointer into a chunk of memory which consists of equal sized
  /// blocks. Each Chunk can handle `std::numeric_limits<unsigned char>::max()`
  /// number of blocks. Blocks form a "free list", where each unused block
  /// stores an embedded index to the next unused block.
  struct Chunk {
    unsigned char *data;
    unsigned char first_available_block_ix;
    unsigned char blocks_available;
  };

  unsigned char blocks_per_chunk_;
  size_t block_size_;
  AVector<Chunk> chunks_;
  Chunk *last_alloc_chunk_{nullptr};
  Chunk *last_dealloc_chunk_{nullptr};

 public:
  static constexpr auto MaxBlocksInChunk() {
    return std::numeric_limits<decltype(
        Chunk::first_available_block_ix)>::max();
  }

  Pool(size_t block_size, unsigned char blocks_per_chunk,
       MemoryResource *memory);

  Pool(const Pool &) = delete;
  Pool &operator=(const Pool &) = delete;
  Pool(Pool &&) noexcept = default;
  Pool &operator=(Pool &&) = default;

  /// The destructor does not free blocks; you have to call `Release` before
  /// destruction.
  ~Pool();

  MemoryResource *GetUpstreamResource() const {
    return chunks_.get_allocator().GetMemoryResource();
  }

  auto GetBlockSize() const { return block_size_; }

  /// Get a pointer to the next available block. Blocks are stored
  /// contiguously, so each one is aligned to a block_size_ address, starting
  /// from the utils::Ceil2(block_size_) aligned address.
  void *Allocate();

  void Deallocate(void *p);

  void Release();
};

} // namespace impl

/// MemoryResource which serves allocation requests for different block sizes.
///
/// PoolResource is not thread-safe!
///
/// This class has the following properties with regards to memory management:
///
/// * All allocated memory will be freed upon destruction, even if Deallocate
///   has not been called for some of the allocated blocks.
/// * It consists of a collection of impl::Pool instances, each serving
///   requests for a different block size. Each impl::Pool manages a
///   collection of impl::Pool::Chunk instances which are divided into blocks
///   of uniform size.
/// * Since this MemoryResource serves blocks of a certain size, it cannot
///   serve arbitrary alignment requests. Each requested block size must be a
///   multiple of the alignment or smaller than the alignment value.
/// * An allocation request within the limits of the maximum block size will
///   find a Pool serving the requested size. If there's no Pool serving such
///   a request, a new one is instantiated.
/// * When a Pool exhausts its Chunk, a new one is allocated with the size for
///   the maximum number of blocks.
/// * Allocation requests which exceed the maximum block size will be
///   forwarded to the upstream MemoryResource.
/// * The maximum block size and the maximum number of blocks per chunk can be
///   tuned by passing the arguments to the constructor.
class PoolResource final : public MemoryResource {
 public:
  /// Construct with the given max_blocks_per_chunk, max_block_size and
  /// upstream memory.
  ///
  /// The implementation will use std::min(max_blocks_per_chunk,
  /// impl::Pool::MaxBlocksInChunk()) as the real maximum number of blocks per
  /// chunk. Allocation requests exceeding max_block_size are simply forwarded
  /// to upstream memory.
  PoolResource(size_t max_blocks_per_chunk, size_t max_block_size,
               MemoryResource *memory = NewDeleteResource());

  PoolResource(const PoolResource &) = delete;
  PoolResource &operator=(const PoolResource &) = delete;

  PoolResource(PoolResource &&) = default;
  PoolResource &operator=(PoolResource &&) = default;

  ~PoolResource() override { Release(); }

  MemoryResource *GetUpstreamResource() const {
    return pools_.get_allocator().GetMemoryResource();
  }

  /// Release all allocated memory.
  void Release();

 private:
  // A big block, larger than max_block_size_, doesn't go into a pool.
  struct BigBlock {
    size_t bytes;
    size_t alignment;
    void *data;
  };

  // TODO: A potential memory optimization is replacing `std::vector` with our
  // custom vector implementation which doesn't store a `MemoryResource *`.
  // Currently we have vectors for `pools_` and `unpooled_`, and each
  // `impl::Pool` stores a `chunks_` vector as well.

  // Pools are sorted by block_size_, ascending.
  AVector<impl::Pool> pools_;
  impl::Pool *last_alloc_pool_{nullptr};
  impl::Pool *last_dealloc_pool_{nullptr};
  // Unpooled BigBlocks are sorted by their data pointer.
  AVector<BigBlock> unpooled_;
  size_t max_blocks_per_chunk_;
  size_t max_block_size_;

  void *DoAllocate(size_t bytes, size_t alignment) override;

  void DoDeallocate(void *p, size_t bytes, size_t alignment) override;

  bool DoIsEqual(const MemoryResource &other) const noexcept override {
    return this == &other;
  }
};
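
As a usage illustration of the properties listed above (hypothetical, not part of the commit):

  {
    utils::PoolResource pool_mem(64U, 128U);    // 64 blocks/chunk, 128-byte max
    void *small = pool_mem.Allocate(32U, 8U);   // served by a 32-byte impl::Pool
    void *large = pool_mem.Allocate(256U, 8U);  // exceeds 128, forwarded upstream
    pool_mem.Deallocate(small, 32U, 8U);
    // `large` is never deallocated here; ~PoolResource calls Release(), so
    // the block is still freed when pool_mem goes out of scope.
  }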

/// Like PoolResource, but uses a SpinLock for thread-safe usage.
class SynchronizedPoolResource final : public MemoryResource {
 public:
  SynchronizedPoolResource(size_t max_blocks_per_chunk, size_t max_block_size,
                           MemoryResource *memory = NewDeleteResource())
      : pool_memory_(max_blocks_per_chunk, max_block_size, memory) {}

 private:
  PoolResource pool_memory_;
  SpinLock lock_;

  void *DoAllocate(size_t bytes, size_t alignment) override {
    std::lock_guard<SpinLock> guard(lock_);
    return pool_memory_.Allocate(bytes, alignment);
  }

  void DoDeallocate(void *p, size_t bytes, size_t alignment) override {
    std::lock_guard<SpinLock> guard(lock_);
    pool_memory_.Deallocate(p, bytes, alignment);
  }

  bool DoIsEqual(const MemoryResource &other) const noexcept override {
    return this == &other;
  }
};

} // namespace utils
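
Finally, a hypothetical illustration of sharing a SynchronizedPoolResource between threads (not part of the commit):

  #include <thread>
  #include <vector>

  #include "utils/memory.hpp"

  int main() {
    utils::SynchronizedPoolResource mem(64U, 64U);
    std::vector<std::thread> threads;
    for (int i = 0; i < 4; ++i) {
      threads.emplace_back([&mem] {
        // DoAllocate and DoDeallocate acquire the SpinLock, so concurrent
        // calls from multiple threads are safe.
        void *p = mem.Allocate(16U, 8U);
        mem.Deallocate(p, 16U, 8U);
      });
    }
    for (auto &t : threads) t.join();
  }
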
@@ -16,7 +16,6 @@ class TestMemory final : public utils::MemoryResource {
    new_count_++;
    EXPECT_TRUE(alignment != 0U && (alignment & (alignment - 1U)) == 0U)
        << "Alignment must be power of 2";
-    EXPECT_TRUE(alignment <= alignof(std::max_align_t));
    EXPECT_NE(bytes, 0);
    const size_t pad_size = 32;
    EXPECT_TRUE(bytes + pad_size > bytes) << "TestMemory size overflow";
@@ -42,7 +41,7 @@ class TestMemory final : public utils::MemoryResource {

  void DoDeallocate(void *ptr, size_t bytes, size_t alignment) override {
    delete_count_++;
-    // Dealloate the original ptr, before alignment adjustment.
+    // Deallocate the original ptr, before alignment adjustment.
    return utils::NewDeleteResource()->Deallocate(
        static_cast<char *>(ptr) - alignment, bytes, alignment);
  }
@@ -98,8 +97,7 @@ TEST(MonotonicBufferResource, AllocationOverInitialSize) {
  EXPECT_EQ(test_mem.delete_count_, 1);
  {
    utils::MonotonicBufferResource mem(1024, &test_mem);
-    // Test with large alignment
-    CheckAllocation(&mem, 1024, 1024);
+    CheckAllocation(&mem, 1025);
    EXPECT_EQ(test_mem.new_count_, 2);
  }
  EXPECT_EQ(test_mem.delete_count_, 2);
@@ -111,7 +109,7 @@ TEST(MonotonicBufferResource, AllocationOverCapacity) {
  utils::MonotonicBufferResource mem(1024, &test_mem);
  CheckAllocation(&mem, 24, 1);
  EXPECT_EQ(test_mem.new_count_, 1);
-  CheckAllocation(&mem, 1000, 64);
+  CheckAllocation(&mem, 1001);
  EXPECT_EQ(test_mem.new_count_, 2);
  EXPECT_EQ(test_mem.delete_count_, 0);
  mem.Release();
@@ -139,6 +137,13 @@ TEST(MonotonicBufferResource, AllocationWithSize0) {
  EXPECT_THROW(mem.Allocate(0), std::bad_alloc);
}

+// NOLINTNEXTLINE(hicpp-special-member-functions)
+TEST(MonotonicBufferResource, AllocationWithAlignmentGreaterThanMaxAlign) {
+  utils::MonotonicBufferResource mem(1024);
+  EXPECT_THROW(mem.Allocate(24, 2U * alignof(std::max_align_t)),
+               std::bad_alloc);
+}
+
TEST(MonotonicBufferResource, AllocationWithSizeOverflow) {
  size_t max_size = std::numeric_limits<size_t>::max();
  utils::MonotonicBufferResource mem(1024);
@@ -186,6 +191,118 @@ TEST(MonotonicBufferResource, AllocationWithInitialBufferOnStack) {
  }
}

// NOLINTNEXTLINE(hicpp-special-member-functions)
TEST(PoolResource, SingleSmallBlockAllocations) {
  TestMemory test_mem;
  const size_t max_blocks_per_chunk = 3U;
  const size_t max_block_size = 64U;
  utils::PoolResource mem(max_blocks_per_chunk, max_block_size, &test_mem);
  // Fill the first chunk.
  CheckAllocation(&mem, 64U, 1U);
  // May allocate more than once due to bookkeeping.
  EXPECT_GE(test_mem.new_count_, 1U);
  // Reset tracking and continue filling the first chunk.
  test_mem.new_count_ = 0U;
  CheckAllocation(&mem, 64U, 64U);
  CheckAllocation(&mem, 64U);
  EXPECT_EQ(test_mem.new_count_, 0U);
  // Reset tracking and fill the second chunk.
  test_mem.new_count_ = 0U;
  CheckAllocation(&mem, 64U, 32U);
  auto *ptr1 = CheckAllocation(&mem, 32U, 64U);  // becomes a 64-byte block
  auto *ptr2 = CheckAllocation(&mem, 64U, 32U);
  // We expect one allocation for the chunk and at most one for bookkeeping.
  EXPECT_TRUE(test_mem.new_count_ >= 1U && test_mem.new_count_ <= 2U);
  test_mem.delete_count_ = 0U;
  mem.Deallocate(ptr1, 32U, 64U);
  mem.Deallocate(ptr2, 64U, 32U);
  EXPECT_EQ(test_mem.delete_count_, 0U);
  mem.Release();
  EXPECT_GE(test_mem.delete_count_, 2U);
}

// NOLINTNEXTLINE(hicpp-special-member-functions)
TEST(PoolResource, MultipleSmallBlockAllocations) {
  TestMemory test_mem;
  const size_t max_blocks_per_chunk = 1U;
  const size_t max_block_size = 64U;
  utils::PoolResource mem(max_blocks_per_chunk, max_block_size, &test_mem);
  CheckAllocation(&mem, 64U);
  CheckAllocation(&mem, 18U, 2U);
  CheckAllocation(&mem, 24U, 8U);
  // May allocate more than once per chunk due to bookkeeping.
  EXPECT_GE(test_mem.new_count_, 3U);
  // Reset tracking and fill the second chunk of each pool.
  test_mem.new_count_ = 0U;
  CheckAllocation(&mem, 64U);
  CheckAllocation(&mem, 18U, 2U);
  CheckAllocation(&mem, 24U, 8U);
  // We expect one allocation per chunk and at most one each for bookkeeping.
  EXPECT_TRUE(test_mem.new_count_ >= 3U && test_mem.new_count_ <= 6U);
  mem.Release();
  EXPECT_GE(test_mem.delete_count_, 6U);
}

// NOLINTNEXTLINE(hicpp-special-member-functions)
TEST(PoolResource, BigBlockAllocations) {
  TestMemory test_mem;
  const size_t max_blocks_per_chunk = 3U;
  const size_t max_block_size = 64U;
  utils::PoolResource mem(max_blocks_per_chunk, max_block_size, &test_mem);
  CheckAllocation(&mem, max_block_size + 1, 1U);
  // May allocate more than once per block due to bookkeeping.
  EXPECT_GE(test_mem.new_count_, 1U);
  CheckAllocation(&mem, max_block_size + 1, 1U);
  EXPECT_GE(test_mem.new_count_, 2U);
  auto *ptr = CheckAllocation(&mem, max_block_size * 2, 1U);
  EXPECT_GE(test_mem.new_count_, 3U);
  mem.Deallocate(ptr, max_block_size * 2, 1U);
  EXPECT_GE(test_mem.delete_count_, 1U);
  mem.Release();
  EXPECT_GE(test_mem.delete_count_, 3U);
}

// NOLINTNEXTLINE(hicpp-special-member-functions)
TEST(PoolResource, BlockSizeIsNotMultipleOfAlignment) {
  const size_t max_blocks_per_chunk = 3U;
  const size_t max_block_size = 64U;
  utils::PoolResource mem(max_blocks_per_chunk, max_block_size);
  EXPECT_THROW(mem.Allocate(64U, 24U), std::bad_alloc);
  EXPECT_THROW(mem.Allocate(63U), std::bad_alloc);
  EXPECT_THROW(mem.Allocate(max_block_size + 1, max_block_size),
               std::bad_alloc);
}

// NOLINTNEXTLINE(hicpp-special-member-functions)
TEST(PoolResource, AllocationWithOverflow) {
  {
    const size_t max_blocks_per_chunk = 2U;
    utils::PoolResource mem(max_blocks_per_chunk,
                            std::numeric_limits<size_t>::max());
    EXPECT_THROW(mem.Allocate(std::numeric_limits<size_t>::max(), 1U),
                 std::bad_alloc);
    // Throws because the initial chunk block is aligned to
    // utils::Ceil2(block_size), which wraps around in this case.
    EXPECT_THROW(mem.Allocate((std::numeric_limits<size_t>::max() - 1U) /
                                  max_blocks_per_chunk,
                              1U),
                 std::bad_alloc);
  }
  {
    const size_t max_blocks_per_chunk = utils::impl::Pool::MaxBlocksInChunk();
    utils::PoolResource mem(max_blocks_per_chunk,
                            std::numeric_limits<size_t>::max());
    EXPECT_THROW(mem.Allocate(std::numeric_limits<size_t>::max(), 1U),
                 std::bad_alloc);
    // Throws because the initial chunk block is aligned to
    // utils::Ceil2(block_size), which wraps around in this case.
    EXPECT_THROW(mem.Allocate((std::numeric_limits<size_t>::max() - 1U) /
                                  max_blocks_per_chunk,
                              1U),
                 std::bad_alloc);
  }
}

// NOLINTNEXTLINE(hicpp-special-member-functions)
class ContainerWithAllocatorLast final {
 public: