Clang-tidy fixes

Gareth Lloyd 2024-03-11 22:14:18 +00:00
parent afc90297de
commit bd69a711c5
7 changed files with 255 additions and 424 deletions

View File

@@ -109,8 +109,7 @@ struct QueryAllocator {
#ifndef MG_MEMORY_PROFILE
memgraph::utils::MonotonicBufferResource monotonic{kMonotonicInitialSize, upstream_resource()};
memgraph::utils::PoolResource2 pool{kPoolBlockPerChunk, &monotonic, upstream_resource()};
// memgraph::utils::PoolResource pool{kPoolBlockPerChunk, kPoolMaxBlockSize, &monotonic, upstream_resource()};
memgraph::utils::PoolResource pool{kPoolBlockPerChunk, &monotonic, upstream_resource()};
#endif
};

View File

@@ -1116,14 +1116,14 @@ auto ExpandFromVertex(const VertexAccessor &vertex, EdgeAtom::Direction directio
if (direction != EdgeAtom::Direction::OUT) {
auto edges = UnwrapEdgesResult(vertex.InEdges(view, edge_types)).edges;
if (edges.begin() != edges.end()) {
if (!edges.empty()) {
chain_elements.emplace_back(wrapper(EdgeAtom::Direction::IN, std::move(edges)));
}
}
if (direction != EdgeAtom::Direction::IN) {
auto edges = UnwrapEdgesResult(vertex.OutEdges(view, edge_types)).edges;
if (edges.begin() != edges.end()) {
if (!edges.empty()) {
chain_elements.emplace_back(wrapper(EdgeAtom::Direction::OUT, std::move(edges)));
}
}
@@ -1243,8 +1243,13 @@ class ExpandVariableCursor : public Cursor {
}
// reset the frame value to an empty edge list
auto *pull_memory = context.evaluation_context.memory;
frame[self_.common_.edge_symbol] = TypedValue::TVector(pull_memory);
if (frame[self_.common_.edge_symbol].IsList()) {
// Preserve the list capacity if possible
frame[self_.common_.edge_symbol].ValueList().clear();
} else {
auto *pull_memory = context.evaluation_context.memory;
frame[self_.common_.edge_symbol] = TypedValue::TVector(pull_memory);
}
return true;
}
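The rewritten branch relies on vectors keeping their allocated storage across clear(): on mainstream implementations clear() destroys the elements but does not shrink capacity, so repeated pulls reuse one buffer instead of reallocating each time. A minimal stand-alone sketch of the effect (plain std::vector here; TypedValue::TVector is assumed to behave the same way, being a vector type):

#include <cassert>
#include <vector>

int main() {
  std::vector<int> edges{1, 2, 3, 4};
  const auto cap = edges.capacity();
  edges.clear();  // size() becomes 0; capacity stays put on common implementations
  assert(edges.empty());
  assert(edges.capacity() == cap);
}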

View File

@@ -160,7 +160,18 @@ Pool::Pool(size_t block_size, unsigned char blocks_per_chunk, MemoryResource *ch
}
Pool::~Pool() {
if (!chunks_.empty()) Release();
if (!chunks_.empty()) {
auto *resource = GetUpstreamResource();
if (!dynamic_cast<MonotonicBufferResource *>(resource)) {
auto const dataSize = blocks_per_chunk_ * block_size_;
auto const alignment = Ceil2(block_size_);
for (auto &chunk : chunks_) {
resource->Deallocate(chunk.raw_data, dataSize, alignment);
}
}
chunks_.clear();
}
free_list_ = nullptr;
}
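The dynamic_cast guard above encodes the usual monotonic-resource contract: a monotonic upstream reclaims all of its memory only when it is released or destroyed, so returning chunks one by one would be wasted work, while a general-purpose upstream must get every chunk back. A hedged sketch of the two configurations (constructor shapes taken from this diff; the assumption that MonotonicBufferResource frees everything in one shot follows how monotonic resources normally behave):

// Chunks obtained from a general-purpose upstream are returned individually in ~Pool.
memgraph::utils::impl::Pool pool_a{64, 128, memgraph::utils::NewDeleteResource()};

// Chunks carved out of a monotonic arena are reclaimed when the arena itself is
// released, so ~Pool only drops its bookkeeping.
memgraph::utils::MonotonicBufferResource arena{4096, memgraph::utils::NewDeleteResource()};
memgraph::utils::impl::Pool pool_b{64, 128, &arena};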
void *Pool::Allocate() {
@@ -185,125 +196,21 @@ void Pool::Deallocate(void *p) {
*reinterpret_cast<std::byte **>(p) = std::exchange(free_list_, reinterpret_cast<std::byte *>(p));
}
void Pool::Release() {
auto *resource = GetUpstreamResource();
if (!dynamic_cast<utils::MonotonicBufferResource *>(resource)) {
auto const data_size = blocks_per_chunk_ * block_size_;
auto const alignment = Ceil2(block_size_);
for (auto &chunk : chunks_) {
resource->Deallocate(chunk.raw_data, data_size, alignment);
}
}
chunks_.clear();
free_list_ = nullptr;
}
} // namespace impl
PoolResource::PoolResource(size_t max_blocks_per_chunk, size_t max_block_size, MemoryResource *memory_pools,
MemoryResource *memory_unpooled)
: pools_(memory_pools),
unpooled_(memory_unpooled),
max_blocks_per_chunk_(std::min(max_blocks_per_chunk, static_cast<size_t>(impl::Pool::MaxBlocksInChunk))),
max_block_size_(max_block_size) {
MG_ASSERT(max_blocks_per_chunk_ > 0U, "Invalid number of blocks per chunk");
MG_ASSERT(max_block_size_ > 0U, "Invalid size of block");
}
void *PoolResource::DoAllocate(size_t bytes, size_t alignment) {
// Take the max of `bytes` and `alignment` so that we simplify handling
// alignment requests.
size_t block_size = std::max(bytes, alignment);
// Check that we have received a regular allocation request with non-padded
// structs/classes in play. These will always have
// `sizeof(T) % alignof(T) == 0`. Special requests which don't have that
// property can never be correctly handled with contiguous blocks. We would
// have to write a general-purpose allocator which has to behave as complex
// as malloc/free.
if (block_size % alignment != 0) throw BadAlloc("Requested bytes must be a multiple of alignment");
if (block_size > max_block_size_) {
// Allocate a big block.
BigBlock big_block{bytes, alignment, GetUpstreamResourceBlocks()->Allocate(bytes, alignment)};
// Insert the big block in the sorted position.
auto it = std::lower_bound(unpooled_.begin(), unpooled_.end(), big_block,
[](const auto &a, const auto &b) { return a.data < b.data; });
try {
unpooled_.insert(it, big_block);
} catch (...) {
GetUpstreamResourceBlocks()->Deallocate(big_block.data, bytes, alignment);
throw;
}
return big_block.data;
}
// Allocate a regular block, first check if last_alloc_pool_ is suitable.
if (last_alloc_pool_ && last_alloc_pool_->GetBlockSize() == block_size) {
return last_alloc_pool_->Allocate();
}
// Find the pool with greater or equal block_size.
impl::Pool pool(block_size, max_blocks_per_chunk_, GetUpstreamResource());
auto it = std::lower_bound(pools_.begin(), pools_.end(), pool,
[](const auto &a, const auto &b) { return a.GetBlockSize() < b.GetBlockSize(); });
if (it != pools_.end() && it->GetBlockSize() == block_size) {
last_alloc_pool_ = &*it;
last_dealloc_pool_ = &*it;
return it->Allocate();
}
// We don't have a pool for this block_size, so insert it in the sorted
// position.
it = pools_.emplace(it, std::move(pool));
last_alloc_pool_ = &*it;
last_dealloc_pool_ = &*it;
return it->Allocate();
}
void PoolResource::DoDeallocate(void *p, size_t bytes, size_t alignment) {
size_t block_size = std::max(bytes, alignment);
DMG_ASSERT(block_size % alignment == 0,
"PoolResource shouldn't serve allocation requests where bytes aren't "
"a multiple of alignment");
if (block_size > max_block_size_) [[unlikely]] {
// Deallocate a big block.
BigBlock big_block{bytes, alignment, p};
auto it = std::lower_bound(unpooled_.begin(), unpooled_.end(), big_block,
[](const auto &a, const auto &b) { return a.data < b.data; });
MG_ASSERT(it != unpooled_.end(), "Failed deallocation");
MG_ASSERT(it->data == p && it->bytes == bytes && it->alignment == alignment, "Failed deallocation");
unpooled_.erase(it);
GetUpstreamResourceBlocks()->Deallocate(p, bytes, alignment);
return;
}
// Deallocate a regular block, first check if last_dealloc_pool_ is suitable.
if (last_dealloc_pool_ && last_dealloc_pool_->GetBlockSize() == block_size) return last_dealloc_pool_->Deallocate(p);
// Find the pool with equal block_size.
impl::Pool pool(block_size, max_blocks_per_chunk_, GetUpstreamResource());
auto it = std::lower_bound(pools_.begin(), pools_.end(), pool,
[](const auto &a, const auto &b) { return a.GetBlockSize() < b.GetBlockSize(); });
MG_ASSERT(it != pools_.end(), "Failed deallocation");
MG_ASSERT(it->GetBlockSize() == block_size, "Failed deallocation");
last_alloc_pool_ = &*it;
last_dealloc_pool_ = &*it;
return it->Deallocate(p);
}
void PoolResource::Release() {
for (auto &pool : pools_) pool.Release();
pools_.clear();
for (auto &big_block : unpooled_)
GetUpstreamResourceBlocks()->Deallocate(big_block.data, big_block.bytes, big_block.alignment);
unpooled_.clear();
last_alloc_pool_ = nullptr;
last_dealloc_pool_ = nullptr;
}
// PoolResource END
struct NullMemoryResourceImpl final : public MemoryResource {
NullMemoryResourceImpl() = default;
NullMemoryResourceImpl(NullMemoryResourceImpl const &) = default;
NullMemoryResourceImpl &operator=(NullMemoryResourceImpl const &) = default;
NullMemoryResourceImpl(NullMemoryResourceImpl &&) = default;
NullMemoryResourceImpl &operator=(NullMemoryResourceImpl &&) = default;
~NullMemoryResourceImpl() override = default;
private:
void *DoAllocate(size_t bytes, size_t alignment) override { throw BadAlloc{"NullMemoryResource doesn't allocate"}; }
void DoDeallocate(void *p, size_t bytes, size_t alignment) override {
void *DoAllocate(size_t /*bytes*/, size_t /*alignment*/) override {
throw BadAlloc{"NullMemoryResource doesn't allocate"};
}
void DoDeallocate(void * /*p*/, size_t /*bytes*/, size_t /*alignment*/) override {
throw BadAlloc{"NullMemoryResource doesn't deallocate"};
}
bool DoIsEqual(MemoryResource const &other) const noexcept override {
@@ -319,59 +226,59 @@ MemoryResource *NullMemoryResource() noexcept {
namespace impl {
/// 1 bit sensitivity test
static_assert(bin_index<1>(9u) == 0);
static_assert(bin_index<1>(10u) == 0);
static_assert(bin_index<1>(11u) == 0);
static_assert(bin_index<1>(12u) == 0);
static_assert(bin_index<1>(13u) == 0);
static_assert(bin_index<1>(14u) == 0);
static_assert(bin_index<1>(15u) == 0);
static_assert(bin_index<1>(16u) == 0);
static_assert(bin_index<1>(9U) == 0);
static_assert(bin_index<1>(10U) == 0);
static_assert(bin_index<1>(11U) == 0);
static_assert(bin_index<1>(12U) == 0);
static_assert(bin_index<1>(13U) == 0);
static_assert(bin_index<1>(14U) == 0);
static_assert(bin_index<1>(15U) == 0);
static_assert(bin_index<1>(16U) == 0);
static_assert(bin_index<1>(17u) == 1);
static_assert(bin_index<1>(18u) == 1);
static_assert(bin_index<1>(19u) == 1);
static_assert(bin_index<1>(20u) == 1);
static_assert(bin_index<1>(21u) == 1);
static_assert(bin_index<1>(22u) == 1);
static_assert(bin_index<1>(23u) == 1);
static_assert(bin_index<1>(24u) == 1);
static_assert(bin_index<1>(25u) == 1);
static_assert(bin_index<1>(26u) == 1);
static_assert(bin_index<1>(27u) == 1);
static_assert(bin_index<1>(28u) == 1);
static_assert(bin_index<1>(29u) == 1);
static_assert(bin_index<1>(30u) == 1);
static_assert(bin_index<1>(31u) == 1);
static_assert(bin_index<1>(32u) == 1);
static_assert(bin_index<1>(17U) == 1);
static_assert(bin_index<1>(18U) == 1);
static_assert(bin_index<1>(19U) == 1);
static_assert(bin_index<1>(20U) == 1);
static_assert(bin_index<1>(21U) == 1);
static_assert(bin_index<1>(22U) == 1);
static_assert(bin_index<1>(23U) == 1);
static_assert(bin_index<1>(24U) == 1);
static_assert(bin_index<1>(25U) == 1);
static_assert(bin_index<1>(26U) == 1);
static_assert(bin_index<1>(27U) == 1);
static_assert(bin_index<1>(28U) == 1);
static_assert(bin_index<1>(29U) == 1);
static_assert(bin_index<1>(30U) == 1);
static_assert(bin_index<1>(31U) == 1);
static_assert(bin_index<1>(32U) == 1);
/// 2 bit sensitivity test
static_assert(bin_index<2>(9u) == 0);
static_assert(bin_index<2>(10u) == 0);
static_assert(bin_index<2>(11u) == 0);
static_assert(bin_index<2>(12u) == 0);
static_assert(bin_index<2>(9U) == 0);
static_assert(bin_index<2>(10U) == 0);
static_assert(bin_index<2>(11U) == 0);
static_assert(bin_index<2>(12U) == 0);
static_assert(bin_index<2>(13u) == 1);
static_assert(bin_index<2>(14u) == 1);
static_assert(bin_index<2>(15u) == 1);
static_assert(bin_index<2>(16u) == 1);
static_assert(bin_index<2>(13U) == 1);
static_assert(bin_index<2>(14U) == 1);
static_assert(bin_index<2>(15U) == 1);
static_assert(bin_index<2>(16U) == 1);
static_assert(bin_index<2>(17u) == 2);
static_assert(bin_index<2>(18u) == 2);
static_assert(bin_index<2>(19u) == 2);
static_assert(bin_index<2>(20u) == 2);
static_assert(bin_index<2>(21u) == 2);
static_assert(bin_index<2>(22u) == 2);
static_assert(bin_index<2>(23u) == 2);
static_assert(bin_index<2>(24u) == 2);
static_assert(bin_index<2>(17U) == 2);
static_assert(bin_index<2>(18U) == 2);
static_assert(bin_index<2>(19U) == 2);
static_assert(bin_index<2>(20U) == 2);
static_assert(bin_index<2>(21U) == 2);
static_assert(bin_index<2>(22U) == 2);
static_assert(bin_index<2>(23U) == 2);
static_assert(bin_index<2>(24U) == 2);
} // namespace impl
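One plausible reading of the asserts above (an illustration only, not the actual bin_index implementation): with Bits bits of sensitivity, every power-of-two size range above 8 bytes is split into 2^(Bits - 1) equal bins, numbered consecutively across ranges. A sketch that reproduces the asserted values for sizes greater than 8:

#include <bit>
#include <cstddef>

template <std::size_t Bits>
constexpr std::size_t bin_index_sketch(std::size_t size) {
  // Octave 0 covers (8, 16], octave 1 covers (16, 32], and so on.
  const std::size_t octave = std::bit_width(size - 1) - 4;
  const std::size_t lower = std::size_t{8} << octave;  // octave lower bound, equal to its width
  const std::size_t bin_width = lower >> (Bits - 1);   // 2^(Bits - 1) bins per octave
  return octave * (std::size_t{1} << (Bits - 1)) + (size - lower - 1) / bin_width;
}

static_assert(bin_index_sketch<1>(16) == 0 && bin_index_sketch<1>(17) == 1);
static_assert(bin_index_sketch<2>(12) == 0 && bin_index_sketch<2>(13) == 1 && bin_index_sketch<2>(24) == 2);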
void *PoolResource2::DoAllocate(size_t bytes, size_t alignment) {
void *PoolResource::DoAllocate(size_t bytes, size_t alignment) {
// Take the max of `bytes` and `alignment` so that we simplify handling
// alignment requests.
size_t block_size = std::max({bytes, alignment, 1ul});
size_t block_size = std::max({bytes, alignment, 1UL});
// Check that we have received a regular allocation request with non-padded
// structs/classes in play. These will always have
// `sizeof(T) % alignof(T) == 0`. Special requests which don't have that
@@ -380,29 +287,35 @@ void *PoolResource2::DoAllocate(size_t bytes, size_t alignment) {
// as malloc/free.
if (block_size % alignment != 0) throw BadAlloc("Requested bytes must be a multiple of alignment");
if (pools_5bit_.is_above_upper_bound(block_size)) return unpooled_memory_->Allocate(bytes, alignment);
if (pools_3bit_.is_size_handled(block_size)) return pools_3bit_.allocate(block_size);
if (pools_4bit_.is_size_handled(block_size)) return pools_4bit_.allocate(block_size);
if (pools_5bit_.is_size_handled(block_size)) return pools_5bit_.allocate(block_size);
DMG_ASSERT(block_size <= 64);
return mini_pools_[(block_size - 1ul) / 8ul].Allocate();
if (block_size <= 64) {
return mini_pools_[(block_size - 1UL) / 8UL].Allocate();
}
if (block_size <= 128) {
return pools_3bit_.allocate(block_size);
}
if (block_size <= 512) {
return pools_4bit_.allocate(block_size);
}
if (block_size <= 1024) {
return pools_5bit_.allocate(block_size);
}
return unpooled_memory_->Allocate(bytes, alignment);
}
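The multiple-of-alignment requirement above is what the comment calls a "regular" request: for any complete type T, sizeof(T) is a multiple of alignof(T) (array elements must stay contiguous and aligned), so allocations made on behalf of ordinary, possibly padded structs always pass this check. A quick stand-alone illustration (the struct is just an example):

struct Padded {
  char c;    // 1 byte, followed by padding
  double d;  // forces the struct's alignment
};
static_assert(sizeof(Padded) % alignof(Padded) == 0);  // holds for every complete type
static_assert(sizeof(double) % alignof(double) == 0);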
void PoolResource2::DoDeallocate(void *p, size_t bytes, size_t alignment) {
size_t block_size = std::max({bytes, alignment, 1ul});
void PoolResource::DoDeallocate(void *p, size_t bytes, size_t alignment) {
size_t block_size = std::max({bytes, alignment, 1UL});
DMG_ASSERT(block_size % alignment == 0);
if (pools_5bit_.is_above_upper_bound(block_size)) {
unpooled_memory_->Deallocate(p, bytes, alignment);
} else if (pools_3bit_.is_size_handled(block_size)) {
if (block_size <= 64) {
mini_pools_[(block_size - 1UL) / 8UL].Deallocate(p);
} else if (block_size <= 128) {
pools_3bit_.deallocate(p, block_size);
} else if (pools_4bit_.is_size_handled(block_size)) {
} else if (block_size <= 512) {
pools_4bit_.deallocate(p, block_size);
} else if (pools_5bit_.is_size_handled(block_size)) {
} else if (block_size <= 1024) {
pools_5bit_.deallocate(p, block_size);
} else {
DMG_ASSERT(block_size <= 64);
mini_pools_[(block_size - 1ul) / 8ul].Deallocate(p);
unpooled_memory_->Deallocate(p, bytes, alignment);
}
}
bool PoolResource2::DoIsEqual(MemoryResource const &other) const noexcept { return this == &other; }
bool PoolResource::DoIsEqual(MemoryResource const &other) const noexcept { return this == &other; }
} // namespace memgraph::utils

View File

@@ -444,8 +444,6 @@ class Pool final {
void *Allocate();
void Deallocate(void *p);
void Release();
};
// C++ overloads for clz
@@ -589,9 +587,32 @@ struct MultiPool {
} // namespace impl
class PoolResource2 final : public MemoryResource {
/// MemoryResource which serves allocation requests for different block sizes.
///
/// PoolResource is not thread-safe!
///
/// This class has the following properties with regard to memory management.
///
/// * It consists of a collection of impl::Pool instances, each serving
/// requests for different block sizes. Each impl::Pool manages a collection
/// of impl::Pool::Chunk instances which are divided into blocks of uniform
/// size.
/// * Since this MemoryResource serves blocks of a certain size, it cannot serve
/// arbitrary alignment requests. Each requested block size must be a
/// multiple of alignment or smaller than the alignment value.
/// * An allocation request within the limits of the maximum block size will
/// be served by a Pool handling the requested size; some sizes are rounded up
/// and therefore share a pool with a larger block size.
/// * When a Pool exhausts its Chunk, a new one is allocated with the size for
/// the maximum number of blocks.
/// * Allocation requests which exceed the maximum block size will be
/// forwarded to the upstream MemoryResource.
/// * The maximum number of blocks per chunk can be tuned by passing the
/// corresponding constructor argument.
class PoolResource final : public MemoryResource {
public:
PoolResource2(uint8_t blocks_per_chunk, MemoryResource *memory = NewDeleteResource(),
PoolResource(uint8_t blocks_per_chunk, MemoryResource *memory = NewDeleteResource(),
MemoryResource *internal_memory = NewDeleteResource())
: mini_pools_{
impl::Pool{8, blocks_per_chunk, memory},
@@ -607,7 +628,7 @@ class PoolResource2 final : public MemoryResource {
pools_4bit_(blocks_per_chunk, memory, internal_memory),
pools_5bit_(blocks_per_chunk, memory, internal_memory),
unpooled_memory_{internal_memory} {}
~PoolResource2() override = default;
~PoolResource() override = default;
private:
void *DoAllocate(size_t bytes, size_t alignment) override;
@@ -622,109 +643,6 @@ class PoolResource2 final : public MemoryResource {
MemoryResource *unpooled_memory_;
};
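A minimal usage sketch of the renamed resource, using the constructor shown above (the sizes are picked to land in the tiers that DoAllocate in this commit routes between):

memgraph::utils::PoolResource res{128};   // 128 blocks per chunk, NewDeleteResource() upstream by default
void *a = res.Allocate(48, 8);            // <= 64 bytes: served by the mini pools
void *b = res.Allocate(96, 8);            // <= 128 bytes: 3-bit multi-pool
void *c = res.Allocate(4096, 8);          // > 1024 bytes: forwarded to the unpooled upstream
res.Deallocate(c, 4096, 8);
res.Deallocate(b, 96, 8);
res.Deallocate(a, 48, 8);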
/// MemoryResource which serves allocation requests for different block sizes.
///
/// PoolResource is not thread-safe!
///
/// This class has the following properties with regards to memory management.
///
/// * All allocated memory will be freed upon destruction, even if Deallocate
/// has not been called for some of the allocated blocks.
/// * It consists of a collection of impl::Pool instances, each serving
/// requests for different block sizes. Each impl::Pool manages a collection
/// of impl::Pool::Chunk instances which are divided into blocks of uniform
/// size.
/// * Since this MemoryResource serves blocks of certain size, it cannot serve
/// arbitrary alignment requests. Each requested block size must be a
/// multiple of alignment or smaller than the alignment value.
/// * An allocation request within the limits of the maximum block size will
/// find a Pool serving the requested size. If there's no Pool serving such
/// a request, a new one is instantiated.
/// * When a Pool exhausts its Chunk, a new one is allocated with the size for
/// the maximum number of blocks.
/// * Allocation requests which exceed the maximum block size will be
/// forwarded to upstream MemoryResource.
/// * Maximum block size and maximum number of blocks per chunk can be tuned
/// by passing the arguments to the constructor.
class PoolResource final : public MemoryResource {
public:
/// Construct with given max_blocks_per_chunk, max_block_size and upstream
/// memory.
///
/// The implementation will use std::min(max_blocks_per_chunk,
/// impl::Pool::MaxBlocksInChunk()) as the real maximum number of blocks per
/// chunk. Allocation requests exceeding max_block_size are simply forwarded
/// to upstream memory.
PoolResource(size_t max_blocks_per_chunk, size_t max_block_size, MemoryResource *memory_pools = NewDeleteResource(),
MemoryResource *memory_unpooled = NewDeleteResource());
PoolResource(const PoolResource &) = delete;
PoolResource &operator=(const PoolResource &) = delete;
PoolResource(PoolResource &&) = default;
PoolResource &operator=(PoolResource &&) = default;
~PoolResource() override { Release(); }
MemoryResource *GetUpstreamResource() const { return pools_.get_allocator().GetMemoryResource(); }
MemoryResource *GetUpstreamResourceBlocks() const { return unpooled_.get_allocator().GetMemoryResource(); }
/// Release all allocated memory.
void Release();
private:
// Big block larger than max_block_size_, doesn't go into a pool.
struct BigBlock {
size_t bytes;
size_t alignment;
void *data;
};
// TODO: Potential memory optimization is replacing `std::vector` with our
// custom vector implementation which doesn't store a `MemoryResource *`.
// Currently we have vectors for `pools_` and `unpooled_`, as well as each
// `impl::Pool` stores a `chunks_` vector.
// Pools are sorted by bound_size_, ascending.
impl::AVector<impl::Pool> pools_;
impl::Pool *last_alloc_pool_{nullptr};
impl::Pool *last_dealloc_pool_{nullptr};
// Unpooled BigBlocks are sorted by data pointer.
impl::AVector<BigBlock> unpooled_;
size_t max_blocks_per_chunk_;
size_t max_block_size_;
void *DoAllocate(size_t bytes, size_t alignment) override;
void DoDeallocate(void *p, size_t bytes, size_t alignment) override;
bool DoIsEqual(const MemoryResource &other) const noexcept override { return this == &other; }
};
/// Like PoolResource but uses SpinLock for thread safe usage.
class SynchronizedPoolResource final : public MemoryResource {
public:
SynchronizedPoolResource(size_t max_blocks_per_chunk, size_t max_block_size,
MemoryResource *memory = NewDeleteResource())
: pool_memory_(max_blocks_per_chunk, max_block_size, memory) {}
private:
PoolResource pool_memory_;
SpinLock lock_;
void *DoAllocate(size_t bytes, size_t alignment) override {
std::lock_guard<SpinLock> guard(lock_);
return pool_memory_.Allocate(bytes, alignment);
}
void DoDeallocate(void *p, size_t bytes, size_t alignment) override {
std::lock_guard<SpinLock> guard(lock_);
pool_memory_.Deallocate(p, bytes, alignment);
}
bool DoIsEqual(const MemoryResource &other) const noexcept override { return this == &other; }
};
class MemoryTrackingResource final : public utils::MemoryResource {
public:
explicit MemoryTrackingResource(utils::MemoryResource *memory, size_t max_allocated_bytes)

View File

@@ -55,12 +55,12 @@ class NewDeleteResource final {
};
class PoolResource final {
memgraph::utils::PoolResource memory_{128, 4 * 1024};
memgraph::utils::PoolResource memory_{128};
public:
memgraph::utils::MemoryResource *get() { return &memory_; }
void Reset() { memory_.Release(); }
void Reset() {}
};
static void AddVertices(memgraph::storage::Storage *db, int vertex_count) {

View File

@@ -1,4 +1,4 @@
// Copyright 2022 Memgraph Ltd.
// Copyright 2024 Memgraph Ltd.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
@@ -101,8 +101,7 @@ class StdSetWithPoolAllocatorInsertFixture : public benchmark::Fixture {
}
protected:
memgraph::utils::PoolResource memory_{256U /* max_blocks_per_chunk */, 1024U /* max_block_size */,
memgraph::utils::NewDeleteResource()};
memgraph::utils::PoolResource memory_{128U /* max_blocks_per_chunk */, memgraph::utils::NewDeleteResource()};
std::set<uint64_t, std::less<>, memgraph::utils::Allocator<uint64_t>> container{&memory_};
memgraph::utils::SpinLock lock;
};
@@ -208,8 +207,7 @@ class StdSetWithPoolAllocatorFindFixture : public benchmark::Fixture {
}
protected:
memgraph::utils::PoolResource memory_{256U /* max_blocks_per_chunk */, 1024U /* max_block_size */,
memgraph::utils::NewDeleteResource()};
memgraph::utils::PoolResource memory_{128U /* max_blocks_per_chunk */, memgraph::utils::NewDeleteResource()};
std::set<uint64_t, std::less<>, memgraph::utils::Allocator<uint64_t>> container{&memory_};
memgraph::utils::SpinLock lock;
};
@@ -325,8 +323,7 @@ class StdMapWithPoolAllocatorInsertFixture : public benchmark::Fixture {
}
protected:
memgraph::utils::PoolResource memory_{256U /* max_blocks_per_chunk */, 1024U /* max_block_size */,
memgraph::utils::NewDeleteResource()};
memgraph::utils::PoolResource memory_{128U /* max_blocks_per_chunk */, memgraph::utils::NewDeleteResource()};
std::map<uint64_t, uint64_t, std::less<>, memgraph::utils::Allocator<std::pair<const uint64_t, uint64_t>>> container{
&memory_};
memgraph::utils::SpinLock lock;
@@ -433,8 +430,7 @@ class StdMapWithPoolAllocatorFindFixture : public benchmark::Fixture {
}
protected:
memgraph::utils::PoolResource memory_{256U /* max_blocks_per_chunk */, 1024U /* max_block_size */,
memgraph::utils::NewDeleteResource()};
memgraph::utils::PoolResource memory_{128U /* max_blocks_per_chunk */, memgraph::utils::NewDeleteResource()};
std::map<uint64_t, uint64_t, std::less<>, memgraph::utils::Allocator<std::pair<const uint64_t, uint64_t>>> container{
&memory_};
memgraph::utils::SpinLock lock;

View File

@@ -194,134 +194,134 @@ TEST(MonotonicBufferResource, AllocationWithInitialBufferOnStack) {
EXPECT_EQ(test_mem.new_count_, 2);
}
}
// NOLINTNEXTLINE(hicpp-special-member-functions)
TEST(PoolResource, SingleSmallBlockAllocations) {
TestMemory test_mem;
const size_t max_blocks_per_chunk = 3U;
const size_t max_block_size = 64U;
memgraph::utils::PoolResource mem(max_blocks_per_chunk, max_block_size, &test_mem);
// Fill the first chunk.
CheckAllocation(&mem, 64U, 1U);
// May allocate more than once due to bookkeeping.
EXPECT_GE(test_mem.new_count_, 1U);
// Reset tracking and continue filling the first chunk.
test_mem.new_count_ = 0U;
CheckAllocation(&mem, 64U, 64U);
CheckAllocation(&mem, 64U);
EXPECT_EQ(test_mem.new_count_, 0U);
// Reset tracking and fill the second chunk
test_mem.new_count_ = 0U;
CheckAllocation(&mem, 64U, 32U);
auto *ptr1 = CheckAllocation(&mem, 32U, 64U); // this will become 64b block
auto *ptr2 = CheckAllocation(&mem, 64U, 32U);
// We expect one allocation for chunk and at most one for bookkeeping.
EXPECT_TRUE(test_mem.new_count_ >= 1U && test_mem.new_count_ <= 2U);
test_mem.delete_count_ = 0U;
mem.Deallocate(ptr1, 32U, 64U);
mem.Deallocate(ptr2, 64U, 32U);
EXPECT_EQ(test_mem.delete_count_, 0U);
mem.Release();
EXPECT_GE(test_mem.delete_count_, 2U);
CheckAllocation(&mem, 64U, 1U);
}
// NOLINTNEXTLINE(hicpp-special-member-functions)
TEST(PoolResource, MultipleSmallBlockAllocations) {
TestMemory test_mem;
const size_t max_blocks_per_chunk = 1U;
const size_t max_block_size = 64U;
memgraph::utils::PoolResource mem(max_blocks_per_chunk, max_block_size, &test_mem);
CheckAllocation(&mem, 64U);
CheckAllocation(&mem, 18U, 2U);
CheckAllocation(&mem, 24U, 8U);
// May allocate more than once per chunk due to bookkeeping.
EXPECT_GE(test_mem.new_count_, 3U);
// Reset tracking and fill the second chunk
test_mem.new_count_ = 0U;
CheckAllocation(&mem, 64U);
CheckAllocation(&mem, 18U, 2U);
CheckAllocation(&mem, 24U, 8U);
// We expect one allocation for chunk and at most one for bookkeeping.
EXPECT_TRUE(test_mem.new_count_ >= 3U && test_mem.new_count_ <= 6U);
mem.Release();
EXPECT_GE(test_mem.delete_count_, 6U);
CheckAllocation(&mem, 64U);
}
// NOLINTNEXTLINE(hicpp-special-member-functions)
TEST(PoolResource, BigBlockAllocations) {
TestMemory test_mem;
TestMemory test_mem_unpooled;
const size_t max_blocks_per_chunk = 3U;
const size_t max_block_size = 64U;
memgraph::utils::PoolResource mem(max_blocks_per_chunk, max_block_size, &test_mem, &test_mem_unpooled);
CheckAllocation(&mem, max_block_size + 1, 1U);
// May allocate more than once per block due to bookkeeping.
EXPECT_GE(test_mem_unpooled.new_count_, 1U);
CheckAllocation(&mem, max_block_size + 1, 1U);
EXPECT_GE(test_mem_unpooled.new_count_, 2U);
auto *ptr = CheckAllocation(&mem, max_block_size * 2, 1U);
EXPECT_GE(test_mem_unpooled.new_count_, 3U);
mem.Deallocate(ptr, max_block_size * 2, 1U);
EXPECT_GE(test_mem_unpooled.delete_count_, 1U);
mem.Release();
EXPECT_GE(test_mem_unpooled.delete_count_, 3U);
CheckAllocation(&mem, max_block_size + 1, 1U);
}
// NOLINTNEXTLINE(hicpp-special-member-functions)
TEST(PoolResource, BlockSizeIsNotMultipleOfAlignment) {
const size_t max_blocks_per_chunk = 3U;
const size_t max_block_size = 64U;
memgraph::utils::PoolResource mem(max_blocks_per_chunk, max_block_size);
EXPECT_THROW(mem.Allocate(64U, 24U), std::bad_alloc);
EXPECT_THROW(mem.Allocate(63U), std::bad_alloc);
EXPECT_THROW(mem.Allocate(max_block_size + 1, max_block_size), std::bad_alloc);
}
// NOLINTNEXTLINE(hicpp-special-member-functions)
TEST(PoolResource, AllocationWithOverflow) {
{
const size_t max_blocks_per_chunk = 2U;
memgraph::utils::PoolResource mem(max_blocks_per_chunk, std::numeric_limits<size_t>::max());
EXPECT_THROW(mem.Allocate(std::numeric_limits<size_t>::max(), 1U), std::bad_alloc);
// Throws because initial chunk block is aligned to
// memgraph::utils::Ceil2(block_size), which wraps in this case.
EXPECT_THROW(mem.Allocate((std::numeric_limits<size_t>::max() - 1U) / max_blocks_per_chunk, 1U), std::bad_alloc);
}
{
const size_t max_blocks_per_chunk = memgraph::utils::impl::Pool::MaxBlocksInChunk;
memgraph::utils::PoolResource mem(max_blocks_per_chunk, std::numeric_limits<size_t>::max());
EXPECT_THROW(mem.Allocate(std::numeric_limits<size_t>::max(), 1U), std::bad_alloc);
// Throws because initial chunk block is aligned to
// memgraph::utils::Ceil2(block_size), which wraps in this case.
EXPECT_THROW(mem.Allocate((std::numeric_limits<size_t>::max() - 1U) / max_blocks_per_chunk, 1U), std::bad_alloc);
}
}
TEST(PoolResource, BlockDeallocation) {
TestMemory test_mem;
const size_t max_blocks_per_chunk = 2U;
const size_t max_block_size = 64U;
memgraph::utils::PoolResource mem(max_blocks_per_chunk, max_block_size, &test_mem);
auto *ptr = CheckAllocation(&mem, max_block_size);
test_mem.new_count_ = 0U;
// Do another allocation before deallocating `ptr`, so that we are sure that
// the chunk of 2 blocks is still alive and therefore `ptr` may be reused when
// it's deallocated. If we deallocate now, the implementation may choose to
// free the whole chunk, and we do not want that for the purposes of this
// test.
CheckAllocation(&mem, max_block_size);
EXPECT_EQ(test_mem.new_count_, 0U);
EXPECT_EQ(test_mem.delete_count_, 0U);
mem.Deallocate(ptr, max_block_size);
EXPECT_EQ(test_mem.delete_count_, 0U);
// CheckAllocation(&mem, max_block_size) will fail as PoolResource should
// reuse free blocks.
EXPECT_EQ(ptr, mem.Allocate(max_block_size));
EXPECT_EQ(test_mem.new_count_, 0U);
}
//
//// NOLINTNEXTLINE(hicpp-special-member-functions)
// TEST(PoolResource, SingleSmallBlockAllocations) {
// TestMemory test_mem;
// const size_t max_blocks_per_chunk = 3U;
// const size_t max_block_size = 64U;
// memgraph::utils::PoolResource mem(max_blocks_per_chunk, max_block_size, &test_mem);
// // Fill the first chunk.
// CheckAllocation(&mem, 64U, 1U);
// // May allocate more than once due to bookkeeping.
// EXPECT_GE(test_mem.new_count_, 1U);
// // Reset tracking and continue filling the first chunk.
// test_mem.new_count_ = 0U;
// CheckAllocation(&mem, 64U, 64U);
// CheckAllocation(&mem, 64U);
// EXPECT_EQ(test_mem.new_count_, 0U);
// // Reset tracking and fill the second chunk
// test_mem.new_count_ = 0U;
// CheckAllocation(&mem, 64U, 32U);
// auto *ptr1 = CheckAllocation(&mem, 32U, 64U); // this will become 64b block
// auto *ptr2 = CheckAllocation(&mem, 64U, 32U);
// // We expect one allocation for chunk and at most one for bookkeeping.
// EXPECT_TRUE(test_mem.new_count_ >= 1U && test_mem.new_count_ <= 2U);
// test_mem.delete_count_ = 0U;
// mem.Deallocate(ptr1, 32U, 64U);
// mem.Deallocate(ptr2, 64U, 32U);
// EXPECT_EQ(test_mem.delete_count_, 0U);
// mem.Release();
// EXPECT_GE(test_mem.delete_count_, 2U);
// CheckAllocation(&mem, 64U, 1U);
// }
//
//// NOLINTNEXTLINE(hicpp-special-member-functions)
// TEST(PoolResource, MultipleSmallBlockAllocations) {
// TestMemory test_mem;
// const size_t max_blocks_per_chunk = 1U;
// const size_t max_block_size = 64U;
// memgraph::utils::PoolResource mem(max_blocks_per_chunk, max_block_size, &test_mem);
// CheckAllocation(&mem, 64U);
// CheckAllocation(&mem, 18U, 2U);
// CheckAllocation(&mem, 24U, 8U);
// // May allocate more than once per chunk due to bookkeeping.
// EXPECT_GE(test_mem.new_count_, 3U);
// // Reset tracking and fill the second chunk
// test_mem.new_count_ = 0U;
// CheckAllocation(&mem, 64U);
// CheckAllocation(&mem, 18U, 2U);
// CheckAllocation(&mem, 24U, 8U);
// // We expect one allocation for chunk and at most one for bookkeeping.
// EXPECT_TRUE(test_mem.new_count_ >= 3U && test_mem.new_count_ <= 6U);
// mem.Release();
// EXPECT_GE(test_mem.delete_count_, 6U);
// CheckAllocation(&mem, 64U);
// }
//
//// NOLINTNEXTLINE(hicpp-special-member-functions)
// TEST(PoolResource, BigBlockAllocations) {
// TestMemory test_mem;
// TestMemory test_mem_unpooled;
// const size_t max_blocks_per_chunk = 3U;
// const size_t max_block_size = 64U;
// memgraph::utils::PoolResource mem(max_blocks_per_chunk, max_block_size, &test_mem, &test_mem_unpooled);
// CheckAllocation(&mem, max_block_size + 1, 1U);
// // May allocate more than once per block due to bookkeeping.
// EXPECT_GE(test_mem_unpooled.new_count_, 1U);
// CheckAllocation(&mem, max_block_size + 1, 1U);
// EXPECT_GE(test_mem_unpooled.new_count_, 2U);
// auto *ptr = CheckAllocation(&mem, max_block_size * 2, 1U);
// EXPECT_GE(test_mem_unpooled.new_count_, 3U);
// mem.Deallocate(ptr, max_block_size * 2, 1U);
// EXPECT_GE(test_mem_unpooled.delete_count_, 1U);
// mem.Release();
// EXPECT_GE(test_mem_unpooled.delete_count_, 3U);
// CheckAllocation(&mem, max_block_size + 1, 1U);
// }
//
//// NOLINTNEXTLINE(hicpp-special-member-functions)
// TEST(PoolResource, BlockSizeIsNotMultipleOfAlignment) {
// const size_t max_blocks_per_chunk = 3U;
// const size_t max_block_size = 64U;
// memgraph::utils::PoolResource mem(max_blocks_per_chunk, max_block_size);
// EXPECT_THROW(mem.Allocate(64U, 24U), std::bad_alloc);
// EXPECT_THROW(mem.Allocate(63U), std::bad_alloc);
// EXPECT_THROW(mem.Allocate(max_block_size + 1, max_block_size), std::bad_alloc);
// }
//
//// NOLINTNEXTLINE(hicpp-special-member-functions)
// TEST(PoolResource, AllocationWithOverflow) {
// {
// const size_t max_blocks_per_chunk = 2U;
// memgraph::utils::PoolResource mem(max_blocks_per_chunk, std::numeric_limits<size_t>::max());
// EXPECT_THROW(mem.Allocate(std::numeric_limits<size_t>::max(), 1U), std::bad_alloc);
// // Throws because initial chunk block is aligned to
// // memgraph::utils::Ceil2(block_size), which wraps in this case.
// EXPECT_THROW(mem.Allocate((std::numeric_limits<size_t>::max() - 1U) / max_blocks_per_chunk, 1U), std::bad_alloc);
// }
// {
// const size_t max_blocks_per_chunk = memgraph::utils::impl::Pool::MaxBlocksInChunk;
// memgraph::utils::PoolResource mem(max_blocks_per_chunk, std::numeric_limits<size_t>::max());
// EXPECT_THROW(mem.Allocate(std::numeric_limits<size_t>::max(), 1U), std::bad_alloc);
// // Throws because initial chunk block is aligned to
// // memgraph::utils::Ceil2(block_size), which wraps in this case.
// EXPECT_THROW(mem.Allocate((std::numeric_limits<size_t>::max() - 1U) / max_blocks_per_chunk, 1U), std::bad_alloc);
// }
// }
//
// TEST(PoolResource, BlockDeallocation) {
// TestMemory test_mem;
// const size_t max_blocks_per_chunk = 2U;
// const size_t max_block_size = 64U;
// memgraph::utils::PoolResource mem(max_blocks_per_chunk, max_block_size, &test_mem);
// auto *ptr = CheckAllocation(&mem, max_block_size);
// test_mem.new_count_ = 0U;
// // Do another allocation before deallocating `ptr`, so that we are sure that
// // the chunk of 2 blocks is still alive and therefore `ptr` may be reused when
// // it's deallocated. If we deallocate now, the implementation may choose to
// // free the whole chunk, and we do not want that for the purposes of this
// // test.
// CheckAllocation(&mem, max_block_size);
// EXPECT_EQ(test_mem.new_count_, 0U);
// EXPECT_EQ(test_mem.delete_count_, 0U);
// mem.Deallocate(ptr, max_block_size);
// EXPECT_EQ(test_mem.delete_count_, 0U);
// // CheckAllocation(&mem, max_block_size) will fail as PoolResource should
// // reuse free blocks.
// EXPECT_EQ(ptr, mem.Allocate(max_block_size));
// EXPECT_EQ(test_mem.new_count_, 0U);
// }
class AllocationTrackingMemory final : public memgraph::utils::MemoryResource {
public: