Order chunks in utils::Pool to speed up deallocation (#898)

Author: János Benjamin Antal, 2023-05-02 13:08:20 +02:00 (committed by GitHub)
Parent: eead0f79fc
Commit: 3a5f140c2b
2 changed files with 26 additions and 17 deletions


@@ -1066,16 +1066,18 @@ std::optional<plan::ProfilingStatsWithTotalTime> PullPlan::Pull(AnyStream *stream
   utils::ResourceWithOutOfMemoryException resource_with_exception;
   utils::MonotonicBufferResource monotonic_memory{&stack_data[0], stack_size, &resource_with_exception};
   std::optional<utils::PoolResource> pool_memory;
+  static constexpr auto kMaxBlockPerChunks = 128;
   if (!use_monotonic_memory_) {
-    pool_memory.emplace(8, kExecutionPoolMaxBlockSize, &resource_with_exception, &resource_with_exception);
+    pool_memory.emplace(kMaxBlockPerChunks, kExecutionPoolMaxBlockSize, &resource_with_exception,
+                        &resource_with_exception);
   } else {
     // We can throw on every query because simple queries for deleting will use only
     // the stack allocated buffer.
     // Also, we want to throw only when the query engine requests more memory and not the storage,
     // so we add the exception to the allocator.
     // TODO (mferencevic): Tune the parameters accordingly.
-    pool_memory.emplace(128, 1024, &monotonic_memory, &resource_with_exception);
+    pool_memory.emplace(kMaxBlockPerChunks, 1024, &monotonic_memory, &resource_with_exception);
   }
   std::optional<utils::LimitedMemoryResource> maybe_limited_resource;
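
Both branches now go through the shared kMaxBlockPerChunks constant instead of the hard-coded 8 and 128, so every pool chunk holds up to 128 blocks. For readers unfamiliar with what the two parameters mean, the C++17 standard library exposes the same knobs through std::pmr::pool_options; the sketch below uses that analogous standard facility, not Memgraph's utils::PoolResource:

#include <memory_resource>
#include <vector>

int main() {
  // Analogous standard-library knobs: how many blocks a chunk may hold and the
  // largest block size the pool serves (larger requests go straight upstream).
  std::pmr::pool_options opts;
  opts.max_blocks_per_chunk = 128;          // plays the role of kMaxBlockPerChunks
  opts.largest_required_pool_block = 1024;  // plays the role of the max block size
  std::pmr::unsynchronized_pool_resource pool(opts, std::pmr::new_delete_resource());
  std::pmr::vector<int> v(&pool);  // all of v's allocations are served by the pool
  v.resize(1000);
}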


@@ -1,4 +1,4 @@
-// Copyright 2022 Memgraph Ltd.
+// Copyright 2023 Memgraph Ltd.
 //
 // Use of this software is governed by the Business Source License
 // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
@@ -187,14 +187,19 @@ void *Pool::Allocate() {
     for (unsigned char i = 0U; i < blocks_per_chunk_; ++i) {
       *(data + (i * block_size_)) = i + 1U;
     }
+    Chunk chunk{data, 0, blocks_per_chunk_};
+    // Insert the big block in the sorted position.
+    auto it = std::lower_bound(chunks_.begin(), chunks_.end(), chunk,
+                               [](const auto &a, const auto &b) { return a.data < b.data; });
     try {
-      chunks_.push_back(Chunk{data, 0, blocks_per_chunk_});
+      it = chunks_.insert(it, chunk);
     } catch (...) {
       GetUpstreamResource()->Deallocate(data, data_size, alignment);
       throw;
     }
-    last_alloc_chunk_ = &chunks_.back();
-    last_dealloc_chunk_ = &chunks_.back();
+    last_alloc_chunk_ = &*it;
+    last_dealloc_chunk_ = &*it;
     return allocate_block_from_chunk(last_alloc_chunk_);
   }
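
The key change in Allocate: instead of push_back, the freshly carved chunk is inserted at its lower_bound position, so chunks_ stays sorted by base address and Deallocate (next hunk) can binary-search it. Note the loop above also threads an in-chunk free list: the first byte of each free block stores the index of the next free block, which is why a Chunk needs only single-byte cursors. A minimal sketch of the sorted-insert pattern, with illustrative names rather than Memgraph's actual types:

#include <algorithm>
#include <vector>

struct Chunk {
  unsigned char *data;  // base address of the chunk's memory
  unsigned char first_available_block_ix;
  unsigned char blocks_available;
};

// Keep `chunks` ordered by base address: O(log n) to find the spot and O(n)
// element moves on insert, in exchange for O(log n) lookups on deallocation.
Chunk *InsertSorted(std::vector<Chunk> &chunks, Chunk chunk) {
  auto it = std::lower_bound(chunks.begin(), chunks.end(), chunk,
                             [](const Chunk &a, const Chunk &b) { return a.data < b.data; });
  return &*chunks.insert(it, chunk);
}

This is also why the commit reuses the iterator returned by insert (it = chunks_.insert(it, chunk)): insertion into a vector invalidates iterators at and after the insertion point, so the pre-insert lower_bound iterator cannot be dereferenced afterwards.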
@@ -223,18 +228,20 @@ void Pool::Deallocate(void *p) {
     deallocate_block_from_chunk(last_dealloc_chunk_);
     return;
   }
-  // Find the chunk which served this allocation
-  for (auto &chunk : chunks_) {
-    if (is_in_chunk(chunk)) {
-      // Update last_alloc_chunk_ as well because it now has a free block.
-      // Additionally this corresponds with C++ pattern of allocations and
-      // deallocations being done in reverse order.
-      last_alloc_chunk_ = &chunk;
-      last_dealloc_chunk_ = &chunk;
-      deallocate_block_from_chunk(&chunk);
-      return;
-    }
-  }
+  Chunk chunk{reinterpret_cast<unsigned char *>(p) - blocks_per_chunk_ * block_size_, 0, 0};
+  auto it = std::lower_bound(chunks_.begin(), chunks_.end(), chunk,
+                             [](const auto &a, const auto &b) { return a.data <= b.data; });
+  MG_ASSERT(it != chunks_.end(), "Failed deallocation in utils::Pool");
+  MG_ASSERT(is_in_chunk(*it), "Failed deallocation in utils::Pool");
+  // Update last_alloc_chunk_ as well because it now has a free block.
+  // Additionally this corresponds with C++ pattern of allocations and
+  // deallocations being done in reverse order.
+  last_alloc_chunk_ = &*it;
+  last_dealloc_chunk_ = &*it;
+  deallocate_block_from_chunk(last_dealloc_chunk_);
+  // TODO: We could release the Chunk to upstream memory
 }
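
Deallocate replaces the linear scan with the same binary search. The probe chunk's base is set to p minus one full chunk size; with the comparator a.data <= b.data, std::lower_bound returns the first chunk whose base address is strictly greater than p - chunk_size. Because chunk bases are sorted and spaced at least blocks_per_chunk_ * block_size_ apart, that first candidate is the only chunk that can contain p, and the two MG_ASSERTs check that it actually does. A runnable sketch of the same lookup (names are hypothetical, and the pointer math is confined to one buffer so the comparisons are well-defined):

#include <algorithm>
#include <cassert>
#include <cstddef>
#include <vector>

struct Chunk {
  unsigned char *data;  // base address; chunks are kept sorted by this field
};

Chunk *FindOwningChunk(std::vector<Chunk> &chunks, void *p, std::size_t chunk_size) {
  Chunk probe{static_cast<unsigned char *>(p) - chunk_size};
  // With `a.data <= b.data`, lower_bound yields the first chunk whose base is
  // strictly greater than p - chunk_size: the only possible owner of p.
  auto it = std::lower_bound(chunks.begin(), chunks.end(), probe,
                             [](const Chunk &a, const Chunk &b) { return a.data <= b.data; });
  assert(it != chunks.end());  // mirrors the first MG_ASSERT
  auto *block = static_cast<unsigned char *>(p);
  assert(it->data <= block && block < it->data + chunk_size);  // mirrors is_in_chunk
  return &*it;
}

int main() {
  constexpr std::size_t kChunkSize = 32;  // e.g. 4 blocks of 8 bytes each
  unsigned char buffer[3 * kChunkSize];
  std::vector<Chunk> chunks{{buffer}, {buffer + kChunkSize}, {buffer + 2 * kChunkSize}};
  void *p = buffer + kChunkSize + 8;  // a block inside the middle chunk
  assert(FindOwningChunk(chunks, p, kChunkSize)->data == buffer + kChunkSize);
}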