diff --git a/src/query/interpreter.cpp b/src/query/interpreter.cpp
index 93eba4d59..916043e7b 100644
--- a/src/query/interpreter.cpp
+++ b/src/query/interpreter.cpp
@@ -1066,16 +1066,18 @@ std::optional<plan::ProfilingStatsWithTotalTime> PullPlan::Pull(AnyStream *strea
   utils::ResourceWithOutOfMemoryException resource_with_exception;
   utils::MonotonicBufferResource monotonic_memory{&stack_data[0], stack_size, &resource_with_exception};
   std::optional<utils::PoolResource> pool_memory;
+  static constexpr auto kMaxBlockPerChunks = 128;
   if (!use_monotonic_memory_) {
-    pool_memory.emplace(8, kExecutionPoolMaxBlockSize, &resource_with_exception, &resource_with_exception);
+    pool_memory.emplace(kMaxBlockPerChunks, kExecutionPoolMaxBlockSize, &resource_with_exception,
+                        &resource_with_exception);
   } else {
     // We can throw on every query because a simple queries for deleting will use only
     // the stack allocated buffer.
     // Also, we want to throw only when the query engine requests more memory and not the storage
     // so we add the exception to the allocator.
     // TODO (mferencevic): Tune the parameters accordingly.
-    pool_memory.emplace(128, 1024, &monotonic_memory, &resource_with_exception);
+    pool_memory.emplace(kMaxBlockPerChunks, 1024, &monotonic_memory, &resource_with_exception);
   }
   std::optional<utils::LimitedMemoryResource> maybe_limited_resource;
diff --git a/src/utils/memory.cpp b/src/utils/memory.cpp
index f1cfca4e0..849a7c3b1 100644
--- a/src/utils/memory.cpp
+++ b/src/utils/memory.cpp
@@ -1,4 +1,4 @@
-// Copyright 2022 Memgraph Ltd.
+// Copyright 2023 Memgraph Ltd.
 //
 // Use of this software is governed by the Business Source License
 // included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
@@ -187,14 +187,19 @@ void *Pool::Allocate() {
   for (unsigned char i = 0U; i < blocks_per_chunk_; ++i) {
     *(data + (i * block_size_)) = i + 1U;
   }
+  Chunk chunk{data, 0, blocks_per_chunk_};
+  // Insert the big block in the sorted position.
+  auto it = std::lower_bound(chunks_.begin(), chunks_.end(), chunk,
+                             [](const auto &a, const auto &b) { return a.data < b.data; });
   try {
-    chunks_.push_back(Chunk{data, 0, blocks_per_chunk_});
+    it = chunks_.insert(it, chunk);
   } catch (...) {
     GetUpstreamResource()->Deallocate(data, data_size, alignment);
     throw;
   }
-  last_alloc_chunk_ = &chunks_.back();
-  last_dealloc_chunk_ = &chunks_.back();
+
+  last_alloc_chunk_ = &*it;
+  last_dealloc_chunk_ = &*it;
   return allocate_block_from_chunk(last_alloc_chunk_);
 }
@@ -223,18 +228,20 @@ void Pool::Deallocate(void *p) {
     deallocate_block_from_chunk(last_dealloc_chunk_);
     return;
   }
+
   // Find the chunk which served this allocation
-  for (auto &chunk : chunks_) {
-    if (is_in_chunk(chunk)) {
-      // Update last_alloc_chunk_ as well because it now has a free block.
-      // Additionally this corresponds with C++ pattern of allocations and
-      // deallocations being done in reverse order.
-      last_alloc_chunk_ = &chunk;
-      last_dealloc_chunk_ = &chunk;
-      deallocate_block_from_chunk(&chunk);
-      return;
-    }
-  }
+  Chunk chunk{reinterpret_cast<unsigned char *>(p) - blocks_per_chunk_ * block_size_, 0, 0};
+  auto it = std::lower_bound(chunks_.begin(), chunks_.end(), chunk,
+                             [](const auto &a, const auto &b) { return a.data <= b.data; });
+  MG_ASSERT(it != chunks_.end(), "Failed deallocation in utils::Pool");
+  MG_ASSERT(is_in_chunk(*it), "Failed deallocation in utils::Pool");
+
+  // Update last_alloc_chunk_ as well because it now has a free block.
+  // Additionally this corresponds with C++ pattern of allocations and
+  // deallocations being done in reverse order.
+  last_alloc_chunk_ = &*it;
+  last_dealloc_chunk_ = &*it;
+  deallocate_block_from_chunk(last_dealloc_chunk_);  // TODO: We could release the Chunk to upstream memory
 }
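Editor's note: the core of this change is replacing Pool::Deallocate's linear scan over chunks_ with a binary search, which works because Pool::Allocate now inserts every new chunk at its sorted position, keeping chunks_ ordered by base address. Below is a minimal, self-contained sketch of that lookup strategy under the same assumptions utils::Pool makes (fixed-size, non-overlapping chunks). The names demo::DemoPool, AddChunk, and FindOwner are hypothetical, introduced purely for illustration; this is not Memgraph's actual class.

// Sketch (not Memgraph code): chunk lookup via a sorted vector + std::lower_bound.
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdlib>
#include <vector>

namespace demo {

struct Chunk {
  unsigned char *data;  // base address of this chunk's block array
};

class DemoPool {  // hypothetical name, for illustration only
 public:
  static constexpr std::size_t kBlockSize = 64;
  static constexpr std::size_t kBlocksPerChunk = 128;
  static constexpr std::size_t kChunkSize = kBlockSize * kBlocksPerChunk;

  ~DemoPool() {
    for (auto &c : chunks_) std::free(c.data);
  }

  // Allocate a chunk and insert it at its sorted position, mirroring the
  // patched Pool::Allocate, so chunks_ stays ordered by base address.
  unsigned char *AddChunk() {
    auto *data = static_cast<unsigned char *>(std::malloc(kChunkSize));
    Chunk chunk{data};
    auto it = std::lower_bound(chunks_.begin(), chunks_.end(), chunk,
                               [](const Chunk &a, const Chunk &b) { return a.data < b.data; });
    chunks_.insert(it, chunk);
    return data;
  }

  // Find the chunk owning p in O(log n), mirroring the patched Deallocate.
  // Probing with p - kChunkSize makes lower_bound (with <=) return the first
  // chunk whose base address is greater than p - kChunkSize; since every
  // chunk spans exactly kChunkSize bytes and chunks are disjoint, that one
  // candidate is the only chunk that can contain p.
  Chunk *FindOwner(void *p) {
    auto *ptr = static_cast<unsigned char *>(p);
    Chunk probe{ptr - kChunkSize};  // out-of-object arithmetic, as in the patch
    auto it = std::lower_bound(chunks_.begin(), chunks_.end(), probe,
                               [](const Chunk &a, const Chunk &b) { return a.data <= b.data; });
    assert(it != chunks_.end() && "pointer not owned by this pool");
    assert(it->data <= ptr && ptr < it->data + kChunkSize);
    return &*it;
  }

 private:
  std::vector<Chunk> chunks_;  // kept sorted by Chunk::data
};

}  // namespace demo

int main() {
  demo::DemoPool pool;
  unsigned char *a = pool.AddChunk();
  unsigned char *b = pool.AddChunk();
  // Any address inside a chunk maps back to that chunk's base.
  assert(pool.FindOwner(a + 3 * demo::DemoPool::kBlockSize)->data == a);
  assert(pool.FindOwner(b + demo::DemoPool::kChunkSize - 1)->data == b);
  return 0;
}

Because the single lower_bound candidate is either the owner or proof of a foreign pointer, one probe plus one bounds check replaces the O(n) scan, which is exactly why the patched Deallocate can assert on the result (the two MG_ASSERT calls) instead of looping over chunks_.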