Merge branch 'master' into add-bug-tracking-workflow
Commit: 0c1f4ae11c
@@ -1283,7 +1283,7 @@ class Graph:
             raise InvalidContextError()
         self._graph.detach_delete_vertex(vertex._vertex)
 
-    def create_edge(self, from_vertex: Vertex, to_vertex: Vertex, edge_type: EdgeType) -> None:
+    def create_edge(self, from_vertex: Vertex, to_vertex: Vertex, edge_type: EdgeType) -> Edge:
         """
         Create an edge.
 
@@ -1292,13 +1292,16 @@ class Graph:
             to_vertex: `Vertex` to where edge is directed.
             edge_type: `EdgeType` defines the type of edge.
 
+        Returns:
+            Created `Edge`.
+
         Raises:
             ImmutableObjectError: If `graph` is immutable.
             UnableToAllocateError: If unable to allocate an edge.
             DeletedObjectError: If `from_vertex` or `to_vertex` has been deleted.
             SerializationError: If `from_vertex` or `to_vertex` has been modified by another transaction.
         Examples:
-            ```graph.create_edge(from_vertex, vertex, edge_type)```
+            ```edge = graph.create_edge(from_vertex, vertex, edge_type)```
         """
         if not self.is_valid():
             raise InvalidContextError()
release/mgp/.gitignore (vendored)
@@ -1,3 +1,4 @@
 .venv
 dist
 mgp.py
+poetry.lock
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "mgp"
-version = "1.1.0"
+version = "1.1.1"
 description = "Memgraph's module for developing MAGE modules. Used only for type hinting!"
 authors = [
     "katarinasupe <katarina.supe@memgraph.io>",
@@ -1044,7 +1044,7 @@ std::optional<plan::ProfilingStatsWithTotalTime> PullPlan::Pull(AnyStream *strea
   // Also, we want to throw only when the query engine requests more memory and not the storage
   // so we add the exception to the allocator.
   // TODO (mferencevic): Tune the parameters accordingly.
-  utils::PoolResource pool_memory(128, 1024, &monotonic_memory);
+  utils::PoolResource pool_memory(128, 1024, &monotonic_memory, utils::NewDeleteResource());
   std::optional<utils::LimitedMemoryResource> maybe_limited_resource;
 
   if (memory_limit_) {
@@ -4528,24 +4528,24 @@ auto ToOptionalString(ExpressionEvaluator *evaluator, Expression *expression) ->
   return std::nullopt;
 };
 
-TypedValue CsvRowToTypedList(csv::Reader::Row row) {
+TypedValue CsvRowToTypedList(csv::Reader::Row &row) {
   auto *mem = row.get_allocator().GetMemoryResource();
   auto typed_columns = utils::pmr::vector<TypedValue>(mem);
   typed_columns.reserve(row.size());
   for (auto &column : row) {
     typed_columns.emplace_back(std::move(column));
   }
-  return TypedValue(typed_columns, mem);
+  return {std::move(typed_columns), mem};
 }
 
-TypedValue CsvRowToTypedMap(csv::Reader::Row row, csv::Reader::Header header) {
+TypedValue CsvRowToTypedMap(csv::Reader::Row &row, csv::Reader::Header header) {
   // a valid row has the same number of elements as the header
   auto *mem = row.get_allocator().GetMemoryResource();
   utils::pmr::map<utils::pmr::string, TypedValue> m(mem);
   for (auto i = 0; i < row.size(); ++i) {
     m.emplace(std::move(header[i]), std::move(row[i]));
   }
-  return TypedValue(m, mem);
+  return {std::move(m), mem};
 }
 
 }  // namespace
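The two conversion helpers above now take the row by reference and move its cells into the result instead of copying them. A minimal standalone sketch of the same pattern, using std::pmr types as stand-ins for csv::Reader::Row and TypedValue (the names `Row` and `RowToColumns` are illustrative, not part of the diff):

```cpp
#include <memory_resource>
#include <string>
#include <utility>
#include <vector>

// Illustrative stand-in for csv::Reader::Row.
using Row = std::pmr::vector<std::pmr::string>;

// Taking `row` by reference avoids copying the whole row at the call site;
// moving each cell steals its buffer instead of duplicating it, and the result
// reuses the row's memory resource, mirroring CsvRowToTypedList above.
std::pmr::vector<std::pmr::string> RowToColumns(Row &row) {
  auto *mem = row.get_allocator().resource();
  std::pmr::vector<std::pmr::string> columns(mem);
  columns.reserve(row.size());
  for (auto &cell : row) {
    columns.emplace_back(std::move(cell));
  }
  return columns;  // moved (or elided) into the caller
}
```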
@@ -4584,18 +4584,17 @@ class LoadCsvCursor : public Cursor {
       // have to read at most cardinality(n) rows (but we can read less and stop
       // pulling MATCH).
       if (!input_is_once_ && !input_pulled) return false;
-
-      if (auto row = reader_->GetNextRow(context.evaluation_context.memory)) {
-        if (!reader_->HasHeader()) {
-          frame[self_->row_var_] = CsvRowToTypedList(std::move(*row));
-        } else {
-          frame[self_->row_var_] = CsvRowToTypedMap(
-              std::move(*row), csv::Reader::Header(reader_->GetHeader(), context.evaluation_context.memory));
-        }
-        return true;
+      auto row = reader_->GetNextRow(context.evaluation_context.memory);
+      if (!row) {
+        return false;
       }
-
-      return false;
+      if (!reader_->HasHeader()) {
+        frame[self_->row_var_] = CsvRowToTypedList(*row);
+      } else {
+        frame[self_->row_var_] =
+            CsvRowToTypedMap(*row, csv::Reader::Header(reader_->GetHeader(), context.evaluation_context.memory));
+      }
+      return true;
     }
 
     void Reset() override { input_cursor_->Reset(); }
@@ -91,18 +91,25 @@ bool ReadWriteTypeChecker::PreVisit([[maybe_unused]] Foreach &op) {
 bool ReadWriteTypeChecker::Visit(Once &) { return false; }  // NOLINT(hicpp-named-parameter)
 
 void ReadWriteTypeChecker::UpdateType(RWType op_type) {
-  // Update type only if it's not the NONE type and the current operator's type
-  // is different than the one that's currently inferred.
-  if (type != RWType::NONE && type != op_type) {
-    type = RWType::RW;
-  }
   // Stop inference because RW is the most "dominant" type, i.e. it isn't
   // affected by the type of nodes in the plan appearing after the node for
   // which the type is set to RW.
   if (type == RWType::RW) {
     return;
   }
-  if (type == RWType::NONE && op_type != RWType::NONE) {
+  // if op_type is NONE, type doesn't change.
+  if (op_type == RWType::NONE) {
+    return;
+  }
+
+  // Update type only if it's not the NONE type and the current operator's type
+  // is different than the one that's currently inferred.
+  if (type != RWType::NONE && type != op_type) {
+    type = RWType::RW;
+  }
+
+  if (type == RWType::NONE) {
     type = op_type;
   }
 }
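The reordered inference rule above can be restated as a pure function; this is only a readability sketch that mirrors the new control flow and the 16-case table added to the unit tests later in this diff (`Combine` is an illustrative name, not part of the change):

```cpp
enum class RWType { NONE, R, W, RW };

// Sketch of the rule UpdateType now implements, written as a pure function.
RWType Combine(RWType current, RWType op) {
  if (current == RWType::RW) return RWType::RW;  // RW is "dominant": inference is already settled
  if (op == RWType::NONE) return current;        // a NONE operator never changes the result
  if (current == RWType::NONE) return op;        // the first non-NONE operator sets the type
  if (current != op) return RWType::RW;          // mixing R and W escalates to RW
  return current;                                // same type: nothing to do
}
```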
@@ -15,7 +15,7 @@
 
 namespace memgraph::query::plan {
 
-class ReadWriteTypeChecker : public virtual HierarchicalLogicalOperatorVisitor {
+struct ReadWriteTypeChecker : public virtual HierarchicalLogicalOperatorVisitor {
  public:
   ReadWriteTypeChecker() = default;
 
@@ -89,7 +89,6 @@ class ReadWriteTypeChecker : public virtual HierarchicalLogicalOperatorVisitor {
 
   bool Visit(Once &) override;
 
- private:
   void UpdateType(RWType op_type);
 };
 
@@ -40,7 +40,7 @@ std::optional<utils::pmr::string> Reader::GetNextLine(utils::MemoryResource *mem
     return std::nullopt;
   }
   ++line_count_;
-  return line;
+  return std::move(line);
 }
 
 Reader::ParsingResult Reader::ParseHeader() {
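Context for the `return std::move(line);` change, as a minimal sketch (the `NextLine` name and body are illustrative, not the real Reader API): the local is a pmr string but the return type is an optional, so this is a converting return; spelling out std::move guarantees the optional is constructed from an rvalue and the string's buffer is moved rather than copied, regardless of how far the compiler's implicit-move rules reach.

```cpp
#include <memory_resource>
#include <optional>
#include <string>

// Illustrative stand-in for Reader::GetNextLine.
std::optional<std::pmr::string> NextLine(std::pmr::memory_resource *mem) {
  std::pmr::string line{mem};
  // ... read characters into `line` from the underlying stream ...
  if (line.empty()) return std::nullopt;
  // Converting return (pmr::string -> optional<pmr::string>): std::move makes the
  // rvalue construction explicit, so the buffer is stolen instead of copied.
  return std::move(line);
}
```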
@@ -251,9 +251,10 @@ void Pool::Release() {
 
 }  // namespace impl
 
-PoolResource::PoolResource(size_t max_blocks_per_chunk, size_t max_block_size, MemoryResource *memory)
-    : pools_(memory),
-      unpooled_(memory),
+PoolResource::PoolResource(size_t max_blocks_per_chunk, size_t max_block_size, MemoryResource *memory_pools,
+                           MemoryResource *memory_unpooled)
+    : pools_(memory_pools),
+      unpooled_(memory_unpooled),
       max_blocks_per_chunk_(std::min(max_blocks_per_chunk, static_cast<size_t>(impl::Pool::MaxBlocksInChunk()))),
       max_block_size_(max_block_size) {
   MG_ASSERT(max_blocks_per_chunk_ > 0U, "Invalid number of blocks per chunk");
@@ -273,14 +274,14 @@ void *PoolResource::DoAllocate(size_t bytes, size_t alignment) {
   if (block_size % alignment != 0) throw BadAlloc("Requested bytes must be a multiple of alignment");
   if (block_size > max_block_size_) {
     // Allocate a big block.
-    BigBlock big_block{bytes, alignment, GetUpstreamResource()->Allocate(bytes, alignment)};
+    BigBlock big_block{bytes, alignment, GetUpstreamResourceBlocks()->Allocate(bytes, alignment)};
     // Insert the big block in the sorted position.
     auto it = std::lower_bound(unpooled_.begin(), unpooled_.end(), big_block,
                                [](const auto &a, const auto &b) { return a.data < b.data; });
     try {
       unpooled_.insert(it, big_block);
     } catch (...) {
-      GetUpstreamResource()->Deallocate(big_block.data, bytes, alignment);
+      GetUpstreamResourceBlocks()->Deallocate(big_block.data, bytes, alignment);
       throw;
     }
     return big_block.data;
@@ -318,7 +319,7 @@ void PoolResource::DoDeallocate(void *p, size_t bytes, size_t alignment) {
   MG_ASSERT(it != unpooled_.end(), "Failed deallocation");
   MG_ASSERT(it->data == p && it->bytes == bytes && it->alignment == alignment, "Failed deallocation");
   unpooled_.erase(it);
-  GetUpstreamResource()->Deallocate(p, bytes, alignment);
+  GetUpstreamResourceBlocks()->Deallocate(p, bytes, alignment);
   return;
 }
 // Deallocate a regular block, first check if last_dealloc_pool_ is suitable.
@@ -339,7 +340,7 @@ void PoolResource::Release() {
   for (auto &pool : pools_) pool.Release();
   pools_.clear();
   for (auto &big_block : unpooled_)
-    GetUpstreamResource()->Deallocate(big_block.data, big_block.bytes, big_block.alignment);
+    GetUpstreamResourceBlocks()->Deallocate(big_block.data, big_block.bytes, big_block.alignment);
   unpooled_.clear();
   last_alloc_pool_ = nullptr;
   last_dealloc_pool_ = nullptr;
@@ -469,7 +469,8 @@ class PoolResource final : public MemoryResource {
   /// impl::Pool::MaxBlocksInChunk()) as the real maximum number of blocks per
   /// chunk. Allocation requests exceeding max_block_size are simply forwarded
   /// to upstream memory.
-  PoolResource(size_t max_blocks_per_chunk, size_t max_block_size, MemoryResource *memory = NewDeleteResource());
+  PoolResource(size_t max_blocks_per_chunk, size_t max_block_size, MemoryResource *memory_pools = NewDeleteResource(),
+               MemoryResource *memory_unpooled = NewDeleteResource());
 
   PoolResource(const PoolResource &) = delete;
   PoolResource &operator=(const PoolResource &) = delete;
@@ -480,6 +481,7 @@ class PoolResource final : public MemoryResource {
   ~PoolResource() override { Release(); }
 
   MemoryResource *GetUpstreamResource() const { return pools_.get_allocator().GetMemoryResource(); }
+  MemoryResource *GetUpstreamResourceBlocks() const { return unpooled_.get_allocator().GetMemoryResource(); }
 
   /// Release all allocated memory.
   void Release();
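A hedged usage sketch of the split upstream resources, mirroring the PullPlan::Pull change earlier in this diff; `monotonic_memory` stands for the per-query monotonic arena that already exists in that function, the 128/1024 arguments are the same values used there, and the trailing Allocate/Deallocate calls are purely illustrative of the oversized-block path:

```cpp
// Pool bookkeeping and pooled (small) blocks are served from the monotonic arena,
// while blocks larger than max_block_size go straight to new/delete, so they can
// presumably be freed on Deallocate instead of accumulating in the arena.
utils::PoolResource pool_memory(/*max_blocks_per_chunk=*/128, /*max_block_size=*/1024,
                                /*memory_pools=*/&monotonic_memory,
                                /*memory_unpooled=*/utils::NewDeleteResource());

// An oversized request is served and returned through the second upstream.
void *big = pool_memory.Allocate(/*bytes=*/4096, /*alignment=*/8);
pool_memory.Deallocate(big, 4096, 8);
```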
@@ -142,7 +142,7 @@ parser.add_argument(
     with the presence of 300 write queries from write type or 30%""",
 )
 
-parser.add_argument("--tail-latency", type=int, default=100, help="Number of queries for the tail latency statistics")
+parser.add_argument("--tail-latency", type=int, default=0, help="Number of queries for the tail latency statistics")
 
 parser.add_argument(
     "--performance-tracking",
@@ -223,8 +223,17 @@ def filter_benchmarks(generators, patterns):
                     patterns,
                 ):
                     current[group].append((query_name, query_func))
-        if len(current) > 0:
-            filtered.append((generator(variant, args.vendor_name), dict(current)))
+        if len(current) == 0:
+            continue
+
+        # Ignore benchgraph "basic" queries in standard CI/CD run
+        for pattern in patterns:
+            res = pattern.count("*")
+            key = "basic"
+            if res >= 2 and key in current.keys():
+                current.pop(key)
+
+        filtered.append((generator(variant, args.vendor_name), dict(current)))
     return filtered
 
 
@@ -241,30 +250,34 @@ def warmup(client):
 
 
 def tail_latency(vendor, client, func):
-    vendor.start_benchmark("tail_latency")
-    if args.warmup_run:
-        warmup(client)
-    latency = []
     iteration = args.tail_latency
-    query_list = get_queries(func, iteration)
-    for i in range(0, iteration):
-        ret = client.execute(queries=[query_list[i]], num_workers=1)
-        latency.append(ret[0]["duration"])
-    latency.sort()
-    query_stats = {
-        "iterations": iteration,
-        "min": latency[0],
-        "max": latency[iteration - 1],
-        "mean": statistics.mean(latency),
-        "p99": latency[math.floor(iteration * 0.99) - 1],
-        "p95": latency[math.floor(iteration * 0.95) - 1],
-        "p90": latency[math.floor(iteration * 0.90) - 1],
-        "p75": latency[math.floor(iteration * 0.75) - 1],
-        "p50": latency[math.floor(iteration * 0.50) - 1],
-    }
-    print("Query statistics for tail latency: ")
-    print(query_stats)
-    vendor.stop("tail_latency")
+    if iteration >= 10:
+        vendor.start_benchmark("tail_latency")
+        if args.warmup_run:
+            warmup(client)
+        latency = []
+
+        query_list = get_queries(func, iteration)
+        for i in range(0, iteration):
+            ret = client.execute(queries=[query_list[i]], num_workers=1)
+            latency.append(ret[0]["duration"])
+        latency.sort()
+        query_stats = {
+            "iterations": iteration,
+            "min": latency[0],
+            "max": latency[iteration - 1],
+            "mean": statistics.mean(latency),
+            "p99": latency[math.floor(iteration * 0.99) - 1],
+            "p95": latency[math.floor(iteration * 0.95) - 1],
+            "p90": latency[math.floor(iteration * 0.90) - 1],
+            "p75": latency[math.floor(iteration * 0.75) - 1],
+            "p50": latency[math.floor(iteration * 0.50) - 1],
+        }
+        print("Query statistics for tail latency: ")
+        print(query_stats)
+        vendor.stop("tail_latency")
+    else:
+        query_stats = {}
     return query_stats
 
 
@@ -147,6 +147,8 @@ def run_full_benchmarks(vendor, binary, dataset_size, dataset_group, realistic,
             "12",
             "--no-authorization",
             "pokec/" + dataset_size + "/" + dataset_group + "/*",
+            "--tail-latency",
+            "100",
         ]
 
     for config in configurations:
@@ -253,3 +253,31 @@ TEST_F(ReadWriteTypeCheckTest, Foreach) {
   std::shared_ptr<LogicalOperator> foreach = std::make_shared<plan::Foreach>(nullptr, nullptr, nullptr, x);
   CheckPlanType(foreach.get(), RWType::RW);
 }
+
+TEST_F(ReadWriteTypeCheckTest, CheckUpdateType) {
+  std::array<std::array<RWType, 3>, 16> scenarios = {{
+      {RWType::NONE, RWType::NONE, RWType::NONE},
+      {RWType::NONE, RWType::R, RWType::R},
+      {RWType::NONE, RWType::W, RWType::W},
+      {RWType::NONE, RWType::RW, RWType::RW},
+      {RWType::R, RWType::NONE, RWType::R},
+      {RWType::R, RWType::R, RWType::R},
+      {RWType::R, RWType::W, RWType::RW},
+      {RWType::R, RWType::RW, RWType::RW},
+      {RWType::W, RWType::NONE, RWType::W},
+      {RWType::W, RWType::R, RWType::RW},
+      {RWType::W, RWType::W, RWType::W},
+      {RWType::W, RWType::RW, RWType::RW},
+      {RWType::RW, RWType::NONE, RWType::RW},
+      {RWType::RW, RWType::R, RWType::RW},
+      {RWType::RW, RWType::W, RWType::RW},
+      {RWType::RW, RWType::RW, RWType::RW},
+  }};
+
+  auto rw_type_checker = ReadWriteTypeChecker();
+  for (auto scenario : scenarios) {
+    rw_type_checker.type = scenario[0];
+    rw_type_checker.UpdateType(scenario[1]);
+    EXPECT_EQ(scenario[2], rw_type_checker.type);
+  }
+}
@@ -252,20 +252,21 @@ TEST(PoolResource, MultipleSmallBlockAllocations) {
 // NOLINTNEXTLINE(hicpp-special-member-functions)
 TEST(PoolResource, BigBlockAllocations) {
   TestMemory test_mem;
+  TestMemory test_mem_unpooled;
   const size_t max_blocks_per_chunk = 3U;
   const size_t max_block_size = 64U;
-  memgraph::utils::PoolResource mem(max_blocks_per_chunk, max_block_size, &test_mem);
+  memgraph::utils::PoolResource mem(max_blocks_per_chunk, max_block_size, &test_mem, &test_mem_unpooled);
   CheckAllocation(&mem, max_block_size + 1, 1U);
   // May allocate more than once per block due to bookkeeping.
-  EXPECT_GE(test_mem.new_count_, 1U);
+  EXPECT_GE(test_mem_unpooled.new_count_, 1U);
   CheckAllocation(&mem, max_block_size + 1, 1U);
-  EXPECT_GE(test_mem.new_count_, 2U);
+  EXPECT_GE(test_mem_unpooled.new_count_, 2U);
   auto *ptr = CheckAllocation(&mem, max_block_size * 2, 1U);
-  EXPECT_GE(test_mem.new_count_, 3U);
+  EXPECT_GE(test_mem_unpooled.new_count_, 3U);
   mem.Deallocate(ptr, max_block_size * 2, 1U);
-  EXPECT_GE(test_mem.delete_count_, 1U);
+  EXPECT_GE(test_mem_unpooled.delete_count_, 1U);
   mem.Release();
-  EXPECT_GE(test_mem.delete_count_, 3U);
+  EXPECT_GE(test_mem_unpooled.delete_count_, 3U);
   CheckAllocation(&mem, max_block_size + 1, 1U);
 }
 