Fix benchmark test

parent e6c80e7dc9
commit 177de2fa2e
@@ -386,12 +386,12 @@ Shard::~Shard() {}
 
 std::unique_ptr<Shard> Shard::FromSplitData(SplitData &&split_data) {
   if (split_data.config.items.properties_on_edges) [[likely]] {
-    return std::make_unique<Shard>(split_data.primary_label, split_data.min_primary_key, split_data.min_primary_key,
+    return std::make_unique<Shard>(split_data.primary_label, split_data.min_primary_key, split_data.max_primary_key,
                                    split_data.schema, std::move(split_data.vertices), std::move(*split_data.edges),
                                    std::move(split_data.transactions), split_data.config, split_data.id_to_name,
                                    split_data.shard_version);
   }
-  return std::make_unique<Shard>(split_data.primary_label, split_data.min_primary_key, split_data.min_primary_key,
+  return std::make_unique<Shard>(split_data.primary_label, split_data.min_primary_key, split_data.max_primary_key,
                                  split_data.schema, std::move(split_data.vertices), std::move(split_data.transactions),
                                  split_data.config, split_data.id_to_name, split_data.shard_version);
 }
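
Note on the hunk above: both branches of FromSplitData passed split_data.min_primary_key twice, so the rebuilt shard's upper bound collapsed onto its lower bound; the fix forwards max_primary_key as the third argument. A self-contained sketch of the invariant the corrected call restores (Bounds and ValidBounds are illustrative stand-ins, not Memgraph types):

    #include <cassert>
    #include <optional>

    // Illustrative only: a shard covers the key range [min, max),
    // where an empty max means "unbounded above".
    struct Bounds {
      int min;
      std::optional<int> max;
    };

    bool ValidBounds(const Bounds &b) { return !b.max || b.min < *b.max; }

    int main() {
      const Bounds fixed{10, 50};   // min, max  -- what the fixed call passes
      const Bounds broken{10, 10};  // min, min  -- what the old call produced
      assert(ValidBounds(fixed));
      assert(!ValidBounds(broken));
      return 0;
    }
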
@@ -1115,8 +1115,9 @@ std::optional<SplitInfo> Shard::ShouldSplit() const noexcept {
 
 SplitData Shard::PerformSplit(const PrimaryKey &split_key, const uint64_t shard_version) {
   shard_version_ = shard_version;
+  const auto old_max_key = max_primary_key_;
   max_primary_key_ = split_key;
-  return shard_splitter_.SplitShard(split_key, max_primary_key_, shard_version);
+  return shard_splitter_.SplitShard(split_key, old_max_key, shard_version);
 }
 
 bool Shard::IsVertexBelongToShard(const VertexId &vertex_id) const {
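
Note on the hunk above: PerformSplit now snapshots max_primary_key_ into old_max_key before shrinking the current shard to end at split_key, so the splitter receives the original upper bound rather than the value it was just overwritten with. A minimal, self-contained sketch of the same save-before-overwrite pattern (Range here is illustrative, not a Memgraph type):

    #include <cassert>

    // Splitting [min, max) at `split`: the right half must get the *old*
    // upper bound, captured before the member is overwritten.
    struct Range {
      int min;
      int max;

      Range SplitAt(int split) {
        const int old_max = max;       // capture first, as in the fix above
        max = split;                   // current range shrinks to [min, split)
        return Range{split, old_max};  // new range covers [split, old_max)
      }
    };

    int main() {
      Range r{0, 100};
      const Range right = r.SplitAt(40);
      assert(r.min == 0 && r.max == 40);
      assert(right.min == 40 && right.max == 100);
      return 0;
    }
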
@@ -64,11 +64,12 @@ class ShardSplitBenchmark : public ::benchmark::Fixture {
 };
 
 BENCHMARK_DEFINE_F(ShardSplitBenchmark, BigDataSplit)(::benchmark::State &state) {
+  const auto number_of_vertices{state.range(0)};
   std::random_device r;
   std::default_random_engine e1(r());
-  std::uniform_int_distribution<int> uniform_dist(0, state.range(0));
+  std::uniform_int_distribution<int> uniform_dist(0, number_of_vertices);
 
-  for (int64_t i{0}; i < state.range(0); ++i) {
+  for (int64_t i{0}; i < number_of_vertices; ++i) {
     auto acc = storage->Access(GetNextHlc());
     MG_ASSERT(acc.CreateVertexAndValidate({secondary_label}, PrimaryKey{PropertyValue(i)},
                                           {{secondary_property, PropertyValue(i)}})
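
Note on the hunk above and the two analogous hunks below: each benchmark now reads its range arguments once into named locals (number_of_vertices, and later number_of_edges and number_of_transactions) instead of repeating state.range(0) at every use site. A minimal Google Benchmark sketch of that pattern (BM_Example is a made-up name, not part of the benchmark file):

    #include <benchmark/benchmark.h>
    #include <cstdint>

    // Read the range argument once, then use the named local throughout.
    static void BM_Example(benchmark::State &state) {
      const auto number_of_items = state.range(0);  // int64_t, named once
      for (auto _ : state) {
        for (int64_t i = 0; i < number_of_items; ++i) {
          benchmark::DoNotOptimize(i);
        }
      }
    }
    BENCHMARK(BM_Example)->RangeMultiplier(10)->Range(100'000, 100'000)->Unit(benchmark::kMillisecond);
    BENCHMARK_MAIN();
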
@@ -86,16 +87,17 @@ BENCHMARK_DEFINE_F(ShardSplitBenchmark, BigDataSplit)(::benchmark::State &state)
     acc.Commit(GetNextHlc());
   }
   for (auto _ : state) {
-    auto data = storage->PerformSplit(PrimaryKey{PropertyValue{state.range(0) / 2}}, 2);
+    auto data = storage->PerformSplit(PrimaryKey{PropertyValue{number_of_vertices / 2}}, 2);
   }
 }
 
 BENCHMARK_DEFINE_F(ShardSplitBenchmark, BigDataSplitWithGc)(::benchmark::State &state) {
+  const auto number_of_vertices{state.range(0)};
   std::random_device r;
   std::default_random_engine e1(r());
-  std::uniform_int_distribution<int> uniform_dist(0, state.range(0));
+  std::uniform_int_distribution<int> uniform_dist(0, number_of_vertices);
 
-  for (int64_t i{0}; i < state.range(0); ++i) {
+  for (int64_t i{0}; i < number_of_vertices; ++i) {
     auto acc = storage->Access(GetNextHlc());
     MG_ASSERT(acc.CreateVertexAndValidate({secondary_label}, PrimaryKey{PropertyValue(i)},
                                           {{secondary_property, PropertyValue(i)}})
@@ -114,17 +116,20 @@ BENCHMARK_DEFINE_F(ShardSplitBenchmark, BigDataSplitWithGc)(::benchmark::State &
   }
   storage->CollectGarbage(GetNextHlc().coordinator_wall_clock);
   for (auto _ : state) {
-    auto data = storage->PerformSplit(PrimaryKey{PropertyValue{state.range(0) / 2}}, 2);
+    auto data = storage->PerformSplit(PrimaryKey{PropertyValue{number_of_vertices / 2}}, 2);
   }
 }
 
 BENCHMARK_DEFINE_F(ShardSplitBenchmark, BigDataSplitWithFewTransactions)(::benchmark::State &state) {
+  const auto number_of_vertices = state.range(0);
+  const auto number_of_edges = state.range(1);
+  const auto number_of_transactions = state.range(2);
   std::random_device r;
   std::default_random_engine e1(r());
-  std::uniform_int_distribution<int> uniform_dist(0, state.range(0));
+  std::uniform_int_distribution<int> uniform_dist(0, number_of_vertices);
 
-  const auto max_transactions_needed = std::max(state.range(0), state.range(1));
-  for (int64_t vertex_counter{state.range(0)}, edge_counter{state.range(1)}, i{0};
+  const auto max_transactions_needed = std::max(number_of_vertices, number_of_edges);
+  for (int64_t vertex_counter{number_of_vertices}, edge_counter{number_of_edges}, i{0};
        vertex_counter > 0 || edge_counter > 0; --vertex_counter, --edge_counter, ++i) {
     auto acc = storage->Access(GetNextHlc());
     if (vertex_counter > 0) {
@@ -132,11 +137,10 @@ BENCHMARK_DEFINE_F(ShardSplitBenchmark, BigDataSplitWithFewTransactions)(::bench
                                             {{secondary_property, PropertyValue(i)}})
                     .HasValue(),
                 "Failed creating with pk {}", i);
-      ++i;
     }
     if (edge_counter > 0 && i > 1) {
-      const auto vtx1 = uniform_dist(e1) % i;
-      const auto vtx2 = uniform_dist(e1) % i;
+      const auto vtx1 = uniform_dist(e1) % std::min(i, number_of_vertices);
+      const auto vtx2 = uniform_dist(e1) % std::min(i, number_of_vertices);
 
       MG_ASSERT(acc.CreateEdge(VertexId{primary_label, {PropertyValue(vtx1)}},
                                VertexId{primary_label, {PropertyValue(vtx2)}}, edge_type_id, Gid::FromUint(i))
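
Note on the hunk above: the stray ++i inside the vertex branch double-incremented the loop counter, and the edge endpoints were chosen modulo i, which can exceed the number of vertices actually created once only edges remain to be inserted. Clamping with std::min(i, number_of_vertices) keeps both endpoints inside the created key range. A self-contained sketch of the clamp (the constants are arbitrary, not the benchmark's ranges):

    #include <algorithm>
    #include <cassert>
    #include <cstdint>
    #include <random>

    int main() {
      const int64_t number_of_vertices = 100;  // vertices created with keys 0..99
      std::mt19937 rng{42};
      std::uniform_int_distribution<int> uniform_dist(0, number_of_vertices);
      // Once i grows past number_of_vertices (only edges left to create),
      // `% i` could yield a key that was never created; the clamp cannot.
      for (int64_t i = 2; i < 1'000; ++i) {
        const auto vtx = uniform_dist(rng) % std::min(i, number_of_vertices);
        assert(vtx < number_of_vertices);
      }
      return 0;
    }
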
@@ -145,14 +149,14 @@ BENCHMARK_DEFINE_F(ShardSplitBenchmark, BigDataSplitWithFewTransactions)(::bench
     }
 
     acc.Commit(GetNextHlc());
-    if (i >= max_transactions_needed - state.range(2)) {
+    if (i == max_transactions_needed - number_of_transactions) {
       storage->CollectGarbage(GetNextHlc().coordinator_wall_clock);
     }
   }
 
   for (auto _ : state) {
     // Don't create shard since shard deallocation can take some time as well
-    auto data = storage->PerformSplit(PrimaryKey{PropertyValue{state.range(0) / 2}}, 2);
+    auto data = storage->PerformSplit(PrimaryKey{PropertyValue{number_of_vertices / 2}}, 2);
   }
 }
 
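
Note on the hunk above: with >=, garbage collection ran after every commit past the threshold; with ==, it runs exactly once, so the measured split starts with roughly the last number_of_transactions transactions still uncollected. A tiny sketch of the difference in trigger counts (the constants are arbitrary):

    #include <cstdint>
    #include <iostream>

    int main() {
      const int64_t max_transactions_needed = 1'000;
      const int64_t number_of_transactions = 100;
      int64_t fired_with_ge = 0;
      int64_t fired_with_eq = 0;
      for (int64_t i = 0; i < max_transactions_needed; ++i) {
        if (i >= max_transactions_needed - number_of_transactions) ++fired_with_ge;  // old condition
        if (i == max_transactions_needed - number_of_transactions) ++fired_with_eq;  // new condition
      }
      std::cout << fired_with_ge << " vs " << fired_with_eq << '\n';  // prints "100 vs 1"
      return 0;
    }
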
@@ -161,7 +165,7 @@ BENCHMARK_DEFINE_F(ShardSplitBenchmark, BigDataSplitWithFewTransactions)(::bench
 // This run is pessimistic, the number of vertices corresponds with the number of transactions
 BENCHMARK_REGISTER_F(ShardSplitBenchmark, BigDataSplit)
     ->RangeMultiplier(10)
-    ->Range(100'000, 1'000'000)
+    ->Range(100'000, 100'000)
     ->Unit(::benchmark::kMillisecond);
 
 // Range: