Merge pull request #631 from memgraph/tyler_remove_shard_map_from_machine_manager

Remove redundant ShardMap copy from MachineManager to avoid race conditions
commit ee4be9aa5b
Author: Tyler Neely
Date: 2022-11-01 18:33:37 +01:00 (committed by GitHub)
10 changed files with 39 additions and 27 deletions
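
For context, a self-contained sketch of the data flow this PR moves to. It is an illustration only: the structs are trimmed to the fields relevant here, the real id types (LabelId, EdgeTypeId, PropertyId) are replaced with plain uint64_t, and all I/O plumbing is omitted. The point is that the Coordinator stays the sole owner of the ShardMap, and each ShardToInitialize now carries its own id-to-name mapping, so the ShardManager no longer keeps a second (potentially stale) ShardMap copy.

// Sketch only: simplified stand-ins for the real coordinator/storage types.
#include <cstdint>
#include <iostream>
#include <string>
#include <unordered_map>

// Everything a shard needs at initialization time now travels in this payload.
struct ShardToInitialize {
  std::unordered_map<uint64_t, std::string> id_to_names;  // field added by this PR
};

// The Coordinator-side map; names are registered here and nowhere else.
struct ShardMap {
  std::unordered_map<std::string, uint64_t> labels;
  std::unordered_map<std::string, uint64_t> edge_types;
  std::unordered_map<std::string, uint64_t> properties;

  // Mirrors ShardMap::IdToNames(): invert every name->id map into one id->name map.
  std::unordered_map<uint64_t, std::string> IdToNames() const {
    std::unordered_map<uint64_t, std::string> id_to_names;
    const auto map_type_ids = [&id_to_names](const auto &name_to_id) {
      for (const auto &[name, id] : name_to_id) {
        id_to_names.emplace(id, name);
      }
    };
    map_type_ids(edge_types);
    map_type_ids(labels);
    map_type_ids(properties);
    return id_to_names;
  }
};

int main() {
  ShardMap shard_map;
  shard_map.labels = {{"Person", 1}};
  shard_map.edge_types = {{"KNOWS", 2}};
  shard_map.properties = {{"name", 3}};

  // Coordinator side: build the payload once per shard it hands out.
  const ShardToInitialize to_init{shard_map.IdToNames()};

  // ShardManager side: consumes only the payload, never a ShardMap copy.
  for (const auto &[id, name] : to_init.id_to_names) {
    std::cout << id << " -> " << name << '\n';
  }
  return 0;
}

This is also why the MachineManager and ShardManager constructors, and the test helpers below, drop their ShardMap parameters entirely.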

View File

@@ -228,6 +228,24 @@ Hlc ShardMap::IncrementShardMapVersion() noexcept {
   return shard_map_version;
 }
+// TODO(antaljanosbenjamin) use a single map for all name id
+// mapping and a single counter to maintain the next id
+std::unordered_map<uint64_t, std::string> ShardMap::IdToNames() {
+  std::unordered_map<uint64_t, std::string> id_to_names;
+  const auto map_type_ids = [&id_to_names](const auto &name_to_id_type) {
+    for (const auto &[name, id] : name_to_id_type) {
+      id_to_names.emplace(id.AsUint(), name);
+    }
+  };
+  map_type_ids(edge_types);
+  map_type_ids(labels);
+  map_type_ids(properties);
+  return id_to_names;
+}
 Hlc ShardMap::GetHlc() const noexcept { return shard_map_version; }
 
 std::vector<ShardToInitialize> ShardMap::AssignShards(Address storage_manager,
@@ -259,6 +277,7 @@ std::vector<ShardToInitialize> ShardMap::AssignShards(Address storage_manager,
       if (same_machine) {
         machine_contains_shard = true;
+        spdlog::info("reminding shard manager that they should begin participating in shard");
         ret.push_back(ShardToInitialize{
             .uuid = aas.address.unique_id,
             .label_id = label_id,
@@ -266,6 +285,7 @@ std::vector<ShardToInitialize> ShardMap::AssignShards(Address storage_manager,
             .max_key = high_key,
             .schema = schemas[label_id],
             .config = Config{},
+            .id_to_names = IdToNames(),
         });
       }
     }
@@ -286,6 +306,7 @@ std::vector<ShardToInitialize> ShardMap::AssignShards(Address storage_manager,
         .max_key = high_key,
         .schema = schemas[label_id],
         .config = Config{},
+        .id_to_names = IdToNames(),
     });
 
     AddressAndStatus aas = {

View File

@@ -91,6 +91,7 @@ struct ShardToInitialize {
   std::optional<PrimaryKey> max_key;
   std::vector<SchemaProperty> schema;
   Config config;
+  std::unordered_map<uint64_t, std::string> id_to_names;
 };
 
 PrimaryKey SchemaToMinKey(const std::vector<SchemaProperty> &schema);
@@ -137,6 +138,8 @@ struct ShardMap {
   Hlc IncrementShardMapVersion() noexcept;
   Hlc GetHlc() const noexcept;
+  std::unordered_map<uint64_t, std::string> IdToNames();
+  // Returns the shard UUIDs that have been assigned but not yet acknowledged for this storage manager
   std::vector<ShardToInitialize> AssignShards(Address storage_manager, std::set<boost::uuids::uuid> initialized);

View File

@@ -18,7 +18,6 @@
 #include <io/time.hpp>
 #include <machine_manager/machine_config.hpp>
 #include <storage/v3/shard_manager.hpp>
-#include "coordinator/shard_map.hpp"
 
 namespace memgraph::machine_manager {
@@ -70,11 +69,11 @@ class MachineManager {
  public:
   // TODO initialize ShardManager with "real" coordinator addresses instead of io.GetAddress
   // which is only true for single-machine config.
-  MachineManager(io::Io<IoImpl> io, MachineConfig config, Coordinator coordinator, coordinator::ShardMap &shard_map)
+  MachineManager(io::Io<IoImpl> io, MachineConfig config, Coordinator coordinator)
       : io_(io),
         config_(config),
         coordinator_{std::move(io.ForkLocal()), {}, std::move(coordinator)},
-        shard_manager_{io.ForkLocal(), coordinator_.GetAddress(), shard_map} {}
+        shard_manager_{io.ForkLocal(), coordinator_.GetAddress()} {}
 
   Address CoordinatorAddress() { return coordinator_.GetAddress(); }

View File

@@ -658,8 +658,7 @@ int main(int argc, char **argv) {
   memgraph::coordinator::Coordinator coordinator{sm};
 
-  memgraph::machine_manager::MachineManager<memgraph::io::local_transport::LocalTransport> mm{io, config, coordinator,
-                                                                                              sm};
+  memgraph::machine_manager::MachineManager<memgraph::io::local_transport::LocalTransport> mm{io, config, coordinator};
   std::jthread mm_thread([&mm] { mm.Run(); });
 
   memgraph::query::v2::InterpreterContext interpreter_context{

View File

@@ -321,7 +321,8 @@ bool VerticesIterable::Iterator::operator==(const Iterator &other) const {
 }
 
 Shard::Shard(const LabelId primary_label, const PrimaryKey min_primary_key,
-             const std::optional<PrimaryKey> max_primary_key, std::vector<SchemaProperty> schema, Config config)
+             const std::optional<PrimaryKey> max_primary_key, std::vector<SchemaProperty> schema, Config config,
+             std::unordered_map<uint64_t, std::string> id_to_name)
     : primary_label_{primary_label},
       min_primary_key_{min_primary_key},
      max_primary_key_{max_primary_key},
@@ -334,6 +335,7 @@ Shard::Shard(const LabelId primary_label, const PrimaryKey min_primary_key,
       epoch_id_{utils::GenerateUUID()},
       global_locker_{file_retainer_.AddLocker()} {
   CreateSchema(primary_label_, schema);
+  StoreMapping(std::move(id_to_name));
 }
 
 Shard::~Shard() {}

View File

@@ -189,7 +189,8 @@ class Shard final {
   /// @throw std::system_error
   /// @throw std::bad_alloc
   explicit Shard(LabelId primary_label, PrimaryKey min_primary_key, std::optional<PrimaryKey> max_primary_key,
-                 std::vector<SchemaProperty> schema, Config config = Config());
+                 std::vector<SchemaProperty> schema, Config config = Config(),
+                 std::unordered_map<uint64_t, std::string> id_to_name = {});
 
   Shard(const Shard &) = delete;
   Shard(Shard &&) noexcept = delete;

View File

@@ -77,8 +77,7 @@ static_assert(kMinimumCronInterval < kMaximumCronInterval,
 template <typename IoImpl>
 class ShardManager {
  public:
-  ShardManager(io::Io<IoImpl> io, Address coordinator_leader, coordinator::ShardMap shard_map)
-      : io_(io), coordinator_leader_(coordinator_leader), shard_map_{std::move(shard_map)} {}
+  ShardManager(io::Io<IoImpl> io, Address coordinator_leader) : io_(io), coordinator_leader_(coordinator_leader) {}
 
   /// Periodic protocol maintenance. Returns the time that Cron should be called again
   /// in the future.
@@ -137,7 +136,6 @@ class ShardManager {
   std::priority_queue<std::pair<Time, uuid>, std::vector<std::pair<Time, uuid>>, std::greater<>> cron_schedule_;
   Time next_cron_ = Time::min();
   Address coordinator_leader_;
-  coordinator::ShardMap shard_map_;
   std::optional<ResponseFuture<WriteResponse<CoordinatorWriteResponses>>> heartbeat_res_;
 
   // TODO(tyler) over time remove items from initialized_but_not_confirmed_rsm_
@@ -210,22 +208,11 @@ class ShardManager {
     io_addr.unique_id = to_init.uuid;
     rsm_io.SetAddress(io_addr);
 
-    // TODO(tyler) get geers from Coordinator in HeartbeatResponse
+    // TODO(tyler) get peers from Coordinator in HeartbeatResponse
     std::vector<Address> rsm_peers = {};
 
-    std::unique_ptr<Shard> shard =
-        std::make_unique<Shard>(to_init.label_id, to_init.min_key, to_init.max_key, to_init.schema, to_init.config);
-    // TODO(jbajic) Should be sync with coordinator and not passed
-    std::unordered_map<uint64_t, std::string> id_to_name;
-    const auto map_type_ids = [&id_to_name](const auto &name_to_id_type) {
-      for (const auto &[name, id] : name_to_id_type) {
-        id_to_name.insert({id.AsUint(), name});
-      }
-    };
-    map_type_ids(shard_map_.edge_types);
-    map_type_ids(shard_map_.labels);
-    map_type_ids(shard_map_.properties);
-    shard->StoreMapping(std::move(id_to_name));
+    std::unique_ptr<Shard> shard = std::make_unique<Shard>(to_init.label_id, to_init.min_key, to_init.max_key,
+                                                           to_init.schema, to_init.config, to_init.id_to_names);
 
     ShardRsm rsm_state{std::move(shard)};

View File

@@ -81,7 +81,7 @@ MachineManager<SimulatorTransport> MkMm(Simulator &simulator, std::vector<Addres
   Coordinator coordinator{shard_map};
 
-  return MachineManager{io, config, coordinator, shard_map};
+  return MachineManager{io, config, coordinator};
 }
 
 void RunMachine(MachineManager<SimulatorTransport> mm) { mm.Run(); }

View File

@@ -95,7 +95,7 @@ MachineManager<LocalTransport> MkMm(LocalSystem &local_system, std::vector<Addre
   Coordinator coordinator{shard_map};
 
-  return MachineManager{io, config, std::move(coordinator), shard_map};
+  return MachineManager{io, config, std::move(coordinator)};
 }
 
 void RunMachine(MachineManager<LocalTransport> mm) { mm.Run(); }

View File

@@ -185,7 +185,7 @@ MachineManager<LocalTransport> MkMm(LocalSystem &local_system, std::vector<Addre
   Coordinator coordinator{shard_map};
 
-  return MachineManager{io, config, coordinator, shard_map};
+  return MachineManager{io, config, coordinator};
 }
 
 void RunMachine(MachineManager<LocalTransport> mm) { mm.Run(); }