From d6f150558250dc260b977e69d38aae3cb6c4e3e4 Mon Sep 17 00:00:00 2001
From: Tyler Neely
Date: Mon, 12 Dec 2022 15:14:48 +0000
Subject: [PATCH 1/2] Make Shard into a proper struct that can contain
 additional metadata
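
Previously coordinator::Shard was a bare alias:

    using Shard = std::vector<AddressAndStatus>;

so a shard *was* its peer list, and call sites iterated over it
directly. This patch turns it into a struct whose peer list sits
behind a named field, plus a version counter as the first piece of
per-shard metadata. A minimal before/after sketch of a call site
(names taken from the diff below; the sketch itself is illustrative
and not part of the diff):

    // before: the shard is the peer list
    Shard shard = {aas1_1, aas1_2, aas1_3};
    for (const auto &aas : shard) { /* ... */ }

    // after: peers move behind a field, leaving room for metadata
    Shard shard = Shard{.peers = {aas1_1, aas1_2, aas1_3}, .version = 1};
    for (const auto &aas : shard.peers) { /* ... */ }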
---
 src/coordinator/shard_map.cpp       | 10 +++++-----
 src/coordinator/shard_map.hpp       | 29 ++++++++++++++++++++++++++++-
 src/query/v2/request_router.hpp     | 10 +++++-----
 tests/simulation/request_router.cpp |  4 ++--
 tests/simulation/sharded_map.cpp    |  6 +++---
 5 files changed, 43 insertions(+), 16 deletions(-)

diff --git a/src/coordinator/shard_map.cpp b/src/coordinator/shard_map.cpp
index ea167db87..7334fd0f9 100644
--- a/src/coordinator/shard_map.cpp
+++ b/src/coordinator/shard_map.cpp
@@ -283,7 +283,7 @@ std::vector<ShardToInitialize> ShardMap::AssignShards(Address storage_manager,
       // TODO(tyler) avoid these triple-nested loops by having the heartbeat include better info
       bool machine_contains_shard = false;
 
-      for (auto &aas : shard) {
+      for (auto &aas : shard.peers) {
         if (initialized.contains(aas.address.unique_id)) {
           machine_contains_shard = true;
           if (aas.status != Status::CONSENSUS_PARTICIPANT) {
@@ -311,7 +311,7 @@
         }
       }
 
-      if (!machine_contains_shard && shard.size() < label_space.replication_factor) {
+      if (!machine_contains_shard && shard.peers.size() < label_space.replication_factor) {
         // increment version for each new uuid for deterministic creation
         IncrementShardMapVersion();
@@ -337,7 +337,7 @@
             .status = Status::INITIALIZING,
         };
 
-        shard.emplace_back(aas);
+        shard.peers.emplace_back(aas);
       }
     }
   }
@@ -556,12 +556,12 @@ EdgeTypeIdMap ShardMap::AllocateEdgeTypeIds(const std::vector<EdgeTypeName> &new
 bool ShardMap::ClusterInitialized() const {
   for (const auto &[label_id, label_space] : label_spaces) {
     for (const auto &[low_key, shard] : label_space.shards) {
-      if (shard.size() < label_space.replication_factor) {
+      if (shard.peers.size() < label_space.replication_factor) {
         spdlog::info("label_space below desired replication factor");
         return false;
       }
 
-      for (const auto &aas : shard) {
+      for (const auto &aas : shard.peers) {
         if (aas.status != Status::CONSENSUS_PARTICIPANT) {
           spdlog::info("shard member not yet a CONSENSUS_PARTICIPANT");
           return false;
diff --git a/src/coordinator/shard_map.hpp b/src/coordinator/shard_map.hpp
index 80c32eeba..6bb7a0624 100644
--- a/src/coordinator/shard_map.hpp
+++ b/src/coordinator/shard_map.hpp
@@ -76,7 +76,34 @@ struct AddressAndStatus {
 };
 
 using PrimaryKey = std::vector<PropertyValue>;
-using Shard = std::vector<AddressAndStatus>;
+
+struct Shard {
+  std::vector<AddressAndStatus> peers;
+  uint64_t version;
+
+  friend std::ostream &operator<<(std::ostream &in, const Shard &shard) {
+    using utils::print_helpers::operator<<;
+
+    in << "Shard { peers: ";
+    in << shard.peers;
+    in << " version: ";
+    in << shard.version;
+    in << " }";
+
+    return in;
+  }
+
+  friend bool operator==(const Shard &lhs, const Shard &rhs) = default;
+
+  friend bool operator<(const Shard &lhs, const Shard &rhs) {
+    if (lhs.peers != rhs.peers) {
+      return lhs.peers < rhs.peers;
+    }
+
+    return lhs.version < rhs.version;
+  }
+};
+
 using Shards = std::map<PrimaryKey, Shard>;
 using LabelName = std::string;
 using PropertyName = std::string;
diff --git a/src/query/v2/request_router.hpp b/src/query/v2/request_router.hpp
index 116e884c2..05918e8a3 100644
--- a/src/query/v2/request_router.hpp
+++ b/src/query/v2/request_router.hpp
@@ -484,7 +484,7 @@ class RequestRouter : public RequestRouterInterface {
 
     for (auto &shards : multi_shards) {
       for (auto &[key, shard] : shards) {
-        MG_ASSERT(!shard.empty());
+        MG_ASSERT(!shard.peers.empty());
 
         msgs::ScanVerticesRequest request;
         request.transaction_id = transaction_id_;
@@ -584,11 +584,11 @@
   }
 
   void AddStorageClientToManager(Shard target_shard) {
-    MG_ASSERT(!target_shard.empty());
-    auto leader_addr = target_shard.front();
+    MG_ASSERT(!target_shard.peers.empty());
+    auto leader_addr = target_shard.peers.front();
     std::vector<Address> addresses;
-    addresses.reserve(target_shard.size());
-    for (auto &address : target_shard) {
+    addresses.reserve(target_shard.peers.size());
+    for (auto &address : target_shard.peers) {
       addresses.push_back(std::move(address.address));
     }
     auto cli = StorageClient(io_, std::move(leader_addr.address), std::move(addresses));
diff --git a/tests/simulation/request_router.cpp b/tests/simulation/request_router.cpp
index 0f712793f..dc60bb8e5 100644
--- a/tests/simulation/request_router.cpp
+++ b/tests/simulation/request_router.cpp
@@ -113,7 +113,7 @@ ShardMap CreateDummyShardmap(coordinator::Address a_io_1, coordinator::Address a
   AddressAndStatus aas1_2{.address = a_io_2, .status = Status::CONSENSUS_PARTICIPANT};
   AddressAndStatus aas1_3{.address = a_io_3, .status = Status::CONSENSUS_PARTICIPANT};
 
-  Shard shard1 = {aas1_1, aas1_2, aas1_3};
+  Shard shard1 = Shard{.peers = {aas1_1, aas1_2, aas1_3}, .version = 1};
 
   auto key1 = storage::v3::PropertyValue(0);
   auto key2 = storage::v3::PropertyValue(0);
@@ -125,7 +125,7 @@ ShardMap CreateDummyShardmap(coordinator::Address a_io_1, coordinator::Address a
   AddressAndStatus aas2_2{.address = b_io_2, .status = Status::CONSENSUS_PARTICIPANT};
   AddressAndStatus aas2_3{.address = b_io_3, .status = Status::CONSENSUS_PARTICIPANT};
 
-  Shard shard2 = {aas2_1, aas2_2, aas2_3};
+  Shard shard2 = Shard{.peers = {aas2_1, aas2_2, aas2_3}, .version = 1};
 
   auto key3 = storage::v3::PropertyValue(12);
   auto key4 = storage::v3::PropertyValue(13);
diff --git a/tests/simulation/sharded_map.cpp b/tests/simulation/sharded_map.cpp
index 91661fc66..cdc2d69b6 100644
--- a/tests/simulation/sharded_map.cpp
+++ b/tests/simulation/sharded_map.cpp
@@ -109,7 +109,7 @@ ShardMap CreateDummyShardmap(Address a_io_1, Address a_io_2, Address a_io_3, Add
   AddressAndStatus aas1_2{.address = a_io_2, .status = Status::CONSENSUS_PARTICIPANT};
   AddressAndStatus aas1_3{.address = a_io_3, .status = Status::CONSENSUS_PARTICIPANT};
 
-  Shard shard1 = {aas1_1, aas1_2, aas1_3};
+  Shard shard1 = Shard{.peers = {aas1_1, aas1_2, aas1_3}, .version = 1};
 
   const auto key1 = PropertyValue(0);
   const auto key2 = PropertyValue(0);
@@ -121,7 +121,7 @@ ShardMap CreateDummyShardmap(Address a_io_1, Address a_io_2, Address a_io_3, Add
   AddressAndStatus aas2_2{.address = b_io_2, .status = Status::CONSENSUS_PARTICIPANT};
   AddressAndStatus aas2_3{.address = b_io_3, .status = Status::CONSENSUS_PARTICIPANT};
 
-  Shard shard2 = {aas2_1, aas2_2, aas2_3};
+  Shard shard2 = Shard{.peers = {aas2_1, aas2_2, aas2_3}, .version = 1};
 
   auto key3 = PropertyValue(12);
   auto key4 = PropertyValue(13);
@@ -134,7 +134,7 @@
 std::optional<ShardClient *> DetermineShardLocation(const Shard &target_shard, const std::vector<Address> &a_addrs,
                                                     ShardClient &a_client, const std::vector<Address> &b_addrs,
                                                     ShardClient &b_client) {
-  for (const auto &addr : target_shard) {
+  for (const auto &addr : target_shard.peers) {
     if (addr.address == b_addrs[0]) {
       return &b_client;
     }

From 1170e6762f0909055b1ae0e7411e1d093bb6eefc Mon Sep 17 00:00:00 2001
From: Tyler Neely
Date: Mon, 12 Dec 2022 15:22:17 +0000
Subject: [PATCH 2/2] Rename coordinator::Shard to coordinator::ShardMetadata
 to avoid conflation with storage::v3::Shard
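
After the previous patch, the codebase carries two unrelated types
named Shard:

    coordinator::Shard   // placement metadata: a peer list plus a version
    storage::v3::Shard   // the storage engine's shard itself

Renaming the coordinator-side type to ShardMetadata keeps call sites
unambiguous. A sketch of a disambiguated call site (illustrative only;
GetShardForKey and the fields come from the diff below, the
surrounding variables are hypothetical):

    coordinator::ShardMetadata meta = shard_map.GetShardForKey(label_id, key);
    for (const auto &aas : meta.peers) {
      // route the request to a replica; meta.version tracks placement changes
    }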
---
 src/coordinator/shard_map.cpp                 | 10 +++----
 src/coordinator/shard_map.hpp                 | 18 ++++------
 src/query/v2/request_router.hpp               | 28 +++++++++----------
 tests/simulation/request_router.cpp           |  6 ++--
 tests/simulation/sharded_map.cpp              | 14 +++++-----
 tests/simulation/test_cluster.hpp             |  2 +-
 tests/unit/high_density_shard_create_scan.cpp |  2 +-
 tests/unit/machine_manager.cpp                |  2 +-
 8 files changed, 41 insertions(+), 41 deletions(-)

diff --git a/src/coordinator/shard_map.cpp b/src/coordinator/shard_map.cpp
index 7334fd0f9..f38e6f823 100644
--- a/src/coordinator/shard_map.cpp
+++ b/src/coordinator/shard_map.cpp
@@ -360,9 +360,9 @@ bool ShardMap::SplitShard(Hlc previous_shard_map_version, LabelId label_id, cons
   MG_ASSERT(!shards_in_map.contains(key));
   MG_ASSERT(label_spaces.contains(label_id));
 
-  // Finding the Shard that the new PrimaryKey should map to.
+  // Finding the ShardMetadata that the new PrimaryKey should map to.
   auto prev = std::prev(shards_in_map.upper_bound(key));
-  Shard duplicated_shard = prev->second;
+  ShardMetadata duplicated_shard = prev->second;
 
   // Apply the split
   shards_in_map[key] = duplicated_shard;
@@ -383,7 +383,7 @@ std::optional<LabelId> ShardMap::InitializeNewLabel(std::string label_name, std:
   labels.emplace(std::move(label_name), label_id);
 
   PrimaryKey initial_key = SchemaToMinKey(schema);
-  Shard empty_shard = {};
+  ShardMetadata empty_shard = {};
 
   Shards shards = {
       {initial_key, empty_shard},
@@ -479,7 +479,7 @@ Shards ShardMap::GetShardsForRange(const LabelName &label_name, const PrimaryKey
   return shards;
 }
 
-Shard ShardMap::GetShardForKey(const LabelName &label_name, const PrimaryKey &key) const {
+ShardMetadata ShardMap::GetShardForKey(const LabelName &label_name, const PrimaryKey &key) const {
   MG_ASSERT(labels.contains(label_name));
   LabelId label_id = labels.at(label_name);
 
@@ -492,7 +492,7 @@ Shard ShardMap::GetShardForKey(const LabelName &label_name, const PrimaryKey &ke
   return std::prev(label_space.shards.upper_bound(key))->second;
 }
 
-Shard ShardMap::GetShardForKey(const LabelId &label_id, const PrimaryKey &key) const {
+ShardMetadata ShardMap::GetShardForKey(const LabelId &label_id, const PrimaryKey &key) const {
   MG_ASSERT(label_spaces.contains(label_id));
   const auto &label_space = label_spaces.at(label_id);
 
diff --git a/src/coordinator/shard_map.hpp b/src/coordinator/shard_map.hpp
index 6bb7a0624..fc408e965 100644
--- a/src/coordinator/shard_map.hpp
+++ b/src/coordinator/shard_map.hpp
@@ -77,14 +77,14 @@ struct AddressAndStatus {
 
 using PrimaryKey = std::vector<PropertyValue>;
 
-struct Shard {
+struct ShardMetadata {
   std::vector<AddressAndStatus> peers;
   uint64_t version;
 
-  friend std::ostream &operator<<(std::ostream &in, const Shard &shard) {
+  friend std::ostream &operator<<(std::ostream &in, const ShardMetadata &shard) {
     using utils::print_helpers::operator<<;
 
-    in << "Shard { peers: ";
+    in << "ShardMetadata { peers: ";
     in << shard.peers;
     in << " version: ";
     in << shard.version;
@@ -93,9 +93,9 @@ struct Shard {
     return in;
   }
 
-  friend bool operator==(const Shard &lhs, const Shard &rhs) = default;
+  friend bool operator==(const ShardMetadata &lhs, const ShardMetadata &rhs) = default;
 
-  friend bool operator<(const Shard &lhs, const Shard &rhs) {
+  friend bool operator<(const ShardMetadata &lhs, const ShardMetadata &rhs) {
     if (lhs.peers != rhs.peers) {
       return lhs.peers < rhs.peers;
     }
@@ -104,7 +104,7 @@ struct Shard {
     return lhs.version < rhs.version;
   }
 };
 
-using Shards = std::map<PrimaryKey, Shard>;
+using Shards = std::map<PrimaryKey, ShardMetadata>;
 using LabelName = std::string;
 using PropertyName = std::string;
 using EdgeTypeName = std::string;
@@ -126,7 +126,7 @@ PrimaryKey SchemaToMinKey(const std::vector<SchemaProperty> &schema);
 struct LabelSpace {
   std::vector<SchemaProperty> schema;
   // Maps between the smallest primary key stored in the shard and the shard
-  std::map<PrimaryKey, Shard> shards;
+  std::map<PrimaryKey, ShardMetadata> shards;
   size_t replication_factor;
 
   friend std::ostream &operator<<(std::ostream &in, const LabelSpace &label_space) {
@@ -187,9 +187,9 @@ struct ShardMap {
   Shards GetShardsForRange(const LabelName &label_name, const PrimaryKey &start_key,
                            const PrimaryKey &end_key) const;
 
-  Shard GetShardForKey(const LabelName &label_name, const PrimaryKey &key) const;
+  ShardMetadata GetShardForKey(const LabelName &label_name, const PrimaryKey &key) const;
 
-  Shard GetShardForKey(const LabelId &label_id, const PrimaryKey &key) const;
+  ShardMetadata GetShardForKey(const LabelId &label_id, const PrimaryKey &key) const;
 
   PropertyMap AllocatePropertyIds(const std::vector<PropertyName> &new_properties);
 
diff --git a/src/query/v2/request_router.hpp b/src/query/v2/request_router.hpp
index 05918e8a3..3dd2f164b 100644
--- a/src/query/v2/request_router.hpp
+++ b/src/query/v2/request_router.hpp
@@ -50,7 +50,7 @@ template <typename TStorageClient>
 class RsmStorageClientManager {
  public:
   using CompoundKey = io::rsm::ShardRsmKey;
-  using Shard = coordinator::Shard;
+  using ShardMetadata = coordinator::ShardMetadata;
   RsmStorageClientManager() = default;
   RsmStorageClientManager(const RsmStorageClientManager &) = delete;
   RsmStorageClientManager(RsmStorageClientManager &&) = delete;
@@ -58,25 +58,25 @@ class RsmStorageClientManager {
   RsmStorageClientManager &operator=(RsmStorageClientManager &&) = delete;
   ~RsmStorageClientManager() = default;
 
-  void AddClient(Shard key, TStorageClient client) { cli_cache_.emplace(std::move(key), std::move(client)); }
+  void AddClient(ShardMetadata key, TStorageClient client) { cli_cache_.emplace(std::move(key), std::move(client)); }
 
-  bool Exists(const Shard &key) { return cli_cache_.contains(key); }
+  bool Exists(const ShardMetadata &key) { return cli_cache_.contains(key); }
 
   void PurgeCache() { cli_cache_.clear(); }
 
-  TStorageClient &GetClient(const Shard &key) {
+  TStorageClient &GetClient(const ShardMetadata &key) {
     auto it = cli_cache_.find(key);
    MG_ASSERT(it != cli_cache_.end(), "Non-existing shard client");
     return it->second;
   }
 
 private:
-  std::map<Shard, TStorageClient> cli_cache_;
+  std::map<ShardMetadata, TStorageClient> cli_cache_;
 };
 
 template <typename TRequest>
 struct ShardRequestState {
-  memgraph::coordinator::Shard shard;
+  memgraph::coordinator::ShardMetadata shard;
   TRequest request;
 };
 
@@ -125,7 +125,7 @@ class RequestRouter : public RequestRouterInterface {
   using CoordinatorWriteRequests = coordinator::CoordinatorWriteRequests;
   using CoordinatorClient = coordinator::CoordinatorClient<TTransport>;
   using Address = io::Address;
-  using Shard = coordinator::Shard;
+  using ShardMetadata = coordinator::ShardMetadata;
   using ShardMap = coordinator::ShardMap;
   using CompoundKey = coordinator::PrimaryKey;
   using VertexAccessor = query::v2::accessors::VertexAccessor;
@@ -403,7 +403,7 @@ class RequestRouter : public RequestRouterInterface {
  private:
  std::vector<ShardRequestState<msgs::CreateVerticesRequest>> RequestsForCreateVertices(
       const std::vector<msgs::NewVertex> &new_vertices) {
-    std::map<Shard, msgs::CreateVerticesRequest> per_shard_request_table;
+    std::map<ShardMetadata, msgs::CreateVerticesRequest> per_shard_request_table;
 
     for (auto &new_vertex : new_vertices) {
       MG_ASSERT(!new_vertex.label_ids.empty(), "No label_ids provided for new vertex in RequestRouter::CreateVertices");
@@ -431,9 +431,9 @@ class RequestRouter : public RequestRouterInterface {
   std::vector<ShardRequestState<msgs::CreateExpandRequest>> RequestsForCreateExpand(
       const std::vector<msgs::NewExpand> &new_expands) {
-    std::map<Shard, msgs::CreateExpandRequest> per_shard_request_table;
+    std::map<ShardMetadata, msgs::CreateExpandRequest> per_shard_request_table;
     auto ensure_shard_exists_in_table = [&per_shard_request_table,
-                                         transaction_id = transaction_id_](const Shard &shard) {
+                                         transaction_id = transaction_id_](const ShardMetadata &shard) {
       if (!per_shard_request_table.contains(shard)) {
         msgs::CreateExpandRequest create_expand_request{.transaction_id = transaction_id};
         per_shard_request_table.insert({shard, std::move(create_expand_request)});
       }
@@ -503,7 +503,7 @@ class RequestRouter : public RequestRouterInterface {
   }
 
   std::vector<ShardRequestState<msgs::ExpandOneRequest>> RequestsForExpandOne(const msgs::ExpandOneRequest &request) {
-    std::map<Shard, msgs::ExpandOneRequest> per_shard_request_table;
+    std::map<ShardMetadata, msgs::ExpandOneRequest> per_shard_request_table;
     msgs::ExpandOneRequest top_level_rqst_template = request;
     top_level_rqst_template.transaction_id = transaction_id_;
     top_level_rqst_template.src_vertices.clear();
@@ -533,7 +533,7 @@ class RequestRouter : public RequestRouterInterface {
   std::vector<ShardRequestState<msgs::GetPropertiesRequest>> RequestsForGetProperties(
       msgs::GetPropertiesRequest &&request) {
-    std::map<Shard, msgs::GetPropertiesRequest> per_shard_request_table;
+    std::map<ShardMetadata, msgs::GetPropertiesRequest> per_shard_request_table;
     auto top_level_rqst_template = request;
     top_level_rqst_template.transaction_id = transaction_id_;
     top_level_rqst_template.vertex_ids.clear();
@@ -571,7 +571,7 @@ class RequestRouter : public RequestRouterInterface {
     return requests;
   }
 
-  StorageClient &GetStorageClientForShard(Shard shard) {
+  StorageClient &GetStorageClientForShard(ShardMetadata shard) {
    if (!storage_cli_manager_.Exists(shard)) {
       AddStorageClientToManager(shard);
     }
@@ -583,7 +583,7 @@ class RequestRouter : public RequestRouterInterface {
     return GetStorageClientForShard(std::move(shard));
   }
 
-  void AddStorageClientToManager(Shard target_shard) {
+  void AddStorageClientToManager(ShardMetadata target_shard) {
     MG_ASSERT(!target_shard.peers.empty());
     auto leader_addr = target_shard.peers.front();
     std::vector<Address> addresses;
diff --git a/tests/simulation/request_router.cpp b/tests/simulation/request_router.cpp
index dc60bb8e5..4248e7876 100644
--- a/tests/simulation/request_router.cpp
+++ b/tests/simulation/request_router.cpp
@@ -46,8 +46,8 @@ using coordinator::CoordinatorClient;
 using coordinator::CoordinatorRsm;
 using coordinator::HlcRequest;
 using coordinator::HlcResponse;
-using coordinator::Shard;
 using coordinator::ShardMap;
+using coordinator::ShardMetadata;
 using coordinator::Shards;
 using coordinator::Status;
 using io::Address;
@@ -113,7 +113,7 @@ ShardMap CreateDummyShardmap(coordinator::Address a_io_1, coordinator::Address a
   AddressAndStatus aas1_2{.address = a_io_2, .status = Status::CONSENSUS_PARTICIPANT};
   AddressAndStatus aas1_3{.address = a_io_3, .status = Status::CONSENSUS_PARTICIPANT};
 
-  Shard shard1 = Shard{.peers = {aas1_1, aas1_2, aas1_3}, .version = 1};
+  ShardMetadata shard1 = ShardMetadata{.peers = {aas1_1, aas1_2, aas1_3}, .version = 1};
 
   auto key1 = storage::v3::PropertyValue(0);
   auto key2 = storage::v3::PropertyValue(0);
@@ -125,7 +125,7 @@ ShardMap CreateDummyShardmap(coordinator::Address a_io_1, coordinator::Address a
   AddressAndStatus aas2_2{.address = b_io_2, .status = Status::CONSENSUS_PARTICIPANT};
   AddressAndStatus aas2_3{.address = b_io_3, .status = Status::CONSENSUS_PARTICIPANT};
 
-  Shard shard2 = Shard{.peers = {aas2_1, aas2_2, aas2_3}, .version = 1};
+  ShardMetadata shard2 = ShardMetadata{.peers = {aas2_1, aas2_2, aas2_3}, .version = 1};
 
   auto key3 = storage::v3::PropertyValue(12);
   auto key4 = storage::v3::PropertyValue(13);
diff --git a/tests/simulation/sharded_map.cpp b/tests/simulation/sharded_map.cpp
index cdc2d69b6..d27858abc 100644
--- a/tests/simulation/sharded_map.cpp
+++ b/tests/simulation/sharded_map.cpp
@@ -40,8 +40,8 @@ using memgraph::coordinator::CoordinatorRsm;
 using memgraph::coordinator::HlcRequest;
 using memgraph::coordinator::HlcResponse;
 using memgraph::coordinator::PrimaryKey;
-using memgraph::coordinator::Shard;
 using memgraph::coordinator::ShardMap;
+using memgraph::coordinator::ShardMetadata;
 using memgraph::coordinator::Shards;
 using memgraph::coordinator::Status;
 using memgraph::io::Address;
@@ -109,7 +109,7 @@ ShardMap CreateDummyShardmap(Address a_io_1, Address a_io_2, Address a_io_3, Add
   AddressAndStatus aas1_2{.address = a_io_2, .status = Status::CONSENSUS_PARTICIPANT};
   AddressAndStatus aas1_3{.address = a_io_3, .status = Status::CONSENSUS_PARTICIPANT};
 
-  Shard shard1 = Shard{.peers = {aas1_1, aas1_2, aas1_3}, .version = 1};
+  ShardMetadata shard1 = ShardMetadata{.peers = {aas1_1, aas1_2, aas1_3}, .version = 1};
 
   const auto key1 = PropertyValue(0);
   const auto key2 = PropertyValue(0);
@@ -121,7 +121,7 @@ ShardMap CreateDummyShardmap(Address a_io_1, Address a_io_2, Address a_io_3, Add
   AddressAndStatus aas2_2{.address = b_io_2, .status = Status::CONSENSUS_PARTICIPANT};
   AddressAndStatus aas2_3{.address = b_io_3, .status = Status::CONSENSUS_PARTICIPANT};
 
-  Shard shard2 = Shard{.peers = {aas2_1, aas2_2, aas2_3}, .version = 1};
+  ShardMetadata shard2 = ShardMetadata{.peers = {aas2_1, aas2_2, aas2_3}, .version = 1};
 
   auto key3 = PropertyValue(12);
   auto key4 = PropertyValue(13);
@@ -131,9 +131,9 @@ ShardMap CreateDummyShardmap(Address a_io_1, Address a_io_2, Address a_io_3, Add
   return sm;
 }
 
-std::optional<ShardClient *> DetermineShardLocation(const Shard &target_shard, const std::vector<Address> &a_addrs,
-                                                    ShardClient &a_client, const std::vector<Address> &b_addrs,
-                                                    ShardClient &b_client) {
+std::optional<ShardClient *> DetermineShardLocation(const ShardMetadata &target_shard,
+                                                    const std::vector<Address> &a_addrs, ShardClient &a_client,
+                                                    const std::vector<Address> &b_addrs, ShardClient &b_client) {
   for (const auto &addr : target_shard.peers) {
     if (addr.address == b_addrs[0]) {
       return &b_client;
     }
@@ -275,7 +275,7 @@ int main() {
 
   const PrimaryKey compound_key = {cm_key_1, cm_key_2};
 
-  // Look for Shard
+  // Look for ShardMetadata
   BasicResult<TimedOut, CoordinatorWriteResponses> read_res = coordinator_client.SendWriteRequest(req);
 
diff --git a/tests/simulation/test_cluster.hpp b/tests/simulation/test_cluster.hpp
index 3e14545a9..2e8bdf92f 100644
--- a/tests/simulation/test_cluster.hpp
+++ b/tests/simulation/test_cluster.hpp
@@ -47,8 +47,8 @@ using coordinator::GetShardMapRequest;
 using coordinator::GetShardMapResponse;
 using coordinator::Hlc;
 using coordinator::HlcResponse;
-using coordinator::Shard;
 using coordinator::ShardMap;
+using coordinator::ShardMetadata;
 using io::Address;
 using io::Io;
 using io::rsm::RsmClient;
diff --git a/tests/unit/high_density_shard_create_scan.cpp b/tests/unit/high_density_shard_create_scan.cpp
index cefa238ed..9fabf6ccc 100644
--- a/tests/unit/high_density_shard_create_scan.cpp
+++ b/tests/unit/high_density_shard_create_scan.cpp
@@ -44,8 +44,8 @@ using coordinator::GetShardMapRequest;
 using coordinator::GetShardMapResponse;
 using coordinator::Hlc;
 using coordinator::HlcResponse;
-using coordinator::Shard;
 using coordinator::ShardMap;
+using coordinator::ShardMetadata;
 using io::Address;
 using io::Io;
 using io::local_transport::LocalSystem;
diff --git a/tests/unit/machine_manager.cpp b/tests/unit/machine_manager.cpp
index 748233737..74b7d3863 100644
--- a/tests/unit/machine_manager.cpp
+++ b/tests/unit/machine_manager.cpp
@@ -45,8 +45,8 @@ using memgraph::coordinator::CoordinatorWriteRequests;
 using memgraph::coordinator::CoordinatorWriteResponses;
 using memgraph::coordinator::Hlc;
 using memgraph::coordinator::HlcResponse;
-using memgraph::coordinator::Shard;
 using memgraph::coordinator::ShardMap;
+using memgraph::coordinator::ShardMetadata;
 using memgraph::io::Io;
 using memgraph::io::local_transport::LocalSystem;
 using memgraph::io::local_transport::LocalTransport;