Replace boost with capnp in RPC

Summary:
Converts the RPC stack to use Cap'n Proto for serialization instead of
Boost. There are still some traces of Boost in other places in the code,
but most of it is removed. A future diff should clean up Boost for good.

The RPC API has also changed to be more flexible with regard to how data
is serialized. This makes the simplest cases a bit more verbose, but
allows complex serialization code to be written correctly instead of
relying on hacks. (For reference, look at the old serialization of
`PullRpc`, which had nasty pointer hacks to inject accessors into
`TypedValue`.)

Since RPC messages were needlessly modeled via inheritance from a
`Message` base class, that class is now removed. Furthermore, that
approach doesn't really work with Cap'n Proto. Instead, each message type
is required to carry some type information. This can be automated, so
`define-rpc` has been added to LCP, which hopefully simplifies defining
new RPC request and response messages.

Specify Cap'n Proto schema ID in cmake:
This preserves Cap'n Proto generated typeIds across multiple generations
of capnp schemas through LCP. It is imperative that the typeId stays the
same to ensure that different compilations of Memgraph can communicate
via RPC in a distributed cluster.

Use CLOS for meta information on C++ types in LCP:
Since some structure slots and functions have started to repeat
themselves, it makes sense to model C++ meta information via the Common
Lisp Object System.

Depends on D1391

Reviewers: buda, dgleich, mferencevic, mtomic, mculinovic, msantl
Reviewed By: msantl
Subscribers: pullbot
Differential Revision: https://phabricator.memgraph.io/D1407
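
For illustration, a minimal sketch of the new API for a message that
actually carries data might look as follows. The `SumReq` message, its
`x` field, and a schema-generated `capnp::SumReq` type with `setX`/`getX`
accessors are assumed for the example; they are not part of this diff:

// Hypothetical message carrying data; capnp::SumReq is an assumed
// LCP/capnp-generated schema type with setX/getX accessors.
struct SumReq {
  using Capnp = capnp::SumReq;
  static const communication::rpc::MessageType TypeInfo;

  int x = 0;

  // Serialization is written explicitly against the generated
  // builder/reader instead of relying on pointer hacks.
  void Save(capnp::SumReq::Builder *builder) const { builder->setX(x); }
  void Load(const capnp::SumReq::Reader &reader) { x = reader.getX(); }
};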

#include <chrono>
#include <experimental/optional>
#include <memory>
#include <mutex>
#include <thread>
#include <unordered_map>
#include <vector>

#include "capnp/serialize.h"
#include "gmock/gmock.h"
#include "gtest/gtest.h"

#include "communication/rpc/messages.hpp"
#include "communication/rpc/server.hpp"
#include "distributed/cluster_discovery_master.hpp"
#include "distributed/cluster_discovery_worker.hpp"
#include "distributed/coordination_master.hpp"
#include "distributed/coordination_worker.hpp"
#include "distributed/rpc_worker_clients.hpp"
#include "distributed/serialization.hpp"
#include "io/network/endpoint.hpp"

using namespace std::chrono_literals;

namespace distributed {

struct IncrementCounterReq {
  using Capnp = ::capnp::AnyPointer;
  static const communication::rpc::MessageType TypeInfo;

  void Save(::capnp::AnyPointer::Builder *) const {}

  void Load(const ::capnp::AnyPointer::Reader &) {}
};

const communication::rpc::MessageType IncrementCounterReq::TypeInfo{
    0, "IncrementCounterReq"};

struct IncrementCounterRes {
  using Capnp = ::capnp::AnyPointer;
  static const communication::rpc::MessageType TypeInfo;

  void Save(::capnp::AnyPointer::Builder *) const {}

  void Load(const ::capnp::AnyPointer::Reader &) {}
};

const communication::rpc::MessageType IncrementCounterRes::TypeInfo{
    1, "IncrementCounterRes"};

using IncrementCounterRpc =
    communication::rpc::RequestResponse<IncrementCounterReq,
                                        IncrementCounterRes>;

}  // namespace distributed
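
// `IncrementCounterRpc` pairs the request and response message types
// defined above: servers register a handler for the pair (see `SetUp`
// below) and clients invoke it via
// `Call<distributed::IncrementCounterRpc>()`.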

class RpcWorkerClientsTest : public ::testing::Test {
 protected:
  const io::network::Endpoint kLocalHost{"127.0.0.1", 0};
  const int kWorkerCount = 2;

  void SetUp() override {
    master_coord_->SetRecoveryInfo(std::experimental::nullopt);
    for (int i = 1; i <= kWorkerCount; ++i) {
      workers_server_.emplace_back(
          std::make_unique<communication::rpc::Server>(kLocalHost));

      workers_coord_.emplace_back(
          std::make_unique<distributed::WorkerCoordination>(
              *workers_server_.back(), master_server_.endpoint()));

      cluster_discovery_.emplace_back(
          std::make_unique<distributed::ClusterDiscoveryWorker>(
              *workers_server_.back(), *workers_coord_.back(),
              rpc_workers_.GetClientPool(0)));

      cluster_discovery_.back()->RegisterWorker(i);

      // Count incoming RPC calls per worker, under a mutex since the
      // server may dispatch handlers concurrently.
      workers_server_.back()->Register<distributed::IncrementCounterRpc>(
          [this, i](const auto &req_reader, auto *res_builder) {
            std::unique_lock<std::mutex> lock(mutex_);
            workers_cnt_[i]++;
          });
    }
  }

  void TearDown() override {
    std::vector<std::thread> wait_on_shutdown;
    for (size_t i = 0; i < workers_coord_.size(); ++i) {
      wait_on_shutdown.emplace_back([i, this]() {
        workers_coord_[i]->WaitForShutdown();
        workers_server_[i] = nullptr;
      });
    }

    // Give the shutdown threads time to start waiting before the master
    // goes down.
    std::this_thread::sleep_for(300ms);

    // Destroying the master coordination starts server shutdown and
    // notifies the workers.
    master_coord_ = std::experimental::nullopt;
    for (auto &worker : wait_on_shutdown) worker.join();
  }

  std::vector<std::unique_ptr<communication::rpc::Server>> workers_server_;
  std::vector<std::unique_ptr<distributed::WorkerCoordination>> workers_coord_;
  std::vector<std::unique_ptr<distributed::ClusterDiscoveryWorker>>
      cluster_discovery_;
  std::mutex mutex_;
  std::unordered_map<int, int> workers_cnt_;

  communication::rpc::Server master_server_{kLocalHost};
  std::experimental::optional<distributed::MasterCoordination> master_coord_{
      master_server_.endpoint()};

  distributed::RpcWorkerClients rpc_workers_{*master_coord_};
  distributed::ClusterDiscoveryMaster cluster_discovery_master_{
      master_server_, *master_coord_, rpc_workers_};
};

TEST_F(RpcWorkerClientsTest, GetWorkerIds) {
  EXPECT_THAT(rpc_workers_.GetWorkerIds(), testing::UnorderedElementsAreArray(
                                               master_coord_->GetWorkerIds()));
}

TEST_F(RpcWorkerClientsTest, GetClientPool) {
  auto &pool1 = rpc_workers_.GetClientPool(1);
  auto &pool2 = rpc_workers_.GetClientPool(2);
  EXPECT_NE(&pool1, &pool2);
  EXPECT_EQ(&pool1, &rpc_workers_.GetClientPool(1));
}

TEST_F(RpcWorkerClientsTest, ExecuteOnWorker) {
  auto execute = [](int worker_id, auto &client) -> void {
    ASSERT_TRUE(client.template Call<distributed::IncrementCounterRpc>());
  };

  rpc_workers_.ExecuteOnWorker<void>(1, execute).get();
  EXPECT_EQ(workers_cnt_[0], 0);
  EXPECT_EQ(workers_cnt_[1], 1);
  EXPECT_EQ(workers_cnt_[2], 0);
}

TEST_F(RpcWorkerClientsTest, ExecuteOnWorkers) {
  auto execute = [](int worker_id, auto &client) -> void {
    ASSERT_TRUE(client.template Call<distributed::IncrementCounterRpc>());
  };

  // Skip master
  for (auto &future : rpc_workers_.ExecuteOnWorkers<void>(0, execute))
    future.get();

  EXPECT_EQ(workers_cnt_[0], 0);
  EXPECT_EQ(workers_cnt_[1], 1);
  EXPECT_EQ(workers_cnt_[2], 1);
}