// memgraph/tests/unit/rpc_worker_clients.cpp

#include <chrono>
#include <experimental/optional>
#include <memory>
#include <mutex>
#include <thread>
#include <unordered_map>
#include <vector>

#include "capnp/serialize.h"
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "communication/rpc/messages.hpp"
#include "communication/rpc/server.hpp"
#include "distributed/cluster_discovery_master.hpp"
#include "distributed/cluster_discovery_worker.hpp"
#include "distributed/coordination_master.hpp"
#include "distributed/coordination_worker.hpp"
#include "distributed/rpc_worker_clients.hpp"
#include "distributed/serialization.hpp"
#include "io/network/endpoint.hpp"
namespace distributed {
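
// Minimal hand-written RPC messages for this test. Every RPC message type
// provides a `Capnp` alias naming its Cap'n Proto schema type, a static
// `TypeInfo` identifier, and `Save`/`Load` functions. These counter messages
// carry no payload, so they use `::capnp::AnyPointer` and leave `Save`/`Load`
// empty; a message with data would serialize it through the builder/reader.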
struct IncrementCounterReq {
using Capnp = ::capnp::AnyPointer;
static const communication::rpc::MessageType TypeInfo;
void Save(::capnp::AnyPointer::Builder *) const {}
void Load(const ::capnp::AnyPointer::Reader &) {}
};
const communication::rpc::MessageType IncrementCounterReq::TypeInfo{
0, "IncrementCounterReq"};
struct IncrementCounterRes {
using Capnp = ::capnp::AnyPointer;
static const communication::rpc::MessageType TypeInfo;
void Save(::capnp::AnyPointer::Builder *) const {}
void Load(const ::capnp::AnyPointer::Reader &) {}
};
const communication::rpc::MessageType IncrementCounterRes::TypeInfo{
1, "IncrementCounterRes"};
using IncrementCounterRpc =
communication::rpc::RequestResponse<IncrementCounterReq,
IncrementCounterRes>;
}  // namespace distributed
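
// Starts a master coordination server plus `kWorkerCount` worker servers and
// tracks how many IncrementCounter requests each worker has handled.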
class RpcWorkerClientsTest : public ::testing::Test {
protected:
const io::network::Endpoint kLocalHost{"127.0.0.1", 0};
const int kWorkerCount = 2;
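
  // Spin up the workers: each gets its own RPC server, coordination, and
  // cluster discovery, and registers itself with the master.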
void SetUp() override {
master_coord_->SetRecoveryInfo(std::experimental::nullopt);
for (int i = 1; i <= kWorkerCount; ++i) {
workers_server_.emplace_back(
std::make_unique<communication::rpc::Server>(kLocalHost));
workers_coord_.emplace_back(
std::make_unique<distributed::WorkerCoordination>(
*workers_server_.back(), master_server_.endpoint()));
cluster_discovery_.emplace_back(
std::make_unique<distributed::ClusterDiscoveryWorker>(
*workers_server_.back(), *workers_coord_.back(),
rpc_workers_.GetClientPool(0)));
cluster_discovery_.back()->RegisterWorker(i);
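      // Count each IncrementCounter request this worker receives.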
workers_server_.back()->Register<distributed::IncrementCounterRpc>(
[this, i](const auto &req_reader, auto *res_builder) {
std::unique_lock<std::mutex> lock(mutex_);
workers_cnt_[i]++;
});
}
}
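
  // Each worker blocks in WaitForShutdown() until the master coordination is
  // destroyed, so the waits run on separate threads and are joined at the end.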
void TearDown() override {
std::vector<std::thread> wait_on_shutdown;
    for (size_t i = 0; i < workers_coord_.size(); ++i) {
wait_on_shutdown.emplace_back([i, this]() {
workers_coord_[i]->WaitForShutdown();
workers_server_[i] = nullptr;
});
}
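    // Give the shutdown threads a moment to reach WaitForShutdown() before
    // the master begins the shutdown sequence.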
std::this_thread::sleep_for(300ms);
// Starts server shutdown and notifies the workers
master_coord_ = std::experimental::nullopt;
for (auto &worker : wait_on_shutdown) worker.join();
}
std::vector<std::unique_ptr<communication::rpc::Server>> workers_server_;
std::vector<std::unique_ptr<distributed::WorkerCoordination>> workers_coord_;
std::vector<std::unique_ptr<distributed::ClusterDiscoveryWorker>>
cluster_discovery_;
std::mutex mutex_;
std::unordered_map<int, int> workers_cnt_;
communication::rpc::Server master_server_{kLocalHost};
std::experimental::optional<distributed::MasterCoordination> master_coord_{
master_server_.endpoint()};
distributed::RpcWorkerClients rpc_workers_{*master_coord_};
  distributed::ClusterDiscoveryMaster cluster_discovery_master_{
      master_server_, *master_coord_, rpc_workers_};
};
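
// The client-side view of worker IDs should match the master's bookkeeping.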
TEST_F(RpcWorkerClientsTest, GetWorkerIds) {
EXPECT_THAT(rpc_workers_.GetWorkerIds(), testing::UnorderedElementsAreArray(
master_coord_->GetWorkerIds()));
}
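
// Client pools are created per worker and cached: distinct workers get
// distinct pools, and repeated lookups for the same worker return the same
// pool.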
TEST_F(RpcWorkerClientsTest, GetClientPool) {
auto &pool1 = rpc_workers_.GetClientPool(1);
auto &pool2 = rpc_workers_.GetClientPool(2);
EXPECT_NE(&pool1, &pool2);
EXPECT_EQ(&pool1, &rpc_workers_.GetClientPool(1));
}
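
// Executing an RPC on worker 1 should increment only that worker's counter.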
TEST_F(RpcWorkerClientsTest, ExecuteOnWorker) {
auto execute = [](int worker_id, auto &client) -> void {
ASSERT_TRUE(client.template Call<distributed::IncrementCounterRpc>());
};
rpc_workers_.ExecuteOnWorker<void>(1, execute).get();
EXPECT_EQ(workers_cnt_[0], 0);
EXPECT_EQ(workers_cnt_[1], 1);
EXPECT_EQ(workers_cnt_[2], 0);
}
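
// ExecuteOnWorkers runs the function on every worker except the given one
// (0, the master), so both workers' counters increment exactly once.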
TEST_F(RpcWorkerClientsTest, ExecuteOnWorkers) {
auto execute = [](int worker_id, auto &client) -> void {
ASSERT_TRUE(client.template Call<distributed::IncrementCounterRpc>());
};
// Skip master
for (auto &future : rpc_workers_.ExecuteOnWorkers<void>(0, execute))
future.get();
EXPECT_EQ(workers_cnt_[0], 0);
EXPECT_EQ(workers_cnt_[1], 1);
EXPECT_EQ(workers_cnt_[2], 1);
}