e0474a8e92
Summary:
Converts the RPC stack to use Cap'n Proto for serialization instead of boost. There are still some traces of boost in other places in the code, but most of it is removed. A future diff should clean up boost for good.

The RPC API is now changed to be more flexible with regard to how data is serialized. This makes the simplest cases a bit more verbose, but allows complex serialization code to be written correctly instead of relying on hacks. (For reference, look at the old serialization of `PullRpc`, which used a nasty pointer hack to inject accessors in `TypedValue`.)

Since RPC messages were needlessly modeled via inheritance from a Message base class, that class is now removed; that approach also doesn't really work with Cap'n Proto. Instead, each message type is required to carry some type information. This can be automated, so `define-rpc` has been added to LCP, which hopefully simplifies defining new RPC request and response messages.

Specify Cap'n Proto schema ID in cmake

This preserves Cap'n Proto generated typeIds across multiple generations of capnp schemas through LCP. It is imperative that the typeId stays the same to ensure that different compilations of Memgraph may communicate via RPC in a distributed cluster.

Use CLOS for meta information on C++ types in LCP

Since some structure slots and functions have started to repeat themselves, it makes sense to model C++ meta information via the Common Lisp Object System.

Depends on D1391

Reviewers: buda, dgleich, mferencevic, mtomic, mculinovic, msantl

Reviewed By: msantl

Subscribers: pullbot

Differential Revision: https://phabricator.memgraph.io/D1407
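For illustration, this is the shape every hand-written message now has under the new API: a `Capnp` schema alias, a static `TypeInfo`, and `Save`/`Load` members (the boilerplate that `define-rpc` in LCP can generate). `PingReq`, its `nonce` field, and the type id 42 are hypothetical, used here only to sketch the pattern; the real test messages follow below.

// Minimal hand-written RPC message under the new Cap'n Proto based API.
// NOTE: PingReq and its type id are illustrative, not part of this diff.
struct PingReq {
  using Capnp = ::capnp::AnyPointer;  // wire representation of the message
  static const MessageType TypeInfo;  // stable (id, name) type information

  PingReq() {}  // Needed for serialization.
  explicit PingReq(int nonce) : nonce(nonce) {}

  int nonce;

  // Serialize into the Cap'n Proto builder provided by the RPC layer.
  void Save(::capnp::AnyPointer::Builder *builder) const {
    auto list_builder = builder->initAs<::capnp::List<int>>(1);
    list_builder.set(0, nonce);
  }

  // Deserialize from the Cap'n Proto reader provided by the RPC layer.
  void Load(const ::capnp::AnyPointer::Reader &reader) {
    nonce = reader.getAs<::capnp::List<int>>()[0];
  }
};

const MessageType PingReq::TypeInfo{42, "PingReq"};  // illustrative id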
#include <string>
#include <thread>
#include <vector>

#include "capnp/serialize.h"
#include "glog/logging.h"
#include "gmock/gmock.h"
#include "gtest/gtest.h"

#include "communication/rpc/client.hpp"
#include "communication/rpc/client_pool.hpp"
#include "communication/rpc/messages.hpp"
#include "communication/rpc/server.hpp"
#include "utils/timer.hpp"

using namespace communication::rpc;
using namespace std::literals::chrono_literals;
struct SumReq {
  using Capnp = ::capnp::AnyPointer;
  static const MessageType TypeInfo;

  SumReq() {}  // Needed for serialization.
  SumReq(int x, int y) : x(x), y(y) {}

  int x;
  int y;

  void Save(::capnp::AnyPointer::Builder *builder) const {
    auto list_builder = builder->initAs<::capnp::List<int>>(2);
    list_builder.set(0, x);
    list_builder.set(1, y);
  }

  void Load(const ::capnp::AnyPointer::Reader &reader) {
    auto list_reader = reader.getAs<::capnp::List<int>>();
    x = list_reader[0];
    y = list_reader[1];
  }
};

const MessageType SumReq::TypeInfo{0, "SumReq"};
struct SumRes {
  using Capnp = ::capnp::AnyPointer;
  static const MessageType TypeInfo;

  SumRes() {}  // Needed for serialization.
  SumRes(int sum) : sum(sum) {}

  int sum;

  void Save(::capnp::AnyPointer::Builder *builder) const {
    auto list_builder = builder->initAs<::capnp::List<int>>(1);
    list_builder.set(0, sum);
  }

  void Load(const ::capnp::AnyPointer::Reader &reader) {
    auto list_reader = reader.getAs<::capnp::List<int>>();
    sum = list_reader[0];
  }
};

const MessageType SumRes::TypeInfo{1, "SumRes"};

using Sum = RequestResponse<SumReq, SumRes>;
struct EchoMessage {
  using Capnp = ::capnp::AnyPointer;
  static const MessageType TypeInfo;

  EchoMessage() {}  // Needed for serialization.
  EchoMessage(const std::string &data) : data(data) {}

  std::string data;

  void Save(::capnp::AnyPointer::Builder *builder) const {
    auto list_builder = builder->initAs<::capnp::List<::capnp::Text>>(1);
    list_builder.set(0, data);
  }

  void Load(const ::capnp::AnyPointer::Reader &reader) {
    auto list_reader = reader.getAs<::capnp::List<::capnp::Text>>();
    data = list_reader[0];
  }
};

const MessageType EchoMessage::TypeInfo{2, "EchoMessage"};

using Echo = RequestResponse<EchoMessage, EchoMessage>;
TEST(Rpc, Call) {
  Server server({"127.0.0.1", 0});
  server.Register<Sum>([](const auto &req_reader, auto *res_builder) {
    SumReq req;
    req.Load(req_reader);
    SumRes res(req.x + req.y);
    res.Save(res_builder);
  });
  std::this_thread::sleep_for(100ms);

  Client client(server.endpoint());
  auto sum = client.Call<Sum>(10, 20);
  ASSERT_TRUE(sum);
  EXPECT_EQ(sum->sum, 30);
}
TEST(Rpc, Abort) {
  Server server({"127.0.0.1", 0});
  server.Register<Sum>([](const auto &req_reader, auto *res_builder) {
    SumReq req;
    req.Load(req_reader);
    std::this_thread::sleep_for(500ms);
    SumRes res(req.x + req.y);
    res.Save(res_builder);
  });
  std::this_thread::sleep_for(100ms);

  Client client(server.endpoint());

  std::thread thread([&client]() {
    std::this_thread::sleep_for(100ms);
    LOG(INFO) << "Shutting down the connection!";
    client.Abort();
  });

  utils::Timer timer;
  auto sum = client.Call<Sum>(10, 20);
  EXPECT_FALSE(sum);
  EXPECT_LT(timer.Elapsed(), 200ms);

  thread.join();
}
TEST(Rpc, ClientPool) {
  Server server({"127.0.0.1", 0});
  server.Register<Sum>([](const auto &req_reader, auto *res_builder) {
    SumReq req;
    req.Load(req_reader);
    std::this_thread::sleep_for(100ms);
    SumRes res(req.x + req.y);
    res.Save(res_builder);
  });
  std::this_thread::sleep_for(100ms);

  Client client(server.endpoint());

  // These calls should take more than 400ms because we're using a regular
  // client, which serializes the calls made through it.
  auto get_sum_client = [&client](int x, int y) {
    auto sum = client.Call<Sum>(x, y);
    ASSERT_TRUE(sum);
    EXPECT_EQ(sum->sum, x + y);
  };

  utils::Timer t1;
  std::vector<std::thread> threads;
  for (int i = 0; i < 4; ++i) {
    threads.emplace_back(get_sum_client, 2 * i, 2 * i + 1);
  }
  for (int i = 0; i < 4; ++i) {
    threads[i].join();
  }
  threads.clear();

  EXPECT_GE(t1.Elapsed(), 400ms);

  ClientPool pool(server.endpoint());

  // These calls shouldn't take much more than 100ms because they execute in
  // parallel.
  auto get_sum = [&pool](int x, int y) {
    auto sum = pool.Call<Sum>(x, y);
    ASSERT_TRUE(sum);
    EXPECT_EQ(sum->sum, x + y);
  };

  utils::Timer t2;
  for (int i = 0; i < 4; ++i) {
    threads.emplace_back(get_sum, 2 * i, 2 * i + 1);
  }
  for (int i = 0; i < 4; ++i) {
    threads[i].join();
  }
  EXPECT_LE(t2.Elapsed(), 200ms);
}
TEST(Rpc, LargeMessage) {
  Server server({"127.0.0.1", 0});
  server.Register<Echo>([](const auto &req_reader, auto *res_builder) {
    EchoMessage res;
    res.Load(req_reader);
    res.Save(res_builder);
  });
  std::this_thread::sleep_for(100ms);

  std::string testdata(100000, 'a');

  Client client(server.endpoint());
  auto echo = client.Call<Echo>(testdata);
  ASSERT_TRUE(echo);
  EXPECT_EQ(echo->data, testdata);
}