memgraph/tests/unit/rpc_worker_clients.cpp
Matej Ferencevic 4e996d2667 Ensure that the durability directory is unique
Summary:
This change improves detection of erroneous situations when starting a
distributed cluster on a single machine. It asserts that the user hasn't
started multiple memgraph nodes on the same machine with the same durability
directory. This diff also improves worker registration: workers no longer need
an explicitly set IP address, because the master deduces it from the
connecting IP when the worker registers.
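
As a rough illustration of the registration change (a sketch only; the type
and function names below are invented for this example and are not the actual
memgraph API), the master can combine the address it sees the registration
connection coming from with the port the worker advertises:

#include <cstdint>
#include <string>

// Hypothetical registration request: the worker sends only its RPC port and
// its durability directory, not its IP address.
struct RegisterWorkerReq {
  int desired_worker_id;
  uint16_t rpc_port;
  std::string durability_directory;
};

struct Endpoint {
  std::string address;
  uint16_t port;
};

// The master deduces the worker's endpoint from the peer address of the
// connection that carried the registration request.
Endpoint DeduceWorkerEndpoint(const RegisterWorkerReq &req,
                              const Endpoint &connection_peer) {
  return {connection_peer.address, req.rpc_port};
}

The durability-directory check can then amount to asserting that no two nodes
registered from the same machine share the same directory.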

Reviewers: teon.banek, buda, msantl

Reviewed By: teon.banek

Subscribers: pullbot

Differential Revision: https://phabricator.memgraph.io/D1582
2018-09-03 13:40:10 +02:00

#include <chrono>
#include <experimental/filesystem>
#include <experimental/optional>
#include <memory>
#include <mutex>
#include <thread>
#include <unordered_map>
#include <vector>

#include "capnp/serialize.h"
#include "fmt/format.h"
#include "gmock/gmock.h"
#include "gtest/gtest.h"

#include "communication/rpc/messages.hpp"
#include "communication/rpc/server.hpp"
#include "distributed/cluster_discovery_master.hpp"
#include "distributed/cluster_discovery_worker.hpp"
#include "distributed/coordination_master.hpp"
#include "distributed/coordination_worker.hpp"
#include "distributed/rpc_worker_clients.hpp"
#include "distributed/serialization.hpp"
#include "io/network/endpoint.hpp"
#include "utils/file.hpp"

namespace fs = std::experimental::filesystem;
using namespace std::literals::chrono_literals;

namespace distributed {

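// A pair of no-payload RPC messages used only so the test can count how many
// times each worker's RPC handler gets invoked.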
struct IncrementCounterReq {
  using Capnp = ::capnp::AnyPointer;
  static const communication::rpc::MessageType TypeInfo;
  void Save(::capnp::AnyPointer::Builder *) const {}
  void Load(const ::capnp::AnyPointer::Reader &) {}
};

const communication::rpc::MessageType IncrementCounterReq::TypeInfo{
    0, "IncrementCounterReq"};

struct IncrementCounterRes {
  using Capnp = ::capnp::AnyPointer;
  static const communication::rpc::MessageType TypeInfo;
  void Save(::capnp::AnyPointer::Builder *) const {}
  void Load(const ::capnp::AnyPointer::Reader &) {}
};

const communication::rpc::MessageType IncrementCounterRes::TypeInfo{
    1, "IncrementCounterRes"};

using IncrementCounterRpc =
    communication::rpc::RequestResponse<IncrementCounterReq,
                                        IncrementCounterRes>;

}  // namespace distributed
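
// Fixture that starts a master (RPC server, coordination and cluster
// discovery) plus kWorkerCount workers, each of which registers with the
// master over RPC.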
class RpcWorkerClientsTest : public ::testing::Test {
 protected:
  const io::network::Endpoint kLocalHost{"127.0.0.1", 0};
  const int kWorkerCount = 2;

  void SetUp() override {
    ASSERT_TRUE(utils::EnsureDir(tmp_dir_));
    master_coord_->SetRecoveredSnapshot(std::experimental::nullopt);
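    // Start an RPC server, coordination and cluster discovery client for
    // every worker, register it with the master under its own durability
    // directory, and install a handler that counts IncrementCounterRpc calls.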
    for (int i = 1; i <= kWorkerCount; ++i) {
      workers_server_.emplace_back(
          std::make_unique<communication::rpc::Server>(kLocalHost));

      workers_coord_.emplace_back(
          std::make_unique<distributed::WorkerCoordination>(
              *workers_server_.back(), master_server_.endpoint()));

      cluster_discovery_.emplace_back(
          std::make_unique<distributed::ClusterDiscoveryWorker>(
              *workers_server_.back(), *workers_coord_.back(),
              rpc_workers_.GetClientPool(0)));

      cluster_discovery_.back()->RegisterWorker(
          i, tmp_dir(fmt::format("worker{}", i)));

      workers_server_.back()->Register<distributed::IncrementCounterRpc>(
          [this, i](const auto &req_reader, auto *res_builder) {
            std::unique_lock<std::mutex> lock(mutex_);
            workers_cnt_[i]++;
          });
    }
  }

  void TearDown() override {
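    // Each worker waits for the master's shutdown notification in its own
    // thread and then destroys its server; resetting master_coord_ below is
    // what triggers that notification.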
    std::vector<std::thread> wait_on_shutdown;
    for (size_t i = 0; i < workers_coord_.size(); ++i) {
      wait_on_shutdown.emplace_back([i, this]() {
        workers_coord_[i]->WaitForShutdown();
        workers_server_[i] = nullptr;
      });
    }

    std::this_thread::sleep_for(300ms);
    // Starts server shutdown and notifies the workers
    master_coord_ = std::experimental::nullopt;
    for (auto &worker : wait_on_shutdown) worker.join();

    // Cleanup temporary directory
    if (fs::exists(tmp_dir_)) fs::remove_all(tmp_dir_);
  }

  fs::path tmp_dir(const fs::path &path) const { return tmp_dir_ / path; }

  fs::path tmp_dir_{fs::temp_directory_path() /
                    "MG_test_unit_rpc_worker_clients"};

  std::vector<std::unique_ptr<communication::rpc::Server>> workers_server_;
  std::vector<std::unique_ptr<distributed::WorkerCoordination>> workers_coord_;
  std::vector<std::unique_ptr<distributed::ClusterDiscoveryWorker>>
      cluster_discovery_;
  std::mutex mutex_;
  std::unordered_map<int, int> workers_cnt_;

  communication::rpc::Server master_server_{kLocalHost};
  std::experimental::optional<distributed::MasterCoordination> master_coord_{
      master_server_.endpoint()};
  distributed::RpcWorkerClients rpc_workers_{*master_coord_};
  distributed::ClusterDiscoveryMaster cluster_discovery_master_{
      master_server_, *master_coord_, rpc_workers_, tmp_dir("master")};
};
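
// The worker ids known to the RPC clients should match the master
// coordination's view of the cluster.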
TEST_F(RpcWorkerClientsTest, GetWorkerIds) {
  EXPECT_THAT(rpc_workers_.GetWorkerIds(), testing::UnorderedElementsAreArray(
                                               master_coord_->GetWorkerIds()));
}
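
// Asking for the same worker id twice should return the same client pool,
// while different worker ids get distinct pools.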
TEST_F(RpcWorkerClientsTest, GetClientPool) {
  auto &pool1 = rpc_workers_.GetClientPool(1);
  auto &pool2 = rpc_workers_.GetClientPool(2);
  EXPECT_NE(&pool1, &pool2);
  EXPECT_EQ(&pool1, &rpc_workers_.GetClientPool(1));
}
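
// ExecuteOnWorker should invoke the RPC only on the worker with the given id.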
TEST_F(RpcWorkerClientsTest, ExecuteOnWorker) {
  auto execute = [](int worker_id, auto &client) -> void {
    ASSERT_TRUE(client.template Call<distributed::IncrementCounterRpc>());
  };

  rpc_workers_.ExecuteOnWorker<void>(1, execute).get();
  EXPECT_EQ(workers_cnt_[0], 0);
  EXPECT_EQ(workers_cnt_[1], 1);
  EXPECT_EQ(workers_cnt_[2], 0);
}
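
// ExecuteOnWorkers runs the functor on every worker except the one with the
// given id (here 0, the master), so both workers get incremented once.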
TEST_F(RpcWorkerClientsTest, ExecuteOnWorkers) {
  auto execute = [](int worker_id, auto &client) -> void {
    ASSERT_TRUE(client.template Call<distributed::IncrementCounterRpc>());
  };

  // Skip master
  for (auto &future : rpc_workers_.ExecuteOnWorkers<void>(0, execute))
    future.get();

  EXPECT_EQ(workers_cnt_[0], 0);
  EXPECT_EQ(workers_cnt_[1], 1);
  EXPECT_EQ(workers_cnt_[2], 1);
}