memgraph/tests/unit/utils_resource_lock.cpp
// Copyright 2023 Memgraph Ltd.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
// License, and you may not use this file except in compliance with the Business Source License.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
#include "utils/resource_lock.hpp"
#include <gtest/gtest.h>
#include <shared_mutex>
#include <thread>
using namespace memgraph::utils;
// Define a test fixture for the ResourceLock class
class ResourceLockTest : public testing::Test {
 protected:
  ResourceLock lock;

  void SetUp() override {
    // Setup code, if any
  }
  void TearDown() override {
    // Tear down code, if any
  }
};
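
// Compile-time sketch of the interface these tests assume: ResourceLock is
// expected to model the standard Lockable and SharedLockable named
// requirements, so it can back std::lock_guard, std::unique_lock and
// std::shared_lock below. SharedLockableLike is a hypothetical helper
// concept, not part of the library.
template <typename L>
concept SharedLockableLike = requires(L l) {
  l.lock();
  l.unlock();
  l.try_lock();
  l.lock_shared();
  l.unlock_shared();
  l.try_lock_shared();
};
static_assert(SharedLockableLike<ResourceLock>,
              "ResourceLock is assumed to support both unique and shared locking");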

TEST_F(ResourceLockTest, MultiThreadedUniqueAccess) {
  constexpr int num_threads = 10;
  int counter = 0;
  // Lambda representing the task performed by each thread
  auto unique_task = [&]() {
    for (int i = 0; i < 100; ++i) {
      lock.lock();
      // Critical section: increment the counter safely under the unique lock
      ++counter;
      lock.unlock();
    }
  };
  // Create multiple threads and execute the task concurrently
  std::vector<std::thread> threads;
  for (int i = 0; i < num_threads; ++i) {
    threads.emplace_back(unique_task);
  }
  // Wait for all threads to finish
  for (auto &thread : threads) {
    thread.join();
  }
  // Assert that the counter equals the total number of iterations
  ASSERT_EQ(counter, num_threads * 100);
}
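
// A minimal sketch of the same exclusive critical section written with the
// RAII wrapper std::lock_guard; it assumes only ResourceLock's lock()/unlock()
// interface (exercised above) and releases the lock on scope exit, even if
// the body throws. This is a hypothetical variant, not part of the suite.
TEST_F(ResourceLockTest, MultiThreadedUniqueAccessRAII) {
  constexpr int num_threads = 10;
  int counter = 0;
  auto unique_task = [&]() {
    for (int i = 0; i < 100; ++i) {
      std::lock_guard<ResourceLock> guard(lock);  // unlocks on scope exit
      ++counter;
    }
  };
  std::vector<std::jthread> threads;
  for (int i = 0; i < num_threads; ++i) {
    threads.emplace_back(unique_task);
  }
  threads.clear();  // std::jthread joins on destruction
  ASSERT_EQ(counter, num_threads * 100);
}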

TEST_F(ResourceLockTest, MultiThreadedSharedAccess) {
  constexpr int num_threads = 10;
  int counter = 123;
  // Lambda representing the shared (read-only) task performed by each thread
  auto shared_task = [&]() {
    for (int i = 0; i < 100; ++i) {
      lock.lock_shared();
      // Read the counter value safely using shared access
      EXPECT_EQ(counter, 123);
      lock.unlock_shared();
    }
  };
  // Create multiple threads and execute the shared task concurrently
  std::vector<std::thread> threads;
  for (int i = 0; i < num_threads; ++i) {
    threads.emplace_back(shared_task);
  }
  // Wait for all threads to finish
  for (auto &thread : threads) {
    thread.join();
  }
}
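
// The reader side maps onto std::shared_lock the same way; a minimal sketch
// (hypothetical test), assuming ResourceLock models SharedLockable, where the
// guard releases shared access on scope exit instead of a manual
// unlock_shared() call.
TEST_F(ResourceLockTest, MultiThreadedSharedAccessRAII) {
  constexpr int num_threads = 10;
  int counter = 123;
  auto shared_task = [&]() {
    for (int i = 0; i < 100; ++i) {
      std::shared_lock<ResourceLock> guard(lock);  // unlock_shared on scope exit
      EXPECT_EQ(counter, 123);
    }
  };
  std::vector<std::jthread> threads;
  for (int i = 0; i < num_threads; ++i) {
    threads.emplace_back(shared_task);
  }
  threads.clear();  // std::jthread joins on destruction
}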

TEST_F(ResourceLockTest, MultiThreadedMixedAccess) {
  constexpr int num_threads = 10;
  int counter = 0;
  // Shared (read-only) task: each thread checks the counter under shared access
  auto shared_task = [&](int expecting) {
    for (int i = 0; i < 100; ++i) {
      lock.lock_shared();
      // Read the counter value safely using shared access
      EXPECT_EQ(counter, expecting);
      lock.unlock_shared();
    }
  };
  // Unique task: each thread increments the counter under exclusive access
  auto unique_task = [&]() {
    for (int i = 0; i < 100; ++i) {
      lock.lock();
      // Critical section: increment the counter safely under the unique lock
      ++counter;
      lock.unlock();
    }
  };
  std::vector<std::jthread> threads;
  // Unique vs shared test 1
  {
    std::unique_lock<ResourceLock> l(lock);
    // Uniquely locked; spin up shared tasks and update while they are running
    for (int i = 0; i < num_threads; ++i) {
      threads.emplace_back([shared_task] { return shared_task(3456); });
    }
    // Update while still holding unique access; readers are blocked until the
    // unique lock is released, so they can only ever observe the new value
    std::this_thread::sleep_for(std::chrono::milliseconds(10));
    counter = 3456;
  }
  // Wait for all threads to finish (std::jthread joins on destruction)
  threads.clear();
  // Unique vs shared test 2
  {
    std::shared_lock<ResourceLock> l(lock);
    // Shared-locked; spin up unique tasks and read while they are running
    for (int i = 0; i < num_threads; ++i) {
      threads.emplace_back(unique_task);
    }
    // Read while still holding shared access; writers are blocked until the
    // shared lock is released, so the counter cannot have changed yet
    std::this_thread::sleep_for(std::chrono::milliseconds(10));
    EXPECT_EQ(counter, 3456);
  }
  // Wait for all threads to finish (std::jthread joins on destruction)
  threads.clear();
  EXPECT_EQ(counter, 3456 + num_threads * 100);
}

TEST_F(ResourceLockTest, TryLock) {
  ASSERT_TRUE(lock.try_lock());
  ASSERT_FALSE(lock.try_lock());         // already uniquely locked
  ASSERT_FALSE(lock.try_lock_shared());  // unique access excludes readers
  lock.unlock();
  ASSERT_TRUE(lock.try_lock_shared());
  ASSERT_TRUE(lock.try_lock_shared());
  ASSERT_FALSE(lock.try_lock());  // active readers exclude unique access
  ASSERT_TRUE(lock.try_lock_shared());
  ASSERT_TRUE(lock.try_lock_shared());
  lock.unlock_shared();
  lock.unlock_shared();
  lock.unlock_shared();
  lock.unlock_shared();
  ASSERT_TRUE(lock.try_lock());  // all readers released; unique access succeeds
  lock.unlock();
}
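
// A sketch of the same try-lock semantics via the standard RAII adaptors
// (hypothetical test), assuming ResourceLock models SharedLockable:
// std::unique_lock and std::shared_lock constructed with std::try_to_lock
// forward to try_lock() and try_lock_shared() respectively.
TEST_F(ResourceLockTest, TryLockRAII) {
  std::unique_lock<ResourceLock> unique(lock, std::try_to_lock);
  ASSERT_TRUE(unique.owns_lock());
  {
    std::shared_lock<ResourceLock> shared(lock, std::try_to_lock);
    ASSERT_FALSE(shared.owns_lock());  // unique access blocks readers
  }
  unique.unlock();
  std::shared_lock<ResourceLock> shared(lock, std::try_to_lock);
  ASSERT_TRUE(shared.owns_lock());  // free again once the writer released
}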