Replace debug_assert, permanent_assert with DCHECK/CHECK

Summary: Phase 2. Phase 3. Phase 4. Phase 5. Complete refactor.

Reviewers: florijan, mislav.bradac

Reviewed By: mislav.bradac

Subscribers: mislav.bradac, pullbot

Differential Revision: https://phabricator.memgraph.io/D895
parent 4f7f59f9fc
commit fcecb14545
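
The change applied throughout the diff below is mechanical: each assertion keeps its
condition, the message argument of debug_assert/permanent_assert moves into a streamed
operand, and permanent_fail becomes an unconditional fatal log. A sketch of the mapping
(not code taken from this commit; the Shift() function is a simplified stand-in modeled
on the Bolt buffer change below):

    // Rewrite rules, illustrated:
    //   debug_assert(cond, "msg");      ->  DCHECK(cond) << "msg";  // checked in debug builds only
    //   permanent_assert(cond, "msg");  ->  CHECK(cond) << "msg";   // always checked
    //   permanent_fail("msg");          ->  LOG(FATAL) << "msg";    // unconditional abort
    #include <cstddef>
    #include <cstring>

    #include "glog/logging.h"

    void Shift(size_t len, size_t &size, char *data) {
      DCHECK(len <= size) << "Tried to shift more data than the buffer has!";
      std::memmove(data, data + len, size - len);
      size -= len;
    }

glog's DCHECK is by default compiled out in NDEBUG builds, matching the old debug_assert
semantics, while CHECK and LOG(FATAL) always fire, matching permanent_assert and
permanent_fail.
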
@@ -3,9 +3,10 @@
#include <thread>
#include <vector>

#include "glog/logging.h"

#include "memory/allocator.hpp"
#include "memory/maker.hpp"
#include "utils/assert.hpp"
#include "utils/measure_time.hpp"

struct TestStructure {
@@ -50,9 +51,9 @@ int main(void) {
}
});
std::cout << "Fast (fast allocator): " << elapsed_fast << "ms" << std::endl;
permanent_assert(elapsed_fast < elapsed_classic,
"Custom fast allocator "
"has to perform faster on simple array allocation");
CHECK(elapsed_fast < elapsed_classic)
<< "Custom fast allocator "
"has to perform faster on simple array allocation";

return 0;
}

@@ -1,6 +1,6 @@
#pragma once

#include "utils/assert.hpp"
#include "glog/logging.h"

// Like option just for pointers. More efficent than option.
template <class T>
@@ -12,7 +12,7 @@ class OptionPtr {
bool is_present() { return ptr != nullptr; }

T *get() {
debug_assert(is_present(), "Data is not present.");
DCHECK(is_present()) << "Data is not present.";
return ptr;
}

@@ -3,8 +3,9 @@
#include <cstring>
#include <functional>

#include "glog/logging.h"

#include "option_ptr.hpp"
#include "utils/assert.hpp"
#include "utils/crtp.hpp"

// RobinHood base.
@@ -95,20 +96,20 @@ class RhBase {
IteratorBase(IteratorBase &&) = default;

D *operator*() {
debug_assert(index < map->capacity && map->array[index].valid(),
"Either index is invalid or data is not valid.");
DCHECK(index < map->capacity && map->array[index].valid())
<< "Either index is invalid or data is not valid.";
return map->array[index].ptr();
}

D *operator->() {
debug_assert(index < map->capacity && map->array[index].valid(),
"Either index is invalid or data is not valid.");
DCHECK(index < map->capacity && map->array[index].valid())
<< "Either index is invalid or data is not valid.";
return map->array[index].ptr();
}

It &operator++() {
debug_assert(index < map->capacity && map->array[index].valid(),
"Either index is invalid or data is not valid.");
DCHECK(index < map->capacity && map->array[index].valid())
<< "Either index is invalid or data is not valid.";
auto mask = map->mask();
do {
advanced++;
@@ -298,14 +299,14 @@ class RhBase {
* K must be comparable with ==.
* HashMap behaves as if it isn't owner of entries.
* BE CAREFUL - this structure assumes that the pointer to Data is 8-alligned!
*/
*/
template <class K, class D, size_t init_size_pow2 = 2>
class RhHashMap : public RhBase<K, D, init_size_pow2> {
typedef RhBase<K, D, init_size_pow2> base;
using base::array;
using base::index;
using base::capacity;
using base::count;
using base::index;
using typename base::Combined;

void increase_size() {
@@ -357,8 +358,8 @@ class RhHashMap : public RhBase<K, D, init_size_pow2> {

// Inserts element. Returns true if element wasn't in the map.
bool insert(D *data) {
permanent_assert(!(((uint64_t) static_cast<void *>(data) & 7)),
"Data is not 8-alligned.");
CHECK(!(((uint64_t) static_cast<void *>(data) & 7)))
<< "Data is not 8-alligned.";
if (count < capacity) {
size_t mask = this->mask();
auto key = std::ref(data->get_key());

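The alignment precondition in RhHashMap::insert above works because an 8-byte-aligned
pointer has its three lowest bits equal to zero, presumably so the map can pack extra
bits into the pointer, and the CHECK rejects any pointer where (ptr & 7) is non-zero.
A small standalone illustration of the same test (the CheckAligned() helper is
hypothetical, not part of the diff):

    #include <cstdint>

    #include "glog/logging.h"

    // Abort if the low three bits of the pointer are set, i.e. if it is not
    // 8-byte aligned.
    void CheckAligned(const void *data) {
      CHECK(!(reinterpret_cast<std::uintptr_t>(data) & 7))
          << "Data is not 8-aligned.";
    }
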
@@ -5,9 +5,10 @@
#include <memory>
#include <vector>

#include "glog/logging.h"

#include "communication/bolt/v1/constants.hpp"
#include "io/network/stream_buffer.hpp"
#include "utils/assert.hpp"
#include "utils/bswap.hpp"

namespace communication::bolt {
@@ -54,7 +55,7 @@ class Buffer {
*/
void Written(size_t len) {
size_ += len;
debug_assert(size_ <= Size, "Written more than storage has space!");
DCHECK(size_ <= Size) << "Written more than storage has space!";
}

/**
@@ -65,7 +66,7 @@ class Buffer {
* the buffer
*/
void Shift(size_t len) {
debug_assert(len <= size_, "Tried to shift more data than the buffer has!");
DCHECK(len <= size_) << "Tried to shift more data than the buffer has!";
memmove(data_, data_ + len, size_ - len);
size_ -= len;
}
@@ -90,4 +91,4 @@ class Buffer {
uint8_t data_[Size];
size_t size_{0};
};
}
} // namespace communication::bolt

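With the Bolt buffer now logging through glog directly, the DLOG/DCHECK calls above
typically expect the process to initialize glog once at startup (glog still writes to
stderr without it). This is not part of the diff; a typical initialization, shown only
as an assumed convention, looks like:

    #include "glog/logging.h"

    int main(int argc, char *argv[]) {
      google::InitGoogleLogging(argv[0]);     // route LOG/DLOG/CHECK output to glog sinks
      google::InstallFailureSignalHandler();  // print a stack trace on fatal signals
      LOG(INFO) << "glog initialized";
      return 0;
    }
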
@@ -1,3 +1,5 @@
#include "glog/logging.h"

#include "communication/bolt/v1/decoder/decoded_value.hpp"

namespace communication::bolt {
@@ -74,7 +76,7 @@ DecodedValue::DecodedValue(const DecodedValue &other) : type_(other.type_) {
new (&path_v) DecodedPath(other.path_v);
return;
}
permanent_fail("Unsupported DecodedValue::Type");
LOG(FATAL) << "Unsupported DecodedValue::Type";
}

DecodedValue &DecodedValue::operator=(const DecodedValue &other) {
@@ -117,7 +119,7 @@ DecodedValue &DecodedValue::operator=(const DecodedValue &other) {
new (&path_v) DecodedPath(other.path_v);
return *this;
}
permanent_fail("Unsupported DecodedValue::Type");
LOG(FATAL) << "Unsupported DecodedValue::Type";
}
return *this;
}
@@ -160,7 +162,7 @@ DecodedValue::~DecodedValue() {
path_v.~DecodedPath();
return;
}
permanent_fail("Unsupported DecodedValue::Type");
LOG(FATAL) << "Unsupported DecodedValue::Type";
}

DecodedValue::operator query::TypedValue() const {
@@ -224,8 +226,7 @@ std::ostream &operator<<(std::ostream &os, const DecodedUnboundedEdge &edge) {

std::ostream &operator<<(std::ostream &os, const DecodedPath &path) {
os << path.vertices[0];
debug_assert(path.indices.size() % 2 == 0,
"Must have even number of indices");
DCHECK(path.indices.size() % 2 == 0) << "Must have even number of indices";
for (auto it = path.indices.begin(); it != path.indices.end();) {
auto edge_ind = *it++;
auto vertex_ind = *it++;
@@ -276,7 +277,7 @@ std::ostream &operator<<(std::ostream &os, const DecodedValue &value) {
case DecodedValue::Type::Path:
return os << value.ValuePath();
}
permanent_fail("Unsupported DecodedValue::Type");
LOG(FATAL) << "Unsupported DecodedValue::Type";
}

std::ostream &operator<<(std::ostream &os, const DecodedValue::Type type) {
@@ -304,6 +305,6 @@ std::ostream &operator<<(std::ostream &os, const DecodedValue::Type type) {
case DecodedValue::Type::Path:
return os << "path";
}
permanent_fail("Unsupported DecodedValue::Type");
LOG(FATAL) << "Unsupported DecodedValue::Type";
}
}

@@ -168,7 +168,7 @@ class Decoder {
private:
bool ReadNull(const Marker &marker, DecodedValue *data) {
DLOG(INFO) << "[ReadNull] Start";
debug_assert(marker == Marker::Null, "Received invalid marker!");
DCHECK(marker == Marker::Null) << "Received invalid marker!";
*data = DecodedValue();
DLOG(INFO) << "[ReadNull] Success";
return true;
@@ -176,8 +176,8 @@

bool ReadBool(const Marker &marker, DecodedValue *data) {
DLOG(INFO) << "[ReadBool] Start";
debug_assert(marker == Marker::False || marker == Marker::True,
"Received invalid marker!");
DCHECK(marker == Marker::False || marker == Marker::True)
<< "Received invalid marker!";
if (marker == Marker::False) {
*data = DecodedValue(false);
} else {
@@ -243,7 +243,7 @@ class Decoder {
uint64_t value;
double ret;
DLOG(INFO) << "[ReadDouble] Start";
debug_assert(marker == Marker::Float64, "Received invalid marker!");
DCHECK(marker == Marker::Float64) << "Received invalid marker!";
if (!buffer_.Read(reinterpret_cast<uint8_t *>(&value), sizeof(value))) {
DLOG(WARNING) << "[ReadDouble] Missing data!";
return false;
@@ -523,7 +523,8 @@ class Decoder {
}
for (const auto &vertex : dv.ValueList()) {
if (vertex.type() != DecodedValue::Type::Vertex) {
DLOG(WARNING) << "[ReadPath] Received a '" << vertex.type() << "' element in the vertices list!";
DLOG(WARNING) << "[ReadPath] Received a '" << vertex.type()
<< "' element in the vertices list!";
return false;
}
path.vertices.emplace_back(vertex.ValueVertex());
@@ -536,7 +537,8 @@
}
for (const auto &edge : dv.ValueList()) {
if (edge.type() != DecodedValue::Type::UnboundedEdge) {
DLOG(WARNING) << "[ReadPath] Received a '" << edge.type() << "' element in the edges list!";
DLOG(WARNING) << "[ReadPath] Received a '" << edge.type()
<< "' element in the edges list!";
return false;
}
path.edges.emplace_back(edge.ValueUnboundedEdge());
@@ -549,7 +551,8 @@
}
for (const auto &index : dv.ValueList()) {
if (index.type() != DecodedValue::Type::Int) {
DLOG(WARNING) << "[ReadPath] Received a '" << index.type() << "' element in the indices list (expected an int)!";
DLOG(WARNING) << "[ReadPath] Received a '" << index.type()
<< "' element in the indices list (expected an int)!";
return false;
}
path.indices.emplace_back(index.ValueInt());
@@ -562,4 +565,4 @@ class Decoder {
return true;
}
};
}
} // namespace communication::bolt

@@ -1,5 +1,7 @@
#pragma once

#include "glog/logging.h"

#include "io/network/epoll.hpp"
#include "io/network/socket.hpp"

@@ -60,8 +62,8 @@ class Session {
}

~Session() {
debug_assert(!db_accessor_,
"Transaction should have already be closed in Close");
DCHECK(!db_accessor_)
<< "Transaction should have already be closed in Close";
}

/**
@@ -167,7 +169,7 @@ class Session {
* Commits associated transaction.
*/
void Commit() {
debug_assert(db_accessor_, "Commit called and there is no transaction");
DCHECK(db_accessor_) << "Commit called and there is no transaction";
db_accessor_->Commit();
db_accessor_ = nullptr;
}
@@ -176,7 +178,7 @@ class Session {
* Aborts associated transaction.
*/
void Abort() {
debug_assert(db_accessor_, "Abort called and there is no transaction");
DCHECK(db_accessor_) << "Abort called and there is no transaction";
db_accessor_->Abort();
db_accessor_ = nullptr;
}
@@ -215,4 +217,4 @@ class Session {
Close();
}
};
}
} // namespace communication::bolt

@@ -55,7 +55,7 @@ State StateErrorRun(Session &session, State state) {
} else if (state == State::ErrorWaitForRollback) {
return State::WaitForRollback;
} else {
permanent_assert(false, "Shouldn't happen");
LOG(FATAL) << "Shouldn't happen";
}
} else {
uint8_t value = underlying_cast(marker);
@@ -89,4 +89,4 @@ State StateErrorRun(Session &session, State state) {
return state;
}
}
}
} // namespace communication::bolt

@@ -63,8 +63,8 @@ State HandleRun(Session &session, State state, Marker marker) {
return State::Close;
}

debug_assert(!session.encoder_buffer_.HasData(),
"There should be no data to write in this state");
DCHECK(!session.encoder_buffer_.HasData())
<< "There should be no data to write in this state";

DLOG(INFO) << fmt::format("[Run] '{}'", query.ValueString());
bool in_explicit_transaction = false;
@@ -313,4 +313,4 @@ State StateExecutingRun(Session &session, State state) {
return State::Close;
}
}
}
} // namespace communication::bolt

@@ -18,8 +18,8 @@ namespace communication::bolt {
*/
template <typename Session>
State StateInitRun(Session &session) {
debug_assert(!session.encoder_buffer_.HasData(),
"There should be no data to write in this state");
DCHECK(!session.encoder_buffer_.HasData())
<< "There should be no data to write in this state";
DLOG(INFO) << "Parsing message";

Marker marker;
@@ -68,4 +68,4 @@ State StateInitRun(Session &session) {

return State::Idle;
}
}
} // namespace communication::bolt

@ -2,8 +2,8 @@
|
||||
|
||||
#include <map>
|
||||
|
||||
#include "glog/logging.h"
|
||||
#include "query/typed_value.hpp"
|
||||
#include "utils/assert.hpp"
|
||||
|
||||
/**
|
||||
* A mocker for the data output record stream.
|
||||
@ -20,33 +20,33 @@ class ResultStreamFaker {
|
||||
ResultStreamFaker &operator=(ResultStreamFaker &&) = default;
|
||||
|
||||
void Header(const std::vector<std::string> &fields) {
|
||||
debug_assert(current_state_ == State::Start,
|
||||
"Headers can only be written in the beginning");
|
||||
DCHECK(current_state_ == State::Start)
|
||||
<< "Headers can only be written in the beginning";
|
||||
header_ = fields;
|
||||
current_state_ = State::WritingResults;
|
||||
}
|
||||
|
||||
void Result(const std::vector<query::TypedValue> &values) {
|
||||
debug_assert(current_state_ == State::WritingResults,
|
||||
"Can't accept results before header nor after summary");
|
||||
DCHECK(current_state_ == State::WritingResults)
|
||||
<< "Can't accept results before header nor after summary";
|
||||
results_.push_back(values);
|
||||
}
|
||||
|
||||
void Summary(const std::map<std::string, query::TypedValue> &summary) {
|
||||
debug_assert(current_state_ != State::Done, "Can only send a summary once");
|
||||
DCHECK(current_state_ != State::Done) << "Can only send a summary once";
|
||||
summary_ = summary;
|
||||
current_state_ = State::Done;
|
||||
}
|
||||
|
||||
const auto &GetHeader() const {
|
||||
debug_assert(current_state_ != State::Start, "Header not written");
|
||||
DCHECK(current_state_ != State::Start) << "Header not written";
|
||||
return header_;
|
||||
}
|
||||
|
||||
const auto &GetResults() const { return results_; }
|
||||
|
||||
const auto &GetSummary() const {
|
||||
debug_assert(current_state_ == State::Done, "Summary not written");
|
||||
DCHECK(current_state_ == State::Done) << "Summary not written";
|
||||
return summary_;
|
||||
}
|
||||
|
||||
|
@ -87,8 +87,7 @@ class Server {
|
||||
: io::network::BaseListener(socket), server_(server) {}
|
||||
|
||||
void OnData() {
|
||||
debug_assert(server_.idx_ < server_.workers_.size(),
|
||||
"Invalid worker id.");
|
||||
DCHECK(server_.idx_ < server_.workers_.size()) << "Invalid worker id.";
|
||||
DLOG(INFO) << "On connect";
|
||||
auto connection = AcceptConnection();
|
||||
if (UNLIKELY(!connection)) {
|
||||
|
@ -2,9 +2,9 @@
|
||||
|
||||
#include <atomic>
|
||||
|
||||
#include "glog/logging.h"
|
||||
#include "threading/sync/lockable.hpp"
|
||||
#include "threading/sync/spinlock.hpp"
|
||||
#include "utils/assert.hpp"
|
||||
|
||||
/**
|
||||
* A sequentially ordered non-unique lock-free concurrent collection of bits.
|
||||
@ -36,17 +36,17 @@ class DynamicBitset {
|
||||
static constexpr size_t kSize = sizeof(block_t) * 8;
|
||||
|
||||
block_t at(size_t k, size_t n) const {
|
||||
debug_assert(k + n - 1 < kSize, "Invalid index.");
|
||||
DCHECK(k + n - 1 < kSize) << "Invalid index.";
|
||||
return (block_.load() >> k) & bitmask(n);
|
||||
}
|
||||
|
||||
void set(size_t k, size_t n) {
|
||||
debug_assert(k + n - 1 < kSize, "Invalid index.");
|
||||
DCHECK(k + n - 1 < kSize) << "Invalid index.";
|
||||
block_.fetch_or(bitmask(n) << k);
|
||||
}
|
||||
|
||||
void clear(size_t k, size_t n) {
|
||||
debug_assert(k + n - 1 < kSize, "Invalid index.");
|
||||
DCHECK(k + n - 1 < kSize) << "Invalid index.";
|
||||
block_.fetch_and(~(bitmask(n) << k));
|
||||
}
|
||||
|
||||
@ -151,12 +151,12 @@ class DynamicBitset {
|
||||
private:
|
||||
// Finds the chunk to which k-th bit belongs fails if k is out of bounds.
|
||||
const Chunk &FindChunk(size_t &k) const {
|
||||
debug_assert(k < head_.load()->high(), "Index out of bounds");
|
||||
DCHECK(k < head_.load()->high()) << "Index out of bounds";
|
||||
Chunk *chunk = head_;
|
||||
|
||||
while (k < chunk->low()) {
|
||||
chunk = chunk->next_;
|
||||
debug_assert(chunk != nullptr, "chunk is nullptr");
|
||||
DCHECK(chunk != nullptr) << "chunk is nullptr";
|
||||
}
|
||||
k -= chunk->low();
|
||||
return *chunk;
|
||||
|
@ -5,7 +5,7 @@
|
||||
#include <iostream>
|
||||
#include <string>
|
||||
|
||||
#include "utils/assert.hpp"
|
||||
#include "glog/logging.h"
|
||||
|
||||
/**
|
||||
* Bitset data structure with a number of bits provided in constructor.
|
||||
@ -27,9 +27,9 @@ class Bitset {
|
||||
* @param idx position of bit.
|
||||
*/
|
||||
void Set(int idx) {
|
||||
debug_assert(idx >= 0, "Invalid bit location.");
|
||||
debug_assert(idx < static_cast<int64_t>(blocks_.size()) * block_size_,
|
||||
"Invalid bit location.");
|
||||
DCHECK(idx >= 0) << "Invalid bit location.";
|
||||
DCHECK(idx < static_cast<int64_t>(blocks_.size()) * block_size_)
|
||||
<< "Invalid bit location.";
|
||||
int bucket = idx / block_size_;
|
||||
blocks_[bucket] |= TStore(1) << idx % block_size_;
|
||||
}
|
||||
@ -39,9 +39,9 @@ class Bitset {
|
||||
* @return 1/0.
|
||||
*/
|
||||
bool At(int idx) const {
|
||||
debug_assert(idx >= 0, "Invalid bit location.");
|
||||
debug_assert(idx < static_cast<int64_t>(blocks_.size()) * block_size_,
|
||||
"Invalid bit location.");
|
||||
DCHECK(idx >= 0) << "Invalid bit location.";
|
||||
DCHECK(idx < static_cast<int64_t>(blocks_.size()) * block_size_)
|
||||
<< "Invalid bit location.";
|
||||
int bucket = idx / block_size_;
|
||||
return (blocks_[bucket] >> (idx % block_size_)) & 1;
|
||||
}
|
||||
@ -51,8 +51,8 @@ class Bitset {
|
||||
* @return intersection.
|
||||
*/
|
||||
Bitset<TStore> Intersect(const Bitset<TStore> &other) const {
|
||||
debug_assert(this->blocks_.size() == other.blocks_.size(),
|
||||
"Bitsets are not of equal size.");
|
||||
DCHECK(this->blocks_.size() == other.blocks_.size())
|
||||
<< "Bitsets are not of equal size.";
|
||||
Bitset<TStore> ret(this->blocks_.size() * this->block_size_);
|
||||
for (int i = 0; i < (int)this->blocks_.size(); ++i) {
|
||||
ret.blocks_[i] = this->blocks_[i] & other.blocks_[i];
|
||||
|
@ -2,7 +2,7 @@
|
||||
|
||||
#include <atomic>
|
||||
#include <utility>
|
||||
#include "utils/assert.hpp"
|
||||
#include "glog/logging.h"
|
||||
#include "utils/crtp.hpp"
|
||||
|
||||
// TODO: reimplement this. It's correct but somewhat inefecient and it could be
|
||||
@ -63,7 +63,7 @@ class ConcurrentList {
|
||||
IteratorBase() : list(nullptr), curr(nullptr) {}
|
||||
|
||||
IteratorBase(ConcurrentList *list) : list(list) {
|
||||
debug_assert(list != nullptr, "List is nullptr.");
|
||||
DCHECK(list != nullptr) << "List is nullptr.";
|
||||
// Increment number of iterators accessing list.
|
||||
list->active_threads_no_++;
|
||||
// Start from the begining of list.
|
||||
@ -97,7 +97,7 @@ class ConcurrentList {
|
||||
head_rem != nullptr && // There is some garbage
|
||||
cas<Node *>(list->removed, head_rem,
|
||||
nullptr) // No new garbage was added.
|
||||
) {
|
||||
) {
|
||||
// Delete all removed node following chain of next_rem starting
|
||||
// from head_rem.
|
||||
auto now = head_rem;
|
||||
@ -113,11 +113,11 @@ class ConcurrentList {
|
||||
IteratorBase &operator=(IteratorBase &&other) = delete;
|
||||
|
||||
T &operator*() const {
|
||||
debug_assert(valid(), "Not valid data.");
|
||||
DCHECK(valid()) << "Not valid data.";
|
||||
return curr->data;
|
||||
}
|
||||
T *operator->() const {
|
||||
debug_assert(valid(), "Not valid data.");
|
||||
DCHECK(valid()) << "Not valid data.";
|
||||
return &(curr->data);
|
||||
}
|
||||
|
||||
@ -125,7 +125,7 @@ class ConcurrentList {
|
||||
|
||||
// Iterating is wait free.
|
||||
It &operator++() {
|
||||
debug_assert(valid(), "Not valid data.");
|
||||
DCHECK(valid()) << "Not valid data.";
|
||||
do {
|
||||
// We don't care about previous unless it's alive. If it's not alive we
|
||||
// are going to look for it again and just incurr performance hit
|
||||
@ -141,7 +141,7 @@ class ConcurrentList {
|
||||
It &operator++(int) { return operator++(); }
|
||||
|
||||
bool is_removed() {
|
||||
debug_assert(valid(), "Not valid data.");
|
||||
DCHECK(valid()) << "Not valid data.";
|
||||
return load(curr->removed);
|
||||
}
|
||||
|
||||
@ -180,7 +180,7 @@ class ConcurrentList {
|
||||
// This can be improved with combinig the removed flag with prev.next or
|
||||
// curr.next
|
||||
bool remove() {
|
||||
debug_assert(valid(), "Not valid data.");
|
||||
DCHECK(valid()) << "Not valid data.";
|
||||
// Try to logically remove it.
|
||||
if (cas(curr->removed, false, true)) {
|
||||
// I removed it!!!
|
||||
|
@ -3,7 +3,7 @@
|
||||
#include <atomic>
|
||||
#include <mutex>
|
||||
|
||||
#include "utils/assert.hpp"
|
||||
#include "glog/logging.h"
|
||||
|
||||
/** @brief A queue with lock-free concurrent push and
|
||||
* single-threaded deletion.
|
||||
@ -57,15 +57,14 @@ class ConcurrentPushQueue {
|
||||
bool operator!=(const Iterator &rhs) const { return !(*this == rhs); }
|
||||
|
||||
Iterator &operator++() {
|
||||
debug_assert(current_ != nullptr, "Prefix increment on invalid iterator");
|
||||
DCHECK(current_ != nullptr) << "Prefix increment on invalid iterator";
|
||||
previous_ = current_;
|
||||
current_ = current_->next_;
|
||||
return *this;
|
||||
}
|
||||
|
||||
Iterator operator++(int) {
|
||||
debug_assert(current_ != nullptr,
|
||||
"Postfix increment on invalid iterator");
|
||||
DCHECK(current_ != nullptr) << "Postfix increment on invalid iterator";
|
||||
Iterator rval(queue_, current_);
|
||||
previous_ = current_;
|
||||
current_ = current_->next_;
|
||||
@ -73,12 +72,12 @@ class ConcurrentPushQueue {
|
||||
}
|
||||
|
||||
TElement &operator*() {
|
||||
debug_assert(current_ != nullptr,
|
||||
"Dereferencing operator on invalid iterator");
|
||||
DCHECK(current_ != nullptr)
|
||||
<< "Dereferencing operator on invalid iterator";
|
||||
return current_->element_;
|
||||
}
|
||||
TElement *operator->() {
|
||||
debug_assert(current_ != nullptr, "Arrow operator on invalid iterator");
|
||||
DCHECK(current_ != nullptr) << "Arrow operator on invalid iterator";
|
||||
return ¤t_->element_;
|
||||
}
|
||||
|
||||
|
@ -5,7 +5,7 @@
|
||||
#include <memory>
|
||||
#include <type_traits>
|
||||
|
||||
#include "utils/assert.hpp"
|
||||
#include "glog/logging.h"
|
||||
#include "utils/crtp.hpp"
|
||||
#include "utils/placeholder.hpp"
|
||||
#include "utils/random/fast_binomial.hpp"
|
||||
@ -256,38 +256,38 @@ class SkipList : private Lockable<lock_t> {
|
||||
IteratorBase(const IteratorBase &) = default;
|
||||
|
||||
const T &operator*() const {
|
||||
debug_assert(node != nullptr, "Node is nullptr.");
|
||||
DCHECK(node != nullptr) << "Node is nullptr.";
|
||||
return node->value();
|
||||
}
|
||||
|
||||
const T *operator->() const {
|
||||
debug_assert(node != nullptr, "Node is nullptr.");
|
||||
DCHECK(node != nullptr) << "Node is nullptr.";
|
||||
return &node->value();
|
||||
}
|
||||
|
||||
T &operator*() {
|
||||
debug_assert(node != nullptr, "Node is nullptr.");
|
||||
DCHECK(node != nullptr) << "Node is nullptr.";
|
||||
return node->value();
|
||||
}
|
||||
|
||||
T *operator->() {
|
||||
debug_assert(node != nullptr, "Node is nullptr.");
|
||||
DCHECK(node != nullptr) << "Node is nullptr.";
|
||||
return &node->value();
|
||||
}
|
||||
|
||||
operator T &() {
|
||||
debug_assert(node != nullptr, "Node is nullptr.");
|
||||
DCHECK(node != nullptr) << "Node is nullptr.";
|
||||
return node->value();
|
||||
}
|
||||
|
||||
It &operator++() {
|
||||
debug_assert(node != nullptr, "Node is nullptr.");
|
||||
DCHECK(node != nullptr) << "Node is nullptr.";
|
||||
node = node->forward(0);
|
||||
return this->derived();
|
||||
}
|
||||
|
||||
bool has_next() {
|
||||
debug_assert(node != nullptr, "Node is nullptr.");
|
||||
DCHECK(node != nullptr) << "Node is nullptr.";
|
||||
return node->forward(0) != nullptr;
|
||||
}
|
||||
|
||||
@ -357,22 +357,22 @@ class SkipList : private Lockable<lock_t> {
|
||||
}
|
||||
|
||||
T &operator*() {
|
||||
debug_assert(node_ != nullptr, "Node is nullptr.");
|
||||
DCHECK(node_ != nullptr) << "Node is nullptr.";
|
||||
return node_->value();
|
||||
}
|
||||
|
||||
T *operator->() {
|
||||
debug_assert(node_ != nullptr, "Node is nullptr.");
|
||||
DCHECK(node_ != nullptr) << "Node is nullptr.";
|
||||
return &node_->value();
|
||||
}
|
||||
|
||||
operator T &() {
|
||||
debug_assert(node_ != nullptr, "Node is nullptr.");
|
||||
DCHECK(node_ != nullptr) << "Node is nullptr.";
|
||||
return node_->value();
|
||||
}
|
||||
|
||||
ReverseIterator &operator++() {
|
||||
debug_assert(node_ != nullptr, "Node is nullptr.");
|
||||
DCHECK(node_ != nullptr) << "Node is nullptr.";
|
||||
do {
|
||||
next();
|
||||
} while (node_->flags.is_marked());
|
||||
@ -458,7 +458,7 @@ class SkipList : private Lockable<lock_t> {
|
||||
|
||||
Accessor(TSkipList *skiplist)
|
||||
: skiplist(skiplist), status_(skiplist->gc.CreateNewAccessor()) {
|
||||
debug_assert(skiplist != nullptr, "Skiplist is nullptr.");
|
||||
DCHECK(skiplist != nullptr) << "Skiplist is nullptr.";
|
||||
}
|
||||
|
||||
public:
|
||||
|
@ -20,31 +20,29 @@ GraphDbAccessor::~GraphDbAccessor() {
|
||||
const std::string &GraphDbAccessor::name() const { return db_.name_; }
|
||||
|
||||
void GraphDbAccessor::AdvanceCommand() {
|
||||
debug_assert(!commited_ && !aborted_, "Accessor committed or aborted");
|
||||
DCHECK(!commited_ && !aborted_) << "Accessor committed or aborted";
|
||||
transaction_->engine_.Advance(transaction_->id_);
|
||||
}
|
||||
|
||||
void GraphDbAccessor::Commit() {
|
||||
debug_assert(!commited_ && !aborted_,
|
||||
"Already aborted or commited transaction.");
|
||||
DCHECK(!commited_ && !aborted_) << "Already aborted or commited transaction.";
|
||||
transaction_->Commit();
|
||||
commited_ = true;
|
||||
}
|
||||
|
||||
void GraphDbAccessor::Abort() {
|
||||
debug_assert(!commited_ && !aborted_,
|
||||
"Already aborted or commited transaction.");
|
||||
DCHECK(!commited_ && !aborted_) << "Already aborted or commited transaction.";
|
||||
transaction_->Abort();
|
||||
aborted_ = true;
|
||||
}
|
||||
|
||||
bool GraphDbAccessor::should_abort() const {
|
||||
debug_assert(!commited_ && !aborted_, "Accessor committed or aborted");
|
||||
DCHECK(!commited_ && !aborted_) << "Accessor committed or aborted";
|
||||
return transaction_->should_abort();
|
||||
}
|
||||
|
||||
VertexAccessor GraphDbAccessor::InsertVertex() {
|
||||
debug_assert(!commited_ && !aborted_, "Accessor committed or aborted");
|
||||
DCHECK(!commited_ && !aborted_) << "Accessor committed or aborted";
|
||||
|
||||
// create a vertex
|
||||
auto vertex_vlist = new mvcc::VersionList<Vertex>(*transaction_);
|
||||
@ -56,7 +54,7 @@ VertexAccessor GraphDbAccessor::InsertVertex() {
|
||||
|
||||
void GraphDbAccessor::BuildIndex(const GraphDbTypes::Label &label,
|
||||
const GraphDbTypes::Property &property) {
|
||||
debug_assert(!commited_ && !aborted_, "Accessor committed or aborted");
|
||||
DCHECK(!commited_ && !aborted_) << "Accessor committed or aborted";
|
||||
|
||||
{
|
||||
// switch the build_in_progress to true
|
||||
@ -70,7 +68,7 @@ void GraphDbAccessor::BuildIndex(const GraphDbTypes::Label &label,
|
||||
bool expected = true;
|
||||
[[gnu::unused]] bool success =
|
||||
db_.index_build_in_progress_.compare_exchange_strong(expected, false);
|
||||
debug_assert(success, "BuildIndexInProgress flag was not set during build");
|
||||
DCHECK(success) << "BuildIndexInProgress flag was not set during build";
|
||||
});
|
||||
|
||||
const LabelPropertyIndex::Key key(label, property);
|
||||
@ -115,7 +113,7 @@ void GraphDbAccessor::BuildIndex(const GraphDbTypes::Label &label,
|
||||
void GraphDbAccessor::UpdateLabelIndices(const GraphDbTypes::Label &label,
|
||||
const VertexAccessor &vertex_accessor,
|
||||
const Vertex *const vertex) {
|
||||
debug_assert(!commited_ && !aborted_, "Accessor committed or aborted");
|
||||
DCHECK(!commited_ && !aborted_) << "Accessor committed or aborted";
|
||||
db_.labels_index_.Update(label, vertex_accessor.vlist_, vertex);
|
||||
db_.label_property_index_.UpdateOnLabel(label, vertex_accessor.vlist_,
|
||||
vertex);
|
||||
@ -124,38 +122,36 @@ void GraphDbAccessor::UpdateLabelIndices(const GraphDbTypes::Label &label,
|
||||
void GraphDbAccessor::UpdatePropertyIndex(
|
||||
const GraphDbTypes::Property &property,
|
||||
const RecordAccessor<Vertex> &record_accessor, const Vertex *const vertex) {
|
||||
debug_assert(!commited_ && !aborted_, "Accessor committed or aborted");
|
||||
DCHECK(!commited_ && !aborted_) << "Accessor committed or aborted";
|
||||
db_.label_property_index_.UpdateOnProperty(property, record_accessor.vlist_,
|
||||
vertex);
|
||||
}
|
||||
|
||||
int64_t GraphDbAccessor::VerticesCount() const {
|
||||
debug_assert(!commited_ && !aborted_, "Accessor committed or aborted");
|
||||
DCHECK(!commited_ && !aborted_) << "Accessor committed or aborted";
|
||||
return db_.vertices_.access().size();
|
||||
}
|
||||
|
||||
int64_t GraphDbAccessor::VerticesCount(const GraphDbTypes::Label &label) const {
|
||||
debug_assert(!commited_ && !aborted_, "Accessor committed or aborted");
|
||||
DCHECK(!commited_ && !aborted_) << "Accessor committed or aborted";
|
||||
return db_.labels_index_.Count(label);
|
||||
}
|
||||
|
||||
int64_t GraphDbAccessor::VerticesCount(
|
||||
const GraphDbTypes::Label &label,
|
||||
const GraphDbTypes::Property &property) const {
|
||||
debug_assert(!commited_ && !aborted_, "Accessor committed or aborted");
|
||||
DCHECK(!commited_ && !aborted_) << "Accessor committed or aborted";
|
||||
const LabelPropertyIndex::Key key(label, property);
|
||||
debug_assert(db_.label_property_index_.IndexExists(key),
|
||||
"Index doesn't exist.");
|
||||
DCHECK(db_.label_property_index_.IndexExists(key)) << "Index doesn't exist.";
|
||||
return db_.label_property_index_.Count(key);
|
||||
}
|
||||
|
||||
int64_t GraphDbAccessor::VerticesCount(const GraphDbTypes::Label &label,
|
||||
const GraphDbTypes::Property &property,
|
||||
const PropertyValue &value) const {
|
||||
debug_assert(!commited_ && !aborted_, "Accessor committed or aborted");
|
||||
DCHECK(!commited_ && !aborted_) << "Accessor committed or aborted";
|
||||
const LabelPropertyIndex::Key key(label, property);
|
||||
debug_assert(db_.label_property_index_.IndexExists(key),
|
||||
"Index doesn't exist.");
|
||||
DCHECK(db_.label_property_index_.IndexExists(key)) << "Index doesn't exist.";
|
||||
return db_.label_property_index_.PositionAndCount(key, value).second;
|
||||
}
|
||||
|
||||
@ -164,17 +160,14 @@ int64_t GraphDbAccessor::VerticesCount(
|
||||
const std::experimental::optional<utils::Bound<PropertyValue>> lower,
|
||||
const std::experimental::optional<utils::Bound<PropertyValue>> upper)
|
||||
const {
|
||||
debug_assert(!commited_ && !aborted_, "Accessor committed or aborted");
|
||||
DCHECK(!commited_ && !aborted_) << "Accessor committed or aborted";
|
||||
const LabelPropertyIndex::Key key(label, property);
|
||||
debug_assert(db_.label_property_index_.IndexExists(key),
|
||||
"Index doesn't exist.");
|
||||
permanent_assert(lower || upper, "At least one bound must be provided");
|
||||
permanent_assert(
|
||||
!lower || lower.value().value().type() != PropertyValue::Type::Null,
|
||||
"Null value is not a valid index bound");
|
||||
permanent_assert(
|
||||
!upper || upper.value().value().type() != PropertyValue::Type::Null,
|
||||
"Null value is not a valid index bound");
|
||||
DCHECK(db_.label_property_index_.IndexExists(key)) << "Index doesn't exist.";
|
||||
CHECK(lower || upper) << "At least one bound must be provided";
|
||||
CHECK(!lower || lower.value().value().type() != PropertyValue::Type::Null)
|
||||
<< "Null value is not a valid index bound";
|
||||
CHECK(!upper || upper.value().value().type() != PropertyValue::Type::Null)
|
||||
<< "Null value is not a valid index bound";
|
||||
|
||||
if (!upper) {
|
||||
auto lower_pac =
|
||||
@ -203,7 +196,7 @@ int64_t GraphDbAccessor::VerticesCount(
|
||||
}
|
||||
|
||||
bool GraphDbAccessor::RemoveVertex(VertexAccessor &vertex_accessor) {
|
||||
debug_assert(!commited_ && !aborted_, "Accessor committed or aborted");
|
||||
DCHECK(!commited_ && !aborted_) << "Accessor committed or aborted";
|
||||
vertex_accessor.SwitchNew();
|
||||
// it's possible the vertex was removed already in this transaction
|
||||
// due to it getting matched multiple times by some patterns
|
||||
@ -217,7 +210,7 @@ bool GraphDbAccessor::RemoveVertex(VertexAccessor &vertex_accessor) {
|
||||
}
|
||||
|
||||
void GraphDbAccessor::DetachRemoveVertex(VertexAccessor &vertex_accessor) {
|
||||
debug_assert(!commited_ && !aborted_, "Accessor committed or aborted");
|
||||
DCHECK(!commited_ && !aborted_) << "Accessor committed or aborted";
|
||||
vertex_accessor.SwitchNew();
|
||||
for (auto edge_accessor : vertex_accessor.in())
|
||||
RemoveEdge(edge_accessor, true, false);
|
||||
@ -236,7 +229,7 @@ void GraphDbAccessor::DetachRemoveVertex(VertexAccessor &vertex_accessor) {
|
||||
EdgeAccessor GraphDbAccessor::InsertEdge(VertexAccessor &from,
|
||||
VertexAccessor &to,
|
||||
GraphDbTypes::EdgeType edge_type) {
|
||||
debug_assert(!commited_ && !aborted_, "Accessor committed or aborted");
|
||||
DCHECK(!commited_ && !aborted_) << "Accessor committed or aborted";
|
||||
// create an edge
|
||||
auto edge_vlist = new mvcc::VersionList<Edge>(*transaction_, *from.vlist_,
|
||||
*to.vlist_, edge_type);
|
||||
@ -262,13 +255,13 @@ EdgeAccessor GraphDbAccessor::InsertEdge(VertexAccessor &from,
|
||||
}
|
||||
|
||||
int64_t GraphDbAccessor::EdgesCount() const {
|
||||
debug_assert(!commited_ && !aborted_, "Accessor committed or aborted");
|
||||
DCHECK(!commited_ && !aborted_) << "Accessor committed or aborted";
|
||||
return db_.edges_.access().size();
|
||||
}
|
||||
|
||||
void GraphDbAccessor::RemoveEdge(EdgeAccessor &edge_accessor,
|
||||
bool remove_from_from, bool remove_from_to) {
|
||||
debug_assert(!commited_ && !aborted_, "Accessor committed or aborted");
|
||||
DCHECK(!commited_ && !aborted_) << "Accessor committed or aborted";
|
||||
// it's possible the edge was removed already in this transaction
|
||||
// due to it getting matched multiple times by some patterns
|
||||
// we can only delete it once, so check if it's already deleted
|
||||
@ -282,37 +275,37 @@ void GraphDbAccessor::RemoveEdge(EdgeAccessor &edge_accessor,
|
||||
}
|
||||
|
||||
GraphDbTypes::Label GraphDbAccessor::Label(const std::string &label_name) {
|
||||
debug_assert(!commited_ && !aborted_, "Accessor committed or aborted");
|
||||
DCHECK(!commited_ && !aborted_) << "Accessor committed or aborted";
|
||||
return &(*db_.labels_.access().insert(label_name).first);
|
||||
}
|
||||
|
||||
const std::string &GraphDbAccessor::LabelName(
|
||||
const GraphDbTypes::Label label) const {
|
||||
debug_assert(!commited_ && !aborted_, "Accessor committed or aborted");
|
||||
DCHECK(!commited_ && !aborted_) << "Accessor committed or aborted";
|
||||
return *label;
|
||||
}
|
||||
|
||||
GraphDbTypes::EdgeType GraphDbAccessor::EdgeType(
|
||||
const std::string &edge_type_name) {
|
||||
debug_assert(!commited_ && !aborted_, "Accessor committed or aborted");
|
||||
DCHECK(!commited_ && !aborted_) << "Accessor committed or aborted";
|
||||
return &(*db_.edge_types_.access().insert(edge_type_name).first);
|
||||
}
|
||||
|
||||
const std::string &GraphDbAccessor::EdgeTypeName(
|
||||
const GraphDbTypes::EdgeType edge_type) const {
|
||||
debug_assert(!commited_ && !aborted_, "Accessor committed or aborted");
|
||||
DCHECK(!commited_ && !aborted_) << "Accessor committed or aborted";
|
||||
return *edge_type;
|
||||
}
|
||||
|
||||
GraphDbTypes::Property GraphDbAccessor::Property(
|
||||
const std::string &property_name) {
|
||||
debug_assert(!commited_ && !aborted_, "Accessor committed or aborted");
|
||||
DCHECK(!commited_ && !aborted_) << "Accessor committed or aborted";
|
||||
return &(*db_.properties_.access().insert(property_name).first);
|
||||
}
|
||||
|
||||
const std::string &GraphDbAccessor::PropertyName(
|
||||
const GraphDbTypes::Property property) const {
|
||||
debug_assert(!commited_ && !aborted_, "Accessor committed or aborted");
|
||||
DCHECK(!commited_ && !aborted_) << "Accessor committed or aborted";
|
||||
return *property;
|
||||
}
|
||||
|
||||
|
@ -10,6 +10,7 @@
|
||||
|
||||
#include "cppitertools/filter.hpp"
|
||||
#include "cppitertools/imap.hpp"
|
||||
#include "glog/logging.h"
|
||||
|
||||
#include "graph_db.hpp"
|
||||
#include "storage/edge_accessor.hpp"
|
||||
@ -107,7 +108,7 @@ class GraphDbAccessor {
|
||||
* ignored).
|
||||
*/
|
||||
auto Vertices(bool current_state) {
|
||||
debug_assert(!commited_ && !aborted_, "Accessor committed or aborted");
|
||||
DCHECK(!commited_ && !aborted_) << "Accessor committed or aborted";
|
||||
// wrap version lists into accessors, which will look for visible versions
|
||||
auto accessors =
|
||||
iter::imap([this](auto vlist) { return VertexAccessor(*vlist, *this); },
|
||||
@ -136,7 +137,7 @@ class GraphDbAccessor {
|
||||
* @return iterable collection
|
||||
*/
|
||||
auto Vertices(const GraphDbTypes::Label &label, bool current_state) {
|
||||
debug_assert(!commited_ && !aborted_, "Accessor committed or aborted");
|
||||
DCHECK(!commited_ && !aborted_) << "Accessor committed or aborted";
|
||||
return iter::imap(
|
||||
[this](auto vlist) { return VertexAccessor(*vlist, *this); },
|
||||
db_.labels_index_.GetVlists(label, *transaction_, current_state));
|
||||
@ -155,10 +156,10 @@ class GraphDbAccessor {
|
||||
*/
|
||||
auto Vertices(const GraphDbTypes::Label &label,
|
||||
const GraphDbTypes::Property &property, bool current_state) {
|
||||
debug_assert(!commited_ && !aborted_, "Accessor committed or aborted");
|
||||
debug_assert(db_.label_property_index_.IndexExists(
|
||||
LabelPropertyIndex::Key(label, property)),
|
||||
"Label+property index doesn't exist.");
|
||||
DCHECK(!commited_ && !aborted_) << "Accessor committed or aborted";
|
||||
DCHECK(db_.label_property_index_.IndexExists(
|
||||
LabelPropertyIndex::Key(label, property)))
|
||||
<< "Label+property index doesn't exist.";
|
||||
return iter::imap(
|
||||
[this](auto vlist) { return VertexAccessor(*vlist, *this); },
|
||||
db_.label_property_index_.GetVlists(
|
||||
@ -182,12 +183,12 @@ class GraphDbAccessor {
|
||||
auto Vertices(const GraphDbTypes::Label &label,
|
||||
const GraphDbTypes::Property &property,
|
||||
const PropertyValue &value, bool current_state) {
|
||||
debug_assert(!commited_ && !aborted_, "Accessor committed or aborted");
|
||||
debug_assert(db_.label_property_index_.IndexExists(
|
||||
LabelPropertyIndex::Key(label, property)),
|
||||
"Label+property index doesn't exist.");
|
||||
permanent_assert(value.type() != PropertyValue::Type::Null,
|
||||
"Can't query index for propery value type null.");
|
||||
DCHECK(!commited_ && !aborted_) << "Accessor committed or aborted";
|
||||
DCHECK(db_.label_property_index_.IndexExists(
|
||||
LabelPropertyIndex::Key(label, property)))
|
||||
<< "Label+property index doesn't exist.";
|
||||
CHECK(value.type() != PropertyValue::Type::Null)
|
||||
<< "Can't query index for propery value type null.";
|
||||
return iter::imap(
|
||||
[this](auto vlist) { return VertexAccessor(*vlist, *this); },
|
||||
db_.label_property_index_.GetVlists(
|
||||
@ -227,10 +228,10 @@ class GraphDbAccessor {
|
||||
const std::experimental::optional<utils::Bound<PropertyValue>> lower,
|
||||
const std::experimental::optional<utils::Bound<PropertyValue>> upper,
|
||||
bool current_state) {
|
||||
debug_assert(!commited_ && !aborted_, "Accessor committed or aborted");
|
||||
debug_assert(db_.label_property_index_.IndexExists(
|
||||
LabelPropertyIndex::Key(label, property)),
|
||||
"Label+property index doesn't exist.");
|
||||
DCHECK(!commited_ && !aborted_) << "Accessor committed or aborted";
|
||||
DCHECK(db_.label_property_index_.IndexExists(
|
||||
LabelPropertyIndex::Key(label, property)))
|
||||
<< "Label+property index doesn't exist.";
|
||||
return iter::imap(
|
||||
[this](auto vlist) { return VertexAccessor(*vlist, *this); },
|
||||
db_.label_property_index_.GetVlists(
|
||||
@ -274,7 +275,7 @@ class GraphDbAccessor {
|
||||
* ignored).
|
||||
*/
|
||||
auto Edges(bool current_state) {
|
||||
debug_assert(!commited_ && !aborted_, "Accessor committed or aborted");
|
||||
DCHECK(!commited_ && !aborted_) << "Accessor committed or aborted";
|
||||
|
||||
// wrap version lists into accessors, which will look for visible versions
|
||||
auto accessors =
|
||||
@ -349,7 +350,7 @@ class GraphDbAccessor {
|
||||
*/
|
||||
bool LabelPropertyIndexExists(const GraphDbTypes::Label &label,
|
||||
const GraphDbTypes::Property &property) const {
|
||||
debug_assert(!commited_ && !aborted_, "Accessor committed or aborted");
|
||||
DCHECK(!commited_ && !aborted_) << "Accessor committed or aborted";
|
||||
return db_.label_property_index_.IndexExists(
|
||||
LabelPropertyIndex::Key(label, property));
|
||||
}
|
||||
@ -358,7 +359,7 @@ class GraphDbAccessor {
|
||||
* @brief - Returns vector of keys of label-property indices.
|
||||
*/
|
||||
std::vector<LabelPropertyIndex::Key> GetIndicesKeys() {
|
||||
debug_assert(!commited_ && !aborted_, "Accessor committed or aborted");
|
||||
DCHECK(!commited_ && !aborted_) << "Accessor committed or aborted";
|
||||
return db_.label_property_index_.GetIndicesKeys();
|
||||
}
|
||||
|
||||
@ -493,7 +494,7 @@ class GraphDbAccessor {
|
||||
*/
|
||||
template <typename TRecord>
|
||||
bool Reconstruct(RecordAccessor<TRecord> &accessor) {
|
||||
debug_assert(!commited_ && !aborted_, "Accessor committed or aborted");
|
||||
DCHECK(!commited_ && !aborted_) << "Accessor committed or aborted";
|
||||
accessor.vlist_->find_set_old_new(*transaction_, accessor.old_,
|
||||
accessor.new_);
|
||||
accessor.current_ = accessor.old_ ? accessor.old_ : accessor.new_;
|
||||
@ -516,18 +517,16 @@ class GraphDbAccessor {
|
||||
*/
|
||||
template <typename TRecord>
|
||||
void Update(RecordAccessor<TRecord> &accessor) {
|
||||
debug_assert(!commited_ && !aborted_, "Accessor committed or aborted");
|
||||
DCHECK(!commited_ && !aborted_) << "Accessor committed or aborted";
|
||||
// can't update a deleted record if:
|
||||
// - we only have old_ and it hasn't been deleted
|
||||
// - we have new_ and it hasn't been deleted
|
||||
if (!accessor.new_) {
|
||||
debug_assert(
|
||||
!accessor.old_->is_expired_by(*transaction_),
|
||||
"Can't update a record deleted in the current transaction+command");
|
||||
DCHECK(!accessor.old_->is_expired_by(*transaction_))
|
||||
<< "Can't update a record deleted in the current transaction+commad";
|
||||
} else {
|
||||
debug_assert(
|
||||
!accessor.new_->is_expired_by(*transaction_),
|
||||
"Can't update a record deleted in the current transaction+command");
|
||||
DCHECK(!accessor.new_->is_expired_by(*transaction_))
|
||||
<< "Can't update a record deleted in the current transaction+command";
|
||||
}
|
||||
|
||||
if (!accessor.new_) accessor.new_ = accessor.vlist_->update(*transaction_);
|
||||
|
@ -3,6 +3,7 @@
|
||||
#include "cppitertools/filter.hpp"
|
||||
#include "cppitertools/imap.hpp"
|
||||
#include "cppitertools/takewhile.hpp"
|
||||
#include "glog/logging.h"
|
||||
|
||||
#include "data_structures/concurrent/concurrent_map.hpp"
|
||||
#include "data_structures/concurrent/skiplist.hpp"
|
||||
@ -162,7 +163,7 @@ static void Refresh(
|
||||
|
||||
[[gnu::unused]] auto success =
|
||||
indices_entries_accessor.remove(indices_entry);
|
||||
debug_assert(success, "Unable to delete entry.");
|
||||
DCHECK(success) << "Unable to delete entry.";
|
||||
}
|
||||
|
||||
// if the record is still visible,
|
||||
|
@ -1,5 +1,7 @@
|
||||
#pragma once
|
||||
|
||||
#include "glog/logging.h"
|
||||
|
||||
#include "data_structures/concurrent/concurrent_map.hpp"
|
||||
#include "database/graph_db.hpp"
|
||||
#include "database/graph_db_datatypes.hpp"
|
||||
@ -171,7 +173,7 @@ class KeyIndex {
|
||||
* @return true if it contains, false otherwise.
|
||||
*/
|
||||
static bool Exists(const GraphDbTypes::Label &label, const Vertex *const v) {
|
||||
debug_assert(v != nullptr, "Vertex is nullptr.");
|
||||
DCHECK(v != nullptr) << "Vertex is nullptr.";
|
||||
// We have to check for existance of label because the transaction
|
||||
// might not see the label, or the label was deleted and not yet
|
||||
// removed from the index.
|
||||
@ -186,7 +188,7 @@ class KeyIndex {
|
||||
*/
|
||||
static bool Exists(const GraphDbTypes::EdgeType &edge_type,
|
||||
const Edge *const e) {
|
||||
debug_assert(e != nullptr, "Edge is nullptr.");
|
||||
DCHECK(e != nullptr) << "Edge is nullptr.";
|
||||
// We have to check for equality of edge types because the transaction
|
||||
// might not see the edge type, or the edge type was deleted and not yet
|
||||
// removed from the index.
|
||||
|
@ -168,7 +168,7 @@ class LabelPropertyIndex {
|
||||
* key sorted ascendingly by the property value.
|
||||
*/
|
||||
auto GetVlists(const Key &key, const tx::Transaction &t, bool current_state) {
|
||||
debug_assert(ready_for_use_.access().contains(key), "Index not yet ready.");
|
||||
DCHECK(ready_for_use_.access().contains(key)) << "Index not yet ready.";
|
||||
auto access = GetKeyStorage(key)->access();
|
||||
auto begin = access.begin();
|
||||
return IndexUtils::GetVlists<typename SkipList<IndexEntry>::Iterator,
|
||||
@ -196,7 +196,7 @@ class LabelPropertyIndex {
|
||||
*/
|
||||
auto GetVlists(const Key &key, const PropertyValue &value,
|
||||
const tx::Transaction &t, bool current_state) {
|
||||
debug_assert(ready_for_use_.access().contains(key), "Index not yet ready.");
|
||||
DCHECK(ready_for_use_.access().contains(key)) << "Index not yet ready.";
|
||||
auto access = GetKeyStorage(key)->access();
|
||||
auto min_ptr = std::numeric_limits<std::uintptr_t>::min();
|
||||
auto start_iter = access.find_or_larger(IndexEntry(
|
||||
@ -246,14 +246,14 @@ class LabelPropertyIndex {
|
||||
const std::experimental::optional<utils::Bound<PropertyValue>> lower,
|
||||
const std::experimental::optional<utils::Bound<PropertyValue>> upper,
|
||||
const tx::Transaction &transaction, bool current_state) {
|
||||
debug_assert(ready_for_use_.access().contains(key), "Index not yet ready.");
|
||||
DCHECK(ready_for_use_.access().contains(key)) << "Index not yet ready.";
|
||||
|
||||
auto type = [](const auto &bound) { return bound.value().value().type(); };
|
||||
permanent_assert(lower || upper, "At least one bound must be provided");
|
||||
permanent_assert(!lower || type(lower) != PropertyValue::Type::Null,
|
||||
"Null value is not a valid index bound");
|
||||
permanent_assert(!upper || type(upper) != PropertyValue::Type::Null,
|
||||
"Null value is not a valid index bound");
|
||||
CHECK(lower || upper) << "At least one bound must be provided";
|
||||
CHECK(!lower || type(lower) != PropertyValue::Type::Null)
|
||||
<< "Null value is not a valid index bound";
|
||||
CHECK(!upper || type(upper) != PropertyValue::Type::Null)
|
||||
<< "Null value is not a valid index bound";
|
||||
|
||||
// helper function for creating a bound with an IndexElement
|
||||
auto make_index_bound = [](const auto &optional_bound, bool bottom) {
|
||||
@ -328,9 +328,8 @@ class LabelPropertyIndex {
|
||||
*/
|
||||
int64_t Count(const Key &key) {
|
||||
auto index = GetKeyStorage(key);
|
||||
permanent_assert(index != nullptr, "Index doesn't exist.");
|
||||
permanent_assert(ready_for_use_.access().contains(key),
|
||||
"Index not yet ready.");
|
||||
CHECK(index != nullptr) << "Index doesn't exist.";
|
||||
CHECK(ready_for_use_.access().contains(key)) << "Index not yet ready.";
|
||||
return index->access().size();
|
||||
}
|
||||
|
||||
@ -468,15 +467,15 @@ class LabelPropertyIndex {
|
||||
mb.end(), cmp);
|
||||
}
|
||||
default:
|
||||
permanent_fail("Unimplemented type operator.");
|
||||
LOG(FATAL) << "Unimplemented type operator.";
|
||||
}
|
||||
}
|
||||
|
||||
// helper for getting a double from PropertyValue, if possible
|
||||
auto get_double = [](const PropertyValue &value) {
|
||||
debug_assert(value.type() == PropertyValue::Type::Int ||
|
||||
value.type() == PropertyValue::Type::Double,
|
||||
"Invalid data type.");
|
||||
DCHECK(value.type() == PropertyValue::Type::Int ||
|
||||
value.type() == PropertyValue::Type::Double)
|
||||
<< "Invalid data type.";
|
||||
if (value.type() == PropertyValue::Type::Int)
|
||||
return static_cast<double>(value.Value<int64_t>());
|
||||
return value.Value<double>();
|
||||
@ -539,7 +538,7 @@ class LabelPropertyIndex {
|
||||
*/
|
||||
static bool Exists(const Key &key, const PropertyValue &value,
|
||||
const Vertex *const v) {
|
||||
debug_assert(v != nullptr, "Vertex is nullptr.");
|
||||
DCHECK(v != nullptr) << "Vertex is nullptr.";
|
||||
// We have to check for existance of label because the transaction
|
||||
// might not see the label, or the label was deleted and not yet
|
||||
// removed from the index.
|
||||
|
@ -73,8 +73,8 @@ class FileReaderBuffer {
|
||||
* reference to a summary object where summary should be written.
|
||||
*/
|
||||
bool ReadSummary(snapshot::Summary &summary) {
|
||||
debug_assert(input_stream_.tellg() == 0,
|
||||
"Summary should be read before other data!");
|
||||
DCHECK(input_stream_.tellg() == 0)
|
||||
<< "Summary should be read before other data!";
|
||||
input_stream_.seekg(-static_cast<int64_t>(sizeof(snapshot::Summary)),
|
||||
std::ios::end);
|
||||
if (input_stream_.fail()) return false;
|
||||
|
@ -64,8 +64,8 @@ class FileWriterBuffer {
|
||||
* writing was successful.
|
||||
*/
|
||||
void WriteSummary(int64_t vertex_num, int64_t edge_num) {
|
||||
debug_assert(vertex_num >= 0, "Number of vertices should't be negative");
|
||||
debug_assert(vertex_num >= 0, "Number of edges should't be negative");
|
||||
DCHECK(vertex_num >= 0) << "Number of vertices should't be negative";
|
||||
DCHECK(vertex_num >= 0) << "Number of edges should't be negative";
|
||||
WriteLong(vertex_num);
|
||||
WriteLong(edge_num);
|
||||
WriteLong(hasher_.hash());
|
||||
|
@ -56,7 +56,7 @@ class Record : public Version<T> {
|
||||
}
|
||||
|
||||
void mark_created(const tx::Transaction &t) {
|
||||
debug_assert(tx_.cre == 0, "Marking node as created twice.");
|
||||
DCHECK(tx_.cre == 0) << "Marking node as created twice.";
|
||||
tx_.cre = t.id_;
|
||||
cmd_.cre = t.cid();
|
||||
}
|
||||
@ -217,8 +217,8 @@ class Record : public Version<T> {
|
||||
// below, but it has a same name.
|
||||
bool committed(uint8_t mask, tx::transaction_id_t id,
|
||||
const tx::Transaction &t) {
|
||||
debug_assert(mask == Hints::kCre || mask == Hints::kExp,
|
||||
"Mask must be either kCre or kExp");
|
||||
DCHECK(mask == Hints::kCre || mask == Hints::kExp)
|
||||
<< "Mask must be either kCre or kExp";
|
||||
// Dominik Gleich says 4 april 2017: the tests in this routine are correct;
|
||||
// if you think they're not, you're wrong, and you should think about it
|
||||
// again. I know, it happened to me (and also to Matej Gradicek).
|
||||
@ -245,8 +245,8 @@ class Record : public Version<T> {
|
||||
*/
|
||||
bool committed(uint8_t mask, tx::transaction_id_t id,
|
||||
const tx::Engine &engine) const {
|
||||
debug_assert(mask == Hints::kCre || mask == Hints::kExp,
|
||||
"Mask must be either kCre or kExp");
|
||||
DCHECK(mask == Hints::kCre || mask == Hints::kExp)
|
||||
<< "Mask must be either kCre or kExp";
|
||||
// If hints are set, return if id is committed.
|
||||
if (hints_.Get(mask)) return hints_.Get(Hints::kCmt & mask);
|
||||
|
||||
@ -295,4 +295,4 @@ class Record : public Version<T> {
|
||||
return false;
|
||||
}
|
||||
};
|
||||
}
|
||||
} // namespace mvcc
|
||||
|
@ -81,7 +81,7 @@ class VersionList {
|
||||
* @return pair<status, to_delete>; status is true - If version list is empty
|
||||
* after garbage collection. to_delete points to the newest record that is not
|
||||
* visible anymore. If none exists to_delete will point to nullptr.
|
||||
*/
|
||||
*/
|
||||
std::pair<bool, T *> GcDeleted(const tx::Snapshot &snapshot,
|
||||
const tx::Engine &engine) {
|
||||
// nullptr
|
||||
@ -194,7 +194,7 @@ class VersionList {
|
||||
* @param t The transaction
|
||||
*/
|
||||
T *update(tx::Transaction &t) {
|
||||
debug_assert(head_ != nullptr, "Head is nullptr on update.");
|
||||
DCHECK(head_ != nullptr) << "Head is nullptr on update.";
|
||||
T *old_record = nullptr;
|
||||
T *new_record = nullptr;
|
||||
find_set_old_new(t, old_record, new_record);
|
||||
@ -204,16 +204,16 @@ class VersionList {
|
||||
if (new_record) return new_record;
|
||||
|
||||
// check if we found any visible records
|
||||
permanent_assert(old_record != nullptr, "Updating nullptr record");
|
||||
CHECK(old_record != nullptr) << "Updating nullptr record";
|
||||
|
||||
return update(old_record, t);
|
||||
}
|
||||
|
||||
void remove(tx::Transaction &t) {
|
||||
debug_assert(head_ != nullptr, "Head is nullptr on removal.");
|
||||
DCHECK(head_ != nullptr) << "Head is nullptr on removal.";
|
||||
auto record = find(t);
|
||||
|
||||
permanent_assert(record != nullptr, "Removing nullptr record");
|
||||
CHECK(record != nullptr) << "Removing nullptr record";
|
||||
|
||||
// TODO: Is this lock and validate necessary
|
||||
lock_and_validate(record, t);
|
||||
@ -223,15 +223,14 @@ class VersionList {
|
||||
// TODO(flor): This should also be private but can't be right now because of
|
||||
// the way graph_db_accessor works.
|
||||
void remove(T *record, tx::Transaction &t) {
|
||||
debug_assert(record != nullptr, "Record is nullptr on removal.");
|
||||
DCHECK(record != nullptr) << "Record is nullptr on removal.";
|
||||
lock_and_validate(record, t);
|
||||
record->mark_expired(t);
|
||||
}
|
||||
|
||||
private:
|
||||
void lock_and_validate(T *record, tx::Transaction &t) {
|
||||
debug_assert(record != nullptr,
|
||||
"Record is nullptr on lock and validation.");
|
||||
DCHECK(record != nullptr) << "Record is nullptr on lock and validation.";
|
||||
|
||||
// take a lock on this node
|
||||
t.TakeLock(lock_);
|
||||
@ -245,7 +244,7 @@ class VersionList {
|
||||
}
|
||||
|
||||
T *update(T *record, tx::Transaction &t) {
|
||||
debug_assert(record != nullptr, "Record is nullptr on update.");
|
||||
DCHECK(record != nullptr) << "Record is nullptr on update.";
|
||||
lock_and_validate(record, t);
|
||||
|
||||
// It could be done with unique_ptr but while this could mean memory
|
||||
@ -269,4 +268,4 @@ class VersionList {
|
||||
std::atomic<T *> head_{nullptr};
|
||||
RecordLock lock_;
|
||||
};
|
||||
}
|
||||
} // namespace mvcc
|
||||
|
@ -5,8 +5,9 @@
#include <locale>
#include <stdexcept>

#include "glog/logging.h"

#include "query/exceptions.hpp"
#include "utils/assert.hpp"
#include "utils/string.hpp"

namespace query {
@ -142,7 +143,7 @@ std::string ParseStringLiteral(const std::string &s) {
default:
// This should never happen, except grammar changes and we don't
// notice change in this production.
debug_assert(false, "can't happen");
DLOG(FATAL) << "can't happen";
throw std::exception();
}
escape = false;
@ -164,12 +165,12 @@ double ParseDoubleLiteral(const std::string &s) {
}

std::string ParseParameter(const std::string &s) {
debug_assert(s[0] == '$', "Invalid string passed as parameter name");
DCHECK(s[0] == '$') << "Invalid string passed as parameter name";
if (s[1] != '`') return s.substr(1);
// If parameter name is escaped symbolic name then symbolic name should be
// unescaped and leading and trailing backquote should be removed.
debug_assert(s.size() > 3U && s.back() == '`',
"Invalid string passed as parameter name");
DCHECK(s.size() > 3U && s.back() == '`')
<< "Invalid string passed as parameter name";
std::string out;
for (int i = 2; i < static_cast<int>(s.size()) - 1; ++i) {
if (s[i] == '`') {
@ -179,4 +180,4 @@ std::string ParseParameter(const std::string &s) {
}
return out;
}
}
} // namespace query

@ -5,12 +5,13 @@
#include <unordered_map>
#include <vector>

#include "glog/logging.h"

#include "database/graph_db.hpp"
#include "database/graph_db_datatypes.hpp"
#include "query/frontend/ast/ast_visitor.hpp"
#include "query/parameters.hpp"
#include "query/typed_value.hpp"
#include "utils/assert.hpp"

// Hash function for the key in pattern atom property maps.
namespace std {
@ -25,20 +26,20 @@ struct hash<std::pair<std::string, GraphDbTypes::Property>> {
std::hash<std::string> string_hash{};
std::hash<GraphDbTypes::Property> property_hash{};
};
}
} // namespace std

namespace query {

#define CLONE_BINARY_EXPRESSION \
auto Clone(AstTreeStorage &storage) const->std::remove_const< \
std::remove_pointer<decltype(this)>::type>::type * override { \
std::remove_pointer<decltype(this)>::type>::type *override { \
return storage.Create< \
std::remove_cv<std::remove_reference<decltype(*this)>::type>::type>( \
expression1_->Clone(storage), expression2_->Clone(storage)); \
}
#define CLONE_UNARY_EXPRESSION \
auto Clone(AstTreeStorage &storage) const->std::remove_const< \
std::remove_pointer<decltype(this)>::type>::type * override { \
std::remove_pointer<decltype(this)>::type>::type *override { \
return storage.Create< \
std::remove_cv<std::remove_reference<decltype(*this)>::type>::type>( \
expression_->Clone(storage)); \
@ -498,10 +499,9 @@ class IfOperator : public Expression {
condition_(condition),
then_expression_(then_expression),
else_expression_(else_expression) {
debug_assert(
condition_ != nullptr && then_expression_ != nullptr &&
else_expression_ != nullptr,
"clause_, then_expression_ and else_expression_ can't be nullptr");
DCHECK(condition_ != nullptr && then_expression_ != nullptr &&
else_expression_ != nullptr)
<< "clause_, then_expression_ and else_expression_ can't be nullptr";
}
};

@ -782,9 +782,10 @@ class Function : public Expression {
std::vector<Expression *> arguments_;

protected:
Function(int uid, std::function<TypedValue(const std::vector<TypedValue> &,
GraphDbAccessor &)>
function,
Function(int uid,
std::function<TypedValue(const std::vector<TypedValue> &,
GraphDbAccessor &)>
function,
const std::vector<Expression *> &arguments)
: Expression(uid), function_(function), arguments_(arguments) {}
};
@ -824,11 +825,11 @@ class Aggregation : public BinaryOperator {
Aggregation(int uid, Expression *expression1, Expression *expression2, Op op)
: BinaryOperator(uid, expression1, expression2), op_(op) {
// COUNT without expression denotes COUNT(*) in cypher.
debug_assert(expression1 || op == Aggregation::Op::COUNT,
"All aggregations, except COUNT require expression");
debug_assert(expression2 == nullptr ^ op == Aggregation::Op::COLLECT_MAP,
"The second expression is obligatory in COLLECT_MAP and "
"invalid otherwise");
DCHECK(expression1 || op == Aggregation::Op::COUNT)
<< "All aggregations, except COUNT require expression";
DCHECK(expression2 == nullptr ^ op == Aggregation::Op::COLLECT_MAP)
<< "The second expression is obligatory in COLLECT_MAP and "
"invalid otherwise";
}
};

@ -862,9 +863,9 @@ class All : public Expression {
identifier_(identifier),
list_expression_(list_expression),
where_(where) {
debug_assert(identifier, "identifier must not be nullptr");
debug_assert(list_expression, "list_expression must not be nullptr");
debug_assert(where, "where must not be nullptr");
DCHECK(identifier) << "identifier must not be nullptr";
DCHECK(list_expression) << "list_expression must not be nullptr";
DCHECK(where) << "where must not be nullptr";
}
};

@ -1545,8 +1546,8 @@ class Unwind : public Clause {
protected:
Unwind(int uid, NamedExpression *named_expression)
: Clause(uid), named_expression_(named_expression) {
debug_assert(named_expression,
"Unwind cannot take nullptr for named_expression")
DCHECK(named_expression)
<< "Unwind cannot take nullptr for named_expression";
}
};

@ -11,11 +11,12 @@
#include <utility>
#include <vector>

#include "glog/logging.h"

#include "database/graph_db.hpp"
#include "query/common.hpp"
#include "query/exceptions.hpp"
#include "query/interpret/awesome_memgraph_functions.hpp"
#include "utils/assert.hpp"
#include "utils/exceptions.hpp"
#include "utils/string.hpp"

@ -91,7 +92,7 @@ antlrcpp::Any CypherMainVisitor::visitSingleQuery(
}
has_create_index = true;
} else {
debug_assert(false, "Can't happen");
DLOG(FATAL) << "Can't happen";
}
}
if (!has_update && !has_return && !has_create_index) {
@ -338,9 +339,9 @@ antlrcpp::Any CypherMainVisitor::visitSymbolicName(
CypherParser::SymbolicNameContext *ctx) {
if (ctx->EscapedSymbolicName()) {
auto quoted_name = ctx->getText();
debug_assert(quoted_name.size() >= 2U && quoted_name[0] == '`' &&
quoted_name.back() == '`',
"Can't happen. Grammar ensures this");
DCHECK(quoted_name.size() >= 2U && quoted_name[0] == '`' &&
quoted_name.back() == '`')
<< "Can't happen. Grammar ensures this";
// Remove enclosing backticks.
std::string escaped_name =
quoted_name.substr(1, static_cast<int>(quoted_name.size()) - 2);
@ -353,7 +354,7 @@ antlrcpp::Any CypherMainVisitor::visitSymbolicName(
name.push_back('`');
escaped = false;
} else {
debug_assert(false, "Can't happen. Grammar ensures that.");
DLOG(FATAL) << "Can't happen. Grammar ensures that.";
}
} else if (c == '`') {
escaped = true;
@ -519,13 +520,13 @@ antlrcpp::Any CypherMainVisitor::visitRelationshipPattern(

antlrcpp::Any CypherMainVisitor::visitRelationshipDetail(
CypherParser::RelationshipDetailContext *) {
debug_assert(false, "Should never be called. See documentation in hpp.");
DLOG(FATAL) << "Should never be called. See documentation in hpp.";
return 0;
}

antlrcpp::Any CypherMainVisitor::visitRelationshipLambda(
CypherParser::RelationshipLambdaContext *) {
debug_assert(false, "Should never be called. See documentation in hpp.");
DLOG(FATAL) << "Should never be called. See documentation in hpp.";
return 0;
}

@ -540,8 +541,8 @@ antlrcpp::Any CypherMainVisitor::visitRelationshipTypes(

antlrcpp::Any CypherMainVisitor::visitVariableExpansion(
CypherParser::VariableExpansionContext *ctx) {
debug_assert(ctx->expression().size() <= 2U,
"Expected 0, 1 or 2 bounds in range literal.");
DCHECK(ctx->expression().size() <= 2U)
<< "Expected 0, 1 or 2 bounds in range literal.";

bool is_bfs = !ctx->getTokens(CypherParser::BFS).empty();
Expression *lower = nullptr;
@ -667,7 +668,7 @@ antlrcpp::Any CypherMainVisitor::visitExpression8(

antlrcpp::Any CypherMainVisitor::visitPartialComparisonExpression(
CypherParser::PartialComparisonExpressionContext *) {
debug_assert(false, "Should never be called. See documentation in hpp.");
DLOG(FATAL) << "Should never be called. See documentation in hpp.";
return 0;
}

@ -743,7 +744,7 @@ antlrcpp::Any CypherMainVisitor::visitExpression3a(

antlrcpp::Any CypherMainVisitor::visitStringAndNullOperators(
CypherParser::StringAndNullOperatorsContext *) {
debug_assert(false, "Should never be called. See documentation in hpp.");
DLOG(FATAL) << "Should never be called. See documentation in hpp.";
return 0;
}

@ -776,7 +777,7 @@ antlrcpp::Any CypherMainVisitor::visitExpression3b(

antlrcpp::Any CypherMainVisitor::visitListIndexingOrSlicing(
CypherParser::ListIndexingOrSlicingContext *) {
debug_assert(false, "Should never be called. See documentation in hpp.");
DLOG(FATAL) << "Should never be called. See documentation in hpp.";
return 0;
}

@ -874,7 +875,7 @@ antlrcpp::Any CypherMainVisitor::visitLiteral(
return static_cast<Expression *>(storage_.Create<PrimitiveLiteral>(
ctx->numberLiteral()->accept(this).as<TypedValue>(), token_position));
}
debug_fail("Expected to handle all cases above");
LOG(FATAL) << "Expected to handle all cases above";
} else if (ctx->listLiteral()) {
return static_cast<Expression *>(storage_.Create<ListLiteral>(
ctx->listLiteral()->accept(this).as<std::vector<Expression *>>()));
@ -903,7 +904,7 @@ antlrcpp::Any CypherMainVisitor::visitNumberLiteral(
} else {
// This should never happen, except grammar changes and we don't notice
// change in this production.
debug_assert(false, "can't happen");
DLOG(FATAL) << "can't happen";
throw std::exception();
}
}
@ -985,7 +986,7 @@ antlrcpp::Any CypherMainVisitor::visitBooleanLiteral(
if (ctx->getTokens(CypherParser::FALSE).size()) {
return false;
}
debug_assert(false, "Shouldn't happend");
DLOG(FATAL) << "Shouldn't happend";
throw std::exception();
}

@ -1112,7 +1113,7 @@ antlrcpp::Any CypherMainVisitor::visitCaseExpression(

antlrcpp::Any CypherMainVisitor::visitCaseAlternatives(
CypherParser::CaseAlternativesContext *) {
debug_fail("Should never be called. See documentation in hpp.");
DLOG(FATAL) << "Should never be called. See documentation in hpp.";
return 0;
}

@ -1138,7 +1139,7 @@ antlrcpp::Any CypherMainVisitor::visitMerge(CypherParser::MergeContext *ctx) {
if (merge_action->MATCH()) {
merge->on_match_.insert(merge->on_match_.end(), set.begin(), set.end());
} else {
debug_assert(merge_action->CREATE(), "Expected ON MATCH or ON CREATE");
DCHECK(merge_action->CREATE()) << "Expected ON MATCH or ON CREATE";
merge->on_create_.insert(merge->on_create_.end(), set.begin(), set.end());
}
}
@ -1155,7 +1156,7 @@ antlrcpp::Any CypherMainVisitor::visitUnwind(CypherParser::UnwindContext *ctx) {

antlrcpp::Any CypherMainVisitor::visitFilterExpression(
CypherParser::FilterExpressionContext *) {
debug_fail("Should never be called. See documentation in hpp.");
LOG(FATAL) << "Should never be called. See documentation in hpp.";
return 0;
}

@ -5,6 +5,8 @@
#include <utility>

#include "antlr4-runtime.h"
#include "glog/logging.h"

#include "query/context.hpp"
#include "query/frontend/ast/ast.hpp"
#include "query/frontend/ast/named_antlr_tokens.hpp"
@ -14,8 +16,8 @@
namespace query {
namespace frontend {

using query::Context;
using antlropencypher::CypherParser;
using query::Context;

class CypherMainVisitor : public antlropencypher::CypherBaseVisitor {
public:
@ -100,7 +102,7 @@ class CypherMainVisitor : public antlropencypher::CypherBaseVisitor {
std::vector<TExpression *> _expressions,
std::vector<antlr4::tree::ParseTree *> all_children,
const std::vector<size_t> &allowed_operators) {
debug_assert(_expressions.size(), "can't happen");
DCHECK(_expressions.size()) << "can't happen";
std::vector<Expression *> expressions;
auto operators = ExtractOperators(all_children, allowed_operators);

@ -121,7 +123,7 @@ class CypherMainVisitor : public antlropencypher::CypherBaseVisitor {
TExpression *_expression,
std::vector<antlr4::tree::ParseTree *> all_children,
const std::vector<size_t> &allowed_operators) {
debug_assert(_expression, "can't happen");
DCHECK(_expression) << "can't happen";
auto operators = ExtractOperators(all_children, allowed_operators);

Expression *expression = _expression->accept(this);
@ -575,5 +577,5 @@ class CypherMainVisitor : public antlropencypher::CypherBaseVisitor {
// return.
bool in_with_ = false;
};
}
}
} // namespace frontend
} // namespace query

@ -7,6 +7,8 @@
#include <experimental/optional>
#include <unordered_set>

#include "glog/logging.h"

#include "utils/algorithm.hpp"

namespace query {
@ -298,8 +300,8 @@ bool SymbolGenerator::PreVisit(All &all) {
bool SymbolGenerator::PreVisit(Pattern &pattern) {
scope_.in_pattern = true;
if ((scope_.in_create || scope_.in_merge) && pattern.atoms_.size() == 1U) {
debug_assert(dynamic_cast<NodeAtom *>(pattern.atoms_[0]),
"Expected a single NodeAtom in Pattern");
DCHECK(dynamic_cast<NodeAtom *>(pattern.atoms_[0]))
<< "Expected a single NodeAtom in Pattern";
scope_.in_create_node = true;
}
return true;

@ -6,13 +6,14 @@
#include <string>
#include <vector>

#include "glog/logging.h"

#include "query/common.hpp"
#include "query/exceptions.hpp"
#include "query/frontend/opencypher/generated/CypherBaseVisitor.h"
#include "query/frontend/opencypher/generated/CypherLexer.h"
#include "query/frontend/opencypher/generated/CypherParser.h"
#include "query/frontend/stripped_lexer_constants.hpp"
#include "utils/assert.hpp"
#include "utils/hashing/fnv.hpp"
#include "utils/string.hpp"

@ -64,8 +65,9 @@ StrippedQuery::StrippedQuery(const std::string &query) : original_(query) {
// A helper function that stores literal and its token position in a
// literals_. In stripped query text literal is replaced with a new_value.
// new_value can be any value that is lexed as a literal.
auto replace_stripped = [this, &token_strings](
int position, const TypedValue &value, const std::string &new_value) {
auto replace_stripped = [this, &token_strings](int position,
const TypedValue &value,
const std::string &new_value) {
literals_.Add(position, value);
token_strings.push_back(new_value);
};
@ -88,7 +90,7 @@ StrippedQuery::StrippedQuery(const std::string &query) : original_(query) {
int token_index = token_strings.size() * 2 + parameters_.size();
switch (token.first) {
case Token::UNMATCHED:
debug_assert(false, "Shouldn't happen");
LOG(FATAL) << "Shouldn't happen";
case Token::KEYWORD: {
token.second = utils::ToLowerCase(token.second);
const auto &s = token.second;
@ -505,4 +507,4 @@ int StrippedQuery::MatchWhitespaceAndComments(int start) const {
if (state != State::OUT) return comment_position - start;
return i - start;
}
}
} // namespace query

@ -5,7 +5,6 @@

#include "query/parameters.hpp"
#include "query/typed_value.hpp"
#include "utils/assert.hpp"
#include "utils/hashing/fnv.hpp"

namespace query {
@ -90,4 +89,4 @@ class StrippedQuery {
// Hash based on the stripped query.
HashType hash_;
};
}
} // namespace query

@ -535,7 +535,8 @@ TypedValue Counter(const std::vector<TypedValue> &args, GraphDbAccessor &dba) {
return dba.Counter(args[0].ValueString());
}

TypedValue CounterSet(const std::vector<TypedValue> &args, GraphDbAccessor &dba) {
TypedValue CounterSet(const std::vector<TypedValue> &args,
GraphDbAccessor &dba) {
if (args.size() != 2U) {
throw QueryRuntimeException("counterSet takes two arguments");
}
@ -556,7 +557,7 @@ TypedValue IndexInfo(const std::vector<TypedValue> &args,
auto info = dba.IndexInfo();
return std::vector<TypedValue>(info.begin(), info.end());
}
} // annonymous namespace
} // namespace

std::function<TypedValue(const std::vector<TypedValue> &, GraphDbAccessor &)>
NameToFunction(const std::string &function_name) {

@ -12,7 +12,6 @@
#include "query/frontend/semantic/symbol_table.hpp"
#include "query/interpret/frame.hpp"
#include "query/typed_value.hpp"
#include "utils/assert.hpp"
#include "utils/exceptions.hpp"

namespace query {
@ -31,9 +30,9 @@ class ExpressionEvaluator : public TreeVisitor<TypedValue> {

using TreeVisitor<TypedValue>::Visit;

#define BLOCK_VISIT(TREE_TYPE) \
TypedValue Visit(TREE_TYPE &) override { \
permanent_fail("ExpressionEvaluator should not visit " #TREE_TYPE); \
#define BLOCK_VISIT(TREE_TYPE) \
TypedValue Visit(TREE_TYPE &) override { \
LOG(FATAL) << "ExpressionEvaluator should not visit " #TREE_TYPE; \
}

BLOCK_VISIT(Query);
@ -388,7 +387,7 @@ class ExpressionEvaluator : public TreeVisitor<TypedValue> {
vertex.SwitchOld();
break;
default:
permanent_fail("Unhandled GraphView enum");
LOG(FATAL) << "Unhandled GraphView enum";
}
break;
}
@ -402,7 +401,7 @@ class ExpressionEvaluator : public TreeVisitor<TypedValue> {
edge.SwitchOld();
break;
default:
permanent_fail("Unhandled GraphView enum");
LOG(FATAL) << "Unhandled GraphView enum";
}
break;
}

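The unreachable-code cases above follow the same idea: permanent_fail becomes LOG(FATAL) << ..., which always aborts, and debug_fail becomes DLOG(FATAL) << ..., which only aborts in debug builds. A small illustrative sketch, not from the codebase (the enum and function are hypothetical):

#include "glog/logging.h"

enum class GraphViewKind { OLD, NEW };

const char *ToString(GraphViewKind view) {
  switch (view) {
    case GraphViewKind::OLD: return "old";
    case GraphViewKind::NEW: return "new";
  }
  // Unreachable if the enum stays exhaustive; abort loudly otherwise.
  LOG(FATAL) << "Unhandled GraphViewKind enum";
}
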
@ -39,8 +39,8 @@ struct Parameters {
[&](const std::pair<int, query::TypedValue> a) {
return a.first == position;
});
permanent_assert(found != storage_.end(),
"Token position must be present in container");
CHECK(found != storage_.end())
<< "Token position must be present in container";
return found->second;
}

@ -52,8 +52,7 @@ struct Parameters {
* @return Token position and value for sought param.
*/
const std::pair<int, query::TypedValue> &At(int position) const {
permanent_assert(position < static_cast<int>(storage_.size()),
"Invalid position");
CHECK(position < static_cast<int>(storage_.size())) << "Invalid position";
return storage_[position];
}

@ -3,9 +3,10 @@
#include <functional>
#include <utility>

#include "glog/logging.h"

#include "storage/edge_accessor.hpp"
#include "storage/vertex_accessor.hpp"
#include "utils/assert.hpp"

namespace query {

@ -29,15 +30,15 @@ class Path {

/** Expands the path with the given vertex. */
void Expand(const VertexAccessor &vertex) {
debug_assert(vertices_.size() == edges_.size(),
"Illegal path construction order");
DCHECK(vertices_.size() == edges_.size())
<< "Illegal path construction order";
vertices_.emplace_back(vertex);
}

/** Expands the path with the given edge. */
void Expand(const EdgeAccessor &edge) {
debug_assert(vertices_.size() - 1 == edges_.size(),
"Illegal path construction order");
DCHECK(vertices_.size() - 1 == edges_.size())
<< "Illegal path construction order";
edges_.emplace_back(edge);
}

@ -61,8 +62,8 @@ class Path {
}

friend std::ostream &operator<<(std::ostream &os, const Path &path) {
debug_assert(path.vertices_.size() > 0U,
"Attempting to stream out an invalid path");
DCHECK(path.vertices_.size() > 0U)
<< "Attempting to stream out an invalid path";
os << path.vertices_[0];
for (int i = 0; i < static_cast<int>(path.edges_.size()); i++) {
bool arrow_to_left = path.vertices_[i] == path.edges_[i].to();

@ -3,13 +3,15 @@
#include <type_traits>
#include <utility>

#include "query/plan/operator.hpp"
#include "glog/logging.h"

#include "query/context.hpp"
#include "query/exceptions.hpp"
#include "query/frontend/ast/ast.hpp"
#include "query/interpret/eval.hpp"

#include "query/plan/operator.hpp"

// macro for the default implementation of LogicalOperator::Accept
// that accepts the visitor and visits it's input_ operator
#define ACCEPT_WITH_INPUT(class_name) \
@ -251,8 +253,8 @@ ScanAll::ScanAll(const std::shared_ptr<LogicalOperator> &input,
: input_(input ? input : std::make_shared<Once>()),
output_symbol_(output_symbol),
graph_view_(graph_view) {
permanent_assert(graph_view != GraphView::AS_IS,
"ScanAll must have explicitly defined GraphView");
CHECK(graph_view != GraphView::AS_IS)
<< "ScanAll must have explicitly defined GraphView";
}

ACCEPT_WITH_INPUT(ScanAll)
@ -290,7 +292,7 @@ ScanAllByLabelPropertyRange::ScanAllByLabelPropertyRange(
property_(property),
lower_bound_(lower_bound),
upper_bound_(upper_bound) {
debug_assert(lower_bound_ || upper_bound_, "Only one bound can be left out");
DCHECK(lower_bound_ || upper_bound_) << "Only one bound can be left out";
}

ACCEPT_WITH_INPUT(ScanAllByLabelPropertyRange)
@ -302,10 +304,10 @@ std::unique_ptr<Cursor> ScanAllByLabelPropertyRange::MakeCursor(
context.symbol_table_, db, graph_view_);
auto convert = [&evaluator](const auto &bound)
-> std::experimental::optional<utils::Bound<PropertyValue>> {
if (!bound) return std::experimental::nullopt;
return std::experimental::make_optional(utils::Bound<PropertyValue>(
bound.value().value()->Accept(evaluator), bound.value().type()));
};
if (!bound) return std::experimental::nullopt;
return std::experimental::make_optional(utils::Bound<PropertyValue>(
bound.value().value()->Accept(evaluator), bound.value().type()));
};
return db.Vertices(label_, property_, convert(lower_bound()),
convert(upper_bound()), graph_view_ == GraphView::NEW);
};
@ -321,7 +323,7 @@ ScanAllByLabelPropertyValue::ScanAllByLabelPropertyValue(
label_(label),
property_(property),
expression_(expression) {
debug_assert(expression, "Expression is not optional.");
DCHECK(expression) << "Expression is not optional.";
}

ACCEPT_WITH_INPUT(ScanAllByLabelPropertyValue)
@ -430,7 +432,7 @@ bool Expand::ExpandCursor::Pull(Frame &frame, Context &context) {
frame[self_.node_symbol_] = new_edge.to();
break;
case EdgeAtom::Direction::BOTH:
permanent_fail("Must indicate exact expansion direction here");
LOG(FATAL) << "Must indicate exact expansion direction here";
}
};

@ -561,12 +563,11 @@ ExpandVariable::ExpandVariable(
inner_edge_symbol_(inner_edge_symbol),
inner_node_symbol_(inner_node_symbol),
filter_(filter) {
debug_assert(
type_ == EdgeAtom::Type::DEPTH_FIRST ||
type_ == EdgeAtom::Type::BREADTH_FIRST,
"ExpandVariable can only be used with breadth or depth first type");
debug_assert(!(type_ == EdgeAtom::Type::BREADTH_FIRST && is_reverse),
"Breadth first expansion can't be reversed");
DCHECK(type_ == EdgeAtom::Type::DEPTH_FIRST ||
type_ == EdgeAtom::Type::BREADTH_FIRST)
<< "ExpandVariable can only be used with breadth or depth first type";
DCHECK(!(type_ == EdgeAtom::Type::BREADTH_FIRST && is_reverse))
<< "Breadth first expansion can't be reversed";
}

ACCEPT_WITH_INPUT(ExpandVariable)
@ -747,7 +748,7 @@ class ExpandVariableCursor : public Cursor {
std::vector<TypedValue> &edges_on_frame) {
// We are placing an edge on the frame. It is possible that there already
// exists an edge on the frame for this level. If so first remove it.
debug_assert(edges_.size() > 0, "Edges are empty");
DCHECK(edges_.size() > 0) << "Edges are empty";
if (self_.is_reverse_) {
// TODO: This is innefficient, we should look into replacing
// vector with something else for TypedValue::List.
@ -1027,8 +1028,8 @@ class ConstructNamedPathCursor : public Cursor {
if (!input_cursor_->Pull(frame, context)) return false;

auto symbol_it = self_.path_elements().begin();
debug_assert(symbol_it != self_.path_elements().end(),
"Named path must contain at least one node");
DCHECK(symbol_it != self_.path_elements().end())
<< "Named path must contain at least one node";

TypedValue start_vertex = frame[*symbol_it++];

@ -1038,8 +1039,8 @@ class ConstructNamedPathCursor : public Cursor {
return true;
}

debug_assert(start_vertex.IsVertex(),
"First named path element must be a vertex");
DCHECK(start_vertex.IsVertex())
<< "First named path element must be a vertex";
query::Path path(start_vertex.ValueVertex());

// If the last path element symbol was for an edge list, then
@ -1079,7 +1080,7 @@ class ConstructNamedPathCursor : public Cursor {
break;
}
default:
permanent_fail("Unsupported type in named path construction");
LOG(FATAL) << "Unsupported type in named path construction";

break;
}
@ -1763,12 +1764,12 @@ void Aggregate::AggregateCursor::EnsureInitialized(
void Aggregate::AggregateCursor::Update(
Frame &, const SymbolTable &, ExpressionEvaluator &evaluator,
Aggregate::AggregateCursor::AggregationValue &agg_value) {
debug_assert(self_.aggregations_.size() == agg_value.values_.size(),
"Expected as much AggregationValue.values_ as there are "
"aggregations.");
debug_assert(self_.aggregations_.size() == agg_value.counts_.size(),
"Expected as much AggregationValue.counts_ as there are "
"aggregations.");
DCHECK(self_.aggregations_.size() == agg_value.values_.size())
<< "Expected as much AggregationValue.values_ as there are "
"aggregations.";
DCHECK(self_.aggregations_.size() == agg_value.counts_.size())
<< "Expected as much AggregationValue.counts_ as there are "
"aggregations.";

// we iterate over counts, values and aggregation info at the same time
auto count_it = agg_value.counts_.begin();
@ -1909,9 +1910,9 @@ void Aggregate::AggregateCursor::EnsureOkForAvgSum(
bool TypedValueVectorEqual::operator()(
const std::vector<TypedValue> &left,
const std::vector<TypedValue> &right) const {
debug_assert(left.size() == right.size(),
"TypedValueVector comparison should only be done over vectors "
"of the same size");
DCHECK(left.size() == right.size())
<< "TypedValueVector comparison should only be done over vectors "
"of the same size";
return std::equal(left.begin(), left.end(), right.begin(),
TypedValue::BoolEqual{});
}
@ -2075,9 +2076,9 @@ bool OrderBy::OrderByCursor::Pull(Frame &frame, Context &context) {
if (cache_it_ == cache_.end()) return false;

// place the output values on the frame
debug_assert(self_.output_symbols_.size() == cache_it_->second.size(),
"Number of values does not match the number of output symbols "
"in OrderBy");
DCHECK(self_.output_symbols_.size() == cache_it_->second.size())
<< "Number of values does not match the number of output symbols "
"in OrderBy";
auto output_sym_it = self_.output_symbols_.begin();
for (const TypedValue &output : cache_it_->second)
frame[*output_sym_it++] = output;
@ -2131,7 +2132,7 @@ bool OrderBy::TypedValueCompare(const TypedValue &a, const TypedValue &b) {
throw QueryRuntimeException(
"Comparison is not defined for values of type {}", a.type());
default:
permanent_fail("Unhandled comparison for types");
LOG(FATAL) << "Unhandled comparison for types";
}
}

@ -2140,8 +2141,8 @@ bool OrderBy::TypedValueVectorCompare::operator()(
const std::vector<TypedValue> &c2) const {
// ordering is invalid if there are more elements in the collections
// then there are in the ordering_ vector
debug_assert(c1.size() <= ordering_.size() && c2.size() <= ordering_.size(),
"Collections contain more elements then there are orderings");
DCHECK(c1.size() <= ordering_.size() && c2.size() <= ordering_.size())
<< "Collections contain more elements then there are orderings";

auto c1_it = c1.begin();
auto c2_it = c2.begin();
@ -2208,7 +2209,7 @@ bool Merge::MergeCursor::Pull(Frame &frame, Context &context) {
// and failed to pull from merge_match, we should create
__attribute__((unused)) bool merge_create_pull_result =
merge_create_cursor_->Pull(frame, context);
debug_assert(merge_create_pull_result, "MergeCreate must never fail");
DCHECK(merge_create_pull_result) << "MergeCreate must never fail";
return true;
}
// we have exhausted merge_match_cursor_ after 1 or more successful

@ -43,20 +43,20 @@ template <typename T>
auto ReducePattern(
Pattern &pattern, std::function<T(NodeAtom *)> base,
std::function<T(T, NodeAtom *, EdgeAtom *, NodeAtom *)> collect) {
debug_assert(!pattern.atoms_.empty(), "Missing atoms in pattern");
DCHECK(!pattern.atoms_.empty()) << "Missing atoms in pattern";
auto atoms_it = pattern.atoms_.begin();
auto current_node = dynamic_cast<NodeAtom *>(*atoms_it++);
debug_assert(current_node, "First pattern atom is not a node");
DCHECK(current_node) << "First pattern atom is not a node";
auto last_res = base(current_node);
// Remaining atoms need to follow sequentially as (EdgeAtom, NodeAtom)*
while (atoms_it != pattern.atoms_.end()) {
auto edge = dynamic_cast<EdgeAtom *>(*atoms_it++);
debug_assert(edge, "Expected an edge atom in pattern.");
debug_assert(atoms_it != pattern.atoms_.end(),
"Edge atom should not end the pattern.");
DCHECK(edge) << "Expected an edge atom in pattern.";
DCHECK(atoms_it != pattern.atoms_.end())
<< "Edge atom should not end the pattern.";
auto prev_node = current_node;
current_node = dynamic_cast<NodeAtom *>(*atoms_it++);
debug_assert(current_node, "Expected a node atom in pattern.");
DCHECK(current_node) << "Expected a node atom in pattern.";
last_res = collect(last_res, prev_node, edge, current_node);
}
return last_res;
@ -65,20 +65,20 @@ auto ReducePattern(
void ForEachPattern(
Pattern &pattern, std::function<void(NodeAtom *)> base,
std::function<void(NodeAtom *, EdgeAtom *, NodeAtom *)> collect) {
debug_assert(!pattern.atoms_.empty(), "Missing atoms in pattern");
DCHECK(!pattern.atoms_.empty()) << "Missing atoms in pattern";
auto atoms_it = pattern.atoms_.begin();
auto current_node = dynamic_cast<NodeAtom *>(*atoms_it++);
debug_assert(current_node, "First pattern atom is not a node");
DCHECK(current_node) << "First pattern atom is not a node";
base(current_node);
// Remaining atoms need to follow sequentially as (EdgeAtom, NodeAtom)*
while (atoms_it != pattern.atoms_.end()) {
auto edge = dynamic_cast<EdgeAtom *>(*atoms_it++);
debug_assert(edge, "Expected an edge atom in pattern.");
debug_assert(atoms_it != pattern.atoms_.end(),
"Edge atom should not end the pattern.");
DCHECK(edge) << "Expected an edge atom in pattern.";
DCHECK(atoms_it != pattern.atoms_.end())
<< "Edge atom should not end the pattern.";
auto prev_node = current_node;
current_node = dynamic_cast<NodeAtom *>(*atoms_it++);
debug_assert(current_node, "Expected a node atom in pattern.");
DCHECK(current_node) << "Expected a node atom in pattern.";
collect(prev_node, edge, current_node);
}
}
@ -100,8 +100,8 @@ class UsedSymbolsCollector : public HierarchicalTreeVisitor {
explicit UsedSymbolsCollector(const SymbolTable &symbol_table)
: symbol_table_(symbol_table) {}

using HierarchicalTreeVisitor::PreVisit;
using HierarchicalTreeVisitor::PostVisit;
using HierarchicalTreeVisitor::PreVisit;
using HierarchicalTreeVisitor::Visit;

bool PostVisit(All &all) override {
@ -181,14 +181,14 @@ class ReturnBodyContext : public HierarchicalTreeVisitor {
if (where) {
where->Accept(*this);
}
debug_assert(aggregations_.empty(),
"Unexpected aggregations in ORDER BY or WHERE");
DCHECK(aggregations_.empty())
<< "Unexpected aggregations in ORDER BY or WHERE";
}
}

using HierarchicalTreeVisitor::PostVisit;
using HierarchicalTreeVisitor::PreVisit;
using HierarchicalTreeVisitor::Visit;
using HierarchicalTreeVisitor::PostVisit;

bool Visit(PrimitiveLiteral &) override {
has_aggregation_.emplace_back(false);
@ -196,9 +196,9 @@ class ReturnBodyContext : public HierarchicalTreeVisitor {
}

bool PostVisit(ListLiteral &list_literal) override {
debug_assert(
list_literal.elements_.size() <= has_aggregation_.size(),
"Expected has_aggregation_ flags as much as there are list elements.");
DCHECK(list_literal.elements_.size() <= has_aggregation_.size())
<< "Expected has_aggregation_ flags as much as there are list "
"elements.";
bool has_aggr = false;
auto it = has_aggregation_.end();
std::advance(it, -list_literal.elements_.size());
@ -211,9 +211,8 @@ class ReturnBodyContext : public HierarchicalTreeVisitor {
}

bool PostVisit(MapLiteral &map_literal) override {
debug_assert(
map_literal.elements_.size() <= has_aggregation_.size(),
"Expected has_aggregation_ flags as much as there are map elements.");
DCHECK(map_literal.elements_.size() <= has_aggregation_.size())
<< "Expected has_aggregation_ flags as much as there are map elements.";
bool has_aggr = false;
auto it = has_aggregation_.end();
std::advance(it, -map_literal.elements_.size());
@ -229,8 +228,8 @@ class ReturnBodyContext : public HierarchicalTreeVisitor {
// Remove the symbol which is bound by all, because we are only interested
// in free (unbound) symbols.
used_symbols_.erase(symbol_table_.at(*all.identifier_));
debug_assert(has_aggregation_.size() >= 3U,
"Expected 3 has_aggregation_ flags for ALL arguments");
DCHECK(has_aggregation_.size() >= 3U)
<< "Expected 3 has_aggregation_ flags for ALL arguments";
bool has_aggr = false;
for (int i = 0; i < 3; ++i) {
has_aggr = has_aggr || has_aggregation_.back();
@ -289,14 +288,14 @@ class ReturnBodyContext : public HierarchicalTreeVisitor {
has_aggregation_.emplace_back(has_aggr);
// TODO: Once we allow aggregations here, insert appropriate stuff in
// group_by.
debug_assert(!has_aggr, "Currently aggregations in CASE are not allowed");
DCHECK(!has_aggr) << "Currently aggregations in CASE are not allowed";
return false;
}

bool PostVisit(Function &function) override {
debug_assert(function.arguments_.size() <= has_aggregation_.size(),
"Expected has_aggregation_ flags as much as there are "
"function arguments.");
DCHECK(function.arguments_.size() <= has_aggregation_.size())
<< "Expected has_aggregation_ flags as much as there are "
"function arguments.";
bool has_aggr = false;
auto it = has_aggregation_.end();
std::advance(it, -function.arguments_.size());
@ -310,8 +309,8 @@ class ReturnBodyContext : public HierarchicalTreeVisitor {

#define VISIT_BINARY_OPERATOR(BinaryOperator) \
bool PostVisit(BinaryOperator &op) override { \
debug_assert(has_aggregation_.size() >= 2U, \
"Expected at least 2 has_aggregation_ flags."); \
DCHECK(has_aggregation_.size() >= 2U) \
<< "Expected at least 2 has_aggregation_ flags."; \
/* has_aggregation_ stack is reversed, last result is from the 2nd */ \
/* expression. */ \
bool aggr2 = has_aggregation_.back(); \
@ -368,8 +367,8 @@ class ReturnBodyContext : public HierarchicalTreeVisitor {
}

bool PostVisit(NamedExpression &named_expr) override {
debug_assert(has_aggregation_.size() == 1U,
"Expected to reduce has_aggregation_ to single boolean.");
DCHECK(has_aggregation_.size() == 1U)
<< "Expected to reduce has_aggregation_ to single boolean.";
if (!has_aggregation_.back()) {
group_by_.emplace_back(named_expr.expression_);
}
@ -391,11 +390,10 @@ class ReturnBodyContext : public HierarchicalTreeVisitor {
// This should be used when body.all_identifiers is true, to generate
// expressions for Produce operator.
void ExpandUserSymbols() {
debug_assert(
named_expressions_.empty(),
"ExpandUserSymbols should be first to fill named_expressions_");
debug_assert(output_symbols_.empty(),
"ExpandUserSymbols should be first to fill output_symbols_");
DCHECK(named_expressions_.empty())
<< "ExpandUserSymbols should be first to fill named_expressions_";
DCHECK(output_symbols_.empty())
<< "ExpandUserSymbols should be first to fill output_symbols_";
for (const auto &symbol : bound_symbols_) {
if (!symbol.user_declared()) {
continue;
@ -536,7 +534,7 @@ std::vector<Expansion> NormalizePatterns(
for (const auto &pattern : patterns) {
if (pattern->atoms_.size() == 1U) {
auto *node = dynamic_cast<NodeAtom *>(pattern->atoms_[0]);
debug_assert(node, "First pattern atom is not a node");
DCHECK(node) << "First pattern atom is not a node";
expansions.emplace_back(Expansion{node});
} else {
ForEachPattern(*pattern, ignore_node, collect_expansion);
@ -719,7 +717,7 @@ LogicalOperator *GenCreateForPattern(
node_existing = true;
}
if (!BindSymbol(bound_symbols, symbol_table.at(*edge->identifier_))) {
permanent_fail("Symbols used for created edges cannot be redeclared.");
LOG(FATAL) << "Symbols used for created edges cannot be redeclared.";
}
return new CreateExpand(node, edge,
std::shared_ptr<LogicalOperator>(last_op),
@ -1004,8 +1002,8 @@ std::vector<QueryPart> CollectQueryParts(SymbolTable &symbol_table,
AddMatching(*match, symbol_table, storage,
query_part->optional_matching.back());
} else {
debug_assert(query_part->optional_matching.empty(),
"Match clause cannot follow optional match.");
DCHECK(query_part->optional_matching.empty())
<< "Match clause cannot follow optional match.";
AddMatching(*match, symbol_table, storage, query_part->matching);
}
} else {

@ -300,8 +300,8 @@ class RuleBasedPlanner {
}
int merge_id = 0;
for (auto &clause : query_part.remaining_clauses) {
debug_assert(dynamic_cast<Match *>(clause) == nullptr,
"Unexpected Match in remaining clauses");
DCHECK(dynamic_cast<Match *>(clause) == nullptr)
<< "Unexpected Match in remaining clauses";
if (auto *ret = dynamic_cast<Return *>(clause)) {
input_op =
impl::GenReturn(*ret, input_op, context.symbol_table, is_write,
@ -332,7 +332,7 @@ class RuleBasedPlanner {
unwind->named_expression_->expression_, symbol);
} else if (auto *create_index =
dynamic_cast<query::CreateIndex *>(clause)) {
debug_assert(!input_op, "Unexpected operator before CreateIndex");
DCHECK(!input_op) << "Unexpected operator before CreateIndex";
input_op = new plan::CreateIndex(create_index->label_,
create_index->property_);
} else {
@ -403,8 +403,8 @@ class RuleBasedPlanner {

const GraphDbTypes::Label &FindBestLabelIndex(
const std::unordered_set<GraphDbTypes::Label> &labels) {
debug_assert(!labels.empty(),
"Trying to find the best label without any labels.");
DCHECK(!labels.empty())
<< "Trying to find the best label without any labels.";
return *std::min_element(labels.begin(), labels.end(),
[this](const auto &label1, const auto &label2) {
return context_.db.VerticesCount(label1) <
@ -454,9 +454,8 @@ class RuleBasedPlanner {
best_property.first, prop_filter.lower_bound,
prop_filter.upper_bound, match_ctx.graph_view);
} else {
debug_assert(
prop_filter.expression,
"Property filter should either have bounds or an expression.");
DCHECK(prop_filter.expression)
<< "Property filter should either have bounds or an expression.";
return new ScanAllByLabelPropertyValue(
std::shared_ptr<LogicalOperator>(last_op), node_symbol, best_label,
best_property.first, prop_filter.expression, match_ctx.graph_view);
@ -519,8 +518,8 @@ class RuleBasedPlanner {
symbol_table.at(*expansion.node2->identifier_);
auto existing_node = utils::Contains(bound_symbols, node_symbol);
const auto &edge_symbol = symbol_table.at(*edge->identifier_);
debug_assert(!utils::Contains(bound_symbols, edge_symbol),
"Existing edges are not supported");
DCHECK(!utils::Contains(bound_symbols, edge_symbol))
<< "Existing edges are not supported";
if (edge->IsVariable()) {
Symbol inner_edge_symbol = symbol_table.at(*edge->inner_edge_);
Symbol inner_node_symbol = symbol_table.at(*edge->inner_node_);
@ -531,8 +530,8 @@ class RuleBasedPlanner {
impl::BindSymbol(bound_symbols, inner_edge_symbol);
bool inner_node_bound =
impl::BindSymbol(bound_symbols, inner_node_symbol);
debug_assert(inner_edge_bound && inner_node_bound,
"An inner edge and node can't be bound from before");
DCHECK(inner_edge_bound && inner_node_bound)
<< "An inner edge and node can't be bound from before";
}
auto *filter_expr = impl::BoolJoin<AndOperator>(
storage,
@ -614,7 +613,7 @@ class RuleBasedPlanner {
impl::GenFilters(last_op, bound_symbols, all_filters, storage);
}
}
debug_assert(all_filters.empty(), "Expected to generate all filters");
DCHECK(all_filters.empty()) << "Expected to generate all filters";
return last_op;
}

@ -633,12 +632,12 @@ class RuleBasedPlanner {
for (auto &set : merge.on_create_) {
on_create = impl::HandleWriteClause(set, on_create, context_.symbol_table,
context_.bound_symbols);
debug_assert(on_create, "Expected SET in MERGE ... ON CREATE");
DCHECK(on_create) << "Expected SET in MERGE ... ON CREATE";
}
for (auto &set : merge.on_match_) {
on_match = impl::HandleWriteClause(set, on_match, context_.symbol_table,
context_.bound_symbols);
debug_assert(on_match, "Expected SET in MERGE ... ON MATCH");
DCHECK(on_match) << "Expected SET in MERGE ... ON MATCH";
}
return new plan::Merge(std::shared_ptr<LogicalOperator>(input_op),
std::shared_ptr<LogicalOperator>(on_match),

@ -3,6 +3,8 @@
#include <limits>
#include <queue>

#include "glog/logging.h"

#include "utils/flag_validation.hpp"

DEFINE_VALIDATED_HIDDEN_uint64(
@ -89,10 +91,9 @@ void AddNextExpansions(
}
if (symbol_table.at(*expansion.node1->identifier_) != node_symbol) {
// We are not expanding from node1, so flip the expansion.
debug_assert(
expansion.node2 &&
symbol_table.at(*expansion.node2->identifier_) == node_symbol,
"Expected node_symbol to be bound in node2");
DCHECK(expansion.node2 &&
symbol_table.at(*expansion.node2->identifier_) == node_symbol)
<< "Expected node_symbol to be bound in node2";
if (expansion.edge->type_ != EdgeAtom::Type::BREADTH_FIRST) {
// BFS must *not* be flipped. Doing that changes the BFS results.
std::swap(expansion.node1, expansion.node2);
@ -214,9 +215,9 @@ class VaryMatchingStart {
current_matching_.expansions = ExpansionsFrom(
**start_nodes_it_, self_.matching_, self_.symbol_table_);
}
debug_assert(
start_nodes_it_ || self_.nodes_.empty(),
"start_nodes_it_ should only be nullopt when self_.nodes_ is empty");
DCHECK(start_nodes_it_ || self_.nodes_.empty())
<< "start_nodes_it_ should only be nullopt when self_.nodes_ is "
"empty";
if (is_done) {
start_nodes_it_ = self.nodes_.end();
}
@ -224,9 +225,9 @@ class VaryMatchingStart {

iterator &operator++() {
if (!start_nodes_it_) {
debug_assert(self_.nodes_.empty(),
"start_nodes_it_ should only be nullopt when self_.nodes_ "
"is empty");
DCHECK(self_.nodes_.empty())
<< "start_nodes_it_ should only be nullopt when self_.nodes_ "
"is empty";
start_nodes_it_ = self_.nodes_.end();
}
if (*start_nodes_it_ == self_.nodes_.end()) {

@ -81,9 +81,8 @@ class CartesianProduct {
sets_it->second++;
}
// We can now collect another product from the modified set iterators.
debug_assert(
current_product_.size() == sets_.size(),
"Expected size of current_product_ to match the size of sets_");
DCHECK(current_product_.size() == sets_.size())
<< "Expected size of current_product_ to match the size of sets_";
size_t i = 0;
// Change only the prefix of the product, remaining elements (after
// sets_it) should be the same.

@ -5,8 +5,9 @@
#include <iostream>
#include <memory>

#include "glog/logging.h"

#include "utils/algorithm.hpp"
#include "utils/assert.hpp"
#include "utils/exceptions.hpp"
#include "utils/hashing/fnv.hpp"

@ -46,7 +47,7 @@ TypedValue::TypedValue(const PropertyValue &value) {
return;
}
}
permanent_fail("Unsupported type");
LOG(FATAL) << "Unsupported type";
}

TypedValue::TypedValue(const TypedValue &other) : type_(other.type_) {
@ -81,7 +82,7 @@ TypedValue::TypedValue(const TypedValue &other) : type_(other.type_) {
new (&path_v) Path(other.path_v);
return;
}
permanent_fail("Unsupported TypedValue::Type");
LOG(FATAL) << "Unsupported TypedValue::Type";
}

TypedValue::operator PropertyValue() const {
@ -186,7 +187,7 @@ std::ostream &operator<<(std::ostream &os, const TypedValue::Type type) {
case TypedValue::Type::Path:
return os << "path";
}
permanent_fail("Unsupported TypedValue::Type");
LOG(FATAL) << "Unsupported TypedValue::Type";
}

std::ostream &operator<<(std::ostream &os, const TypedValue &value) {
@ -219,7 +220,7 @@ std::ostream &operator<<(std::ostream &os, const TypedValue &value) {
case TypedValue::Type::Path:
return os << value.Value<Path>();
}
permanent_fail("Unsupported PropertyValue::Type");
LOG(FATAL) << "Unsupported PropertyValue::Type";
}

TypedValue &TypedValue::operator=(const TypedValue &other) {
@ -259,7 +260,7 @@ TypedValue &TypedValue::operator=(const TypedValue &other) {
new (&path_v) Path(other.path_v);
return *this;
}
permanent_fail("Unsupported TypedValue::Type");
LOG(FATAL) << "Unsupported TypedValue::Type";
}
return *this;
}
@ -301,7 +302,7 @@ TypedValue::~TypedValue() {
path_v.~Path();
return;
}
permanent_fail("Unsupported TypedValue::Type");
LOG(FATAL) << "Unsupported TypedValue::Type";
}

/**
@ -423,7 +424,7 @@ TypedValue operator==(const TypedValue &a, const TypedValue &b) {
case TypedValue::Type::Path:
return a.ValuePath() == b.ValuePath();
default:
permanent_fail("Unhandled comparison for types");
LOG(FATAL) << "Unhandled comparison for types";
}
}

@ -619,9 +620,9 @@ bool TypedValue::BoolEqual::operator()(const TypedValue &lhs,
case TypedValue::Type::Null:
return false;
default:
permanent_fail(
"Equality between two TypedValues resulted in something other "
"then Null or bool");
LOG(FATAL)
<< "Equality between two TypedValues resulted in something other "
"then Null or bool";
}
}

@ -662,7 +663,7 @@ size_t TypedValue::Hash::operator()(const TypedValue &value) const {
FnvCollection<std::vector<EdgeAccessor>, EdgeAccessor>{}(
value.ValuePath().edges());
}
permanent_fail("Unhandled TypedValue.type() in hash function");
LOG(FATAL) << "Unhandled TypedValue.type() in hash function";
}

} // namespace query

@ -5,9 +5,9 @@
#include <limits>
#include <list>

#include "glog/logging.h"
#include "mvcc/record.hpp"
#include "transactions/transaction.hpp"
#include "utils/assert.hpp"

/**
 * @brief - Implements deferred deletion.
@ -31,8 +31,8 @@ class DeferredDeleter {
* @brief - check if everything is freed
*/
~DeferredDeleter() {
permanent_assert(objects_.size() == 0,
"Objects are not freed when calling the destructor.");
CHECK(objects_.size() == 0U)
<< "Objects are not freed when calling the destructor.";
}

/**

@ -3,7 +3,6 @@
#include "database/graph_db.hpp"
#include "storage/edge.hpp"
#include "storage/record_accessor.hpp"
#include "utils/assert.hpp"

// forward declaring the VertexAccessor because it's returned
// by some functions
@ -58,4 +57,4 @@ template <>
struct hash<EdgeAccessor> {
size_t operator()(const EdgeAccessor &e) const { return e.temporary_id(); };
};
}
} // namespace std

@ -4,6 +4,7 @@
#include <vector>

#include "database/graph_db_datatypes.hpp"
#include "glog/logging.h"
#include "mvcc/version_list.hpp"
#include "utils/algorithm.hpp"

@ -89,9 +90,10 @@ class Edges {
* present in this iterator. */
void update_position() {
if (vertex_) {
position_ = std::find_if(
position_, end_,
[v = this->vertex_](const Element &e) { return e.vertex == v; });
position_ = std::find_if(position_,
end_, [v = this->vertex_](const Element &e) {
return e.vertex == v;
});
}
if (edge_types_) {
position_ = std::find_if(position_, end_, [this](const Element &e) {
@ -122,8 +124,7 @@ class Edges {
auto found = std::find_if(
storage_.begin(), storage_.end(),
[edge](const Element &element) { return edge == element.edge; });
debug_assert(found != storage_.end(),
"Removing an edge that is not present");
DCHECK(found != storage_.end()) << "Removing an edge that is not present";
*found = std::move(storage_.back());
storage_.pop_back();
}

@ -5,7 +5,7 @@
#include <iostream>
#include <memory>

#include "utils/assert.hpp"
#include "glog/logging.h"

// Value extraction template instantiations
template <>
@ -89,7 +89,7 @@ PropertyValue::PropertyValue(const PropertyValue &other) : type_(other.type_) {
return;
}

permanent_fail("Unsupported PropertyValue::Type");
LOG(FATAL) << "Unsupported PropertyValue::Type";
}

std::ostream &operator<<(std::ostream &os, const PropertyValue::Type type) {
@ -109,7 +109,7 @@ std::ostream &operator<<(std::ostream &os, const PropertyValue::Type type) {
case PropertyValue::Type::Map:
return os << "map";
}
permanent_fail("Unsupported PropertyValue::Type");
LOG(FATAL) << "Unsupported PropertyValue::Type";
}

std::ostream &operator<<(std::ostream &os, const PropertyValue &value) {
@ -138,7 +138,7 @@ std::ostream &operator<<(std::ostream &os, const PropertyValue &value) {
}
return os << "}";
}
permanent_fail("Unsupported PropertyValue::Type");
LOG(FATAL) << "Unsupported PropertyValue::Type";
}

PropertyValue &PropertyValue::operator=(const PropertyValue &other) {
@ -169,7 +169,7 @@ PropertyValue &PropertyValue::operator=(const PropertyValue &other) {
return *this;
}
}
permanent_fail("Unsupported PropertyValue::Type");
LOG(FATAL) << "Unsupported PropertyValue::Type";
}

const PropertyValue PropertyValue::Null = PropertyValue();
@ -194,5 +194,5 @@ PropertyValue::~PropertyValue() {
map_v.~shared_ptr<std::map<std::string, PropertyValue>>();
return;
}
permanent_fail("Unsupported PropertyValue::Type");
LOG(FATAL) << "Unsupported PropertyValue::Type";
}

@ -1,6 +1,5 @@
#pragma once

#include <cassert>
#include <iostream>
#include <map>
#include <memory>

@ -1,8 +1,9 @@
|
||||
#include "storage/record_accessor.hpp"
|
||||
#include "glog/logging.h"
|
||||
|
||||
#include "database/graph_db_accessor.hpp"
|
||||
#include "storage/edge.hpp"
|
||||
#include "storage/record_accessor.hpp"
|
||||
#include "storage/vertex.hpp"
|
||||
#include "utils/assert.hpp"
|
||||
|
||||
template <typename TRecord>
|
||||
RecordAccessor<TRecord>::RecordAccessor(mvcc::VersionList<TRecord> &vlist,
|
||||
@ -51,8 +52,8 @@ RecordAccessor<TRecord> &RecordAccessor<TRecord>::SwitchNew() {
|
||||
// to the same value as it has now, and the amount of work is the
|
||||
// same as just looking for a new_ record
|
||||
if (!Reconstruct())
|
||||
debug_fail(
|
||||
"RecordAccessor::SwitchNew - accessor invalid after Reconstruct");
|
||||
DLOG(FATAL)
|
||||
<< "RecordAccessor::SwitchNew - accessor invalid after Reconstruct";
|
||||
}
|
||||
current_ = new_ ? new_ : old_;
|
||||
return *this;
|
||||
@ -72,14 +73,13 @@ bool RecordAccessor<TRecord>::Reconstruct() {
|
||||
template <typename TRecord>
|
||||
TRecord &RecordAccessor<TRecord>::update() {
|
||||
db_accessor().Update(*this);
|
||||
debug_assert(new_ != nullptr, "RecordAccessor.new_ is null after update");
|
||||
DCHECK(new_ != nullptr) << "RecordAccessor.new_ is null after update";
|
||||
return *new_;
|
||||
}
|
||||
|
||||
template <typename TRecord>
|
||||
const TRecord &RecordAccessor<TRecord>::current() const {
|
||||
debug_assert(current_ != nullptr,
|
||||
"RecordAccessor.current_ pointer is nullptr");
|
||||
DCHECK(current_ != nullptr) << "RecordAccessor.current_ pointer is nullptr";
|
||||
return *current_;
|
||||
}
|
||||
|
||||
|
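A note on the SwitchNew change above: the old debug_fail aborted only in builds without NDEBUG, and DLOG(FATAL) preserves that, because glog compiles DLOG statements down to a no-op when NDEBUG is defined. A small sketch of the behaviour, with a hypothetical reconstruct step that is not part of RecordAccessor:

#include "glog/logging.h"

// Hypothetical stand-in for Reconstruct(), used only to show the semantics.
bool TryReconstruct() { return false; }

void SwitchToNewVersion() {
  if (!TryReconstruct()) {
    // Debug build (NDEBUG not defined): logs and aborts, like debug_fail.
    // Release build (NDEBUG defined): compiled to a no-op, execution continues.
    DLOG(FATAL) << "accessor invalid after Reconstruct";
  }
}

int main() {
  SwitchToNewVersion();
  LOG(INFO) << "Reached only when NDEBUG is defined.";
  return 0;
}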
@ -5,6 +5,7 @@
|
||||
#include "storage/property_value.hpp"
|
||||
#include "utils/total_ordering.hpp"
|
||||
|
||||
#include "glog/logging.h"
|
||||
#include "storage/property_value_store.hpp"
|
||||
|
||||
class GraphDbAccessor;
|
||||
@ -88,14 +89,14 @@ class RecordAccessor : public TotalOrdering<RecordAccessor<TRecord>> {
|
||||
* not actual values inside RecordAccessors.
|
||||
*/
|
||||
bool operator<(const RecordAccessor &other) const {
|
||||
debug_assert(db_accessor_ == other.db_accessor_,
|
||||
"Not in the same transaction.");
|
||||
DCHECK(db_accessor_ == other.db_accessor_)
|
||||
<< "Not in the same transaction.";
|
||||
return vlist_ < other.vlist_;
|
||||
}
|
||||
|
||||
bool operator==(const RecordAccessor &other) const {
|
||||
debug_assert(db_accessor_ == other.db_accessor_,
|
||||
"Not in the same transaction.");
|
||||
DCHECK(db_accessor_ == other.db_accessor_)
|
||||
<< "Not in the same transaction.";
|
||||
return vlist_ == other.vlist_;
|
||||
}
|
||||
|
||||
|
@ -9,15 +9,16 @@
|
||||
#include <sys/types.h>
|
||||
#include <unistd.h>
|
||||
|
||||
#include "threading/sync/lock_timeout_exception.hpp"
|
||||
#include "glog/logging.h"
|
||||
#include "threading/sync/cpu_relax.hpp"
|
||||
#include "threading/sync/lock_timeout_exception.hpp"
|
||||
|
||||
namespace sys {
|
||||
inline int futex(void *addr1, int op, int val1, const struct timespec *timeout,
|
||||
void *addr2, int val3) {
|
||||
return syscall(SYS_futex, addr1, op, val1, timeout, addr2, val3);
|
||||
};
|
||||
}
|
||||
} // namespace sys
|
||||
|
||||
class Futex {
|
||||
using futex_t = uint32_t;
|
||||
|
@ -1,8 +1,8 @@
|
||||
#include "glog/logging.h"
|
||||
#include "thread.hpp"
|
||||
#include "utils/assert.hpp"
|
||||
|
||||
Thread::Thread(Thread &&other) {
|
||||
debug_assert(thread_id == UNINITIALIZED, "Thread was initialized before.");
|
||||
DCHECK(thread_id == UNINITIALIZED) << "Thread was initialized before.";
|
||||
thread_id = other.thread_id;
|
||||
thread = std::move(other.thread);
|
||||
}
|
||||
|
@ -4,6 +4,7 @@
|
||||
#include <limits>
|
||||
#include <vector>
|
||||
|
||||
#include "glog/logging.h"
|
||||
#include "threading/sync/lockable.hpp"
|
||||
#include "threading/sync/spinlock.hpp"
|
||||
#include "transactions/commit_log.hpp"
|
||||
@ -63,12 +64,12 @@ class Engine : Lockable<SpinLock> {
|
||||
auto guard = this->acquire_unique();
|
||||
|
||||
auto *t = store_.get(id);
|
||||
debug_assert(t != nullptr,
|
||||
"Transaction::advance on non-existing transaction");
|
||||
DCHECK(t != nullptr) << "Transaction::advance on non-existing transaction";
|
||||
|
||||
if (t->cid_ == kMaxCommandId)
|
||||
throw TransactionError(
|
||||
"Reached maximum number of commands in this transaction.");
|
||||
"Reached maximum number of commands in this "
|
||||
"transaction.");
|
||||
|
||||
t->cid_++;
|
||||
return *t;
|
||||
@ -177,4 +178,4 @@ class Engine : Lockable<SpinLock> {
|
||||
ConcurrentMap<transaction_id_t, transaction_id_t> lock_graph_;
|
||||
std::atomic<transaction_id_t> counter_{0};
|
||||
};
|
||||
}
|
||||
} // namespace tx
|
||||
|
@ -2,10 +2,10 @@
|
||||
|
||||
#include <memory>
|
||||
#include <vector>
|
||||
#include "glog/logging.h"
|
||||
#include "storage/locking/lock_status.hpp"
|
||||
#include "storage/locking/record_lock.hpp"
|
||||
#include "transactions/type.hpp"
|
||||
#include "utils/assert.hpp"
|
||||
|
||||
namespace tx {
|
||||
|
||||
@ -19,7 +19,7 @@ class LockStore {
|
||||
|
||||
LockHolder(RecordLock *lock, const Transaction &tx, tx::Engine &engine)
|
||||
: lock_(lock) {
|
||||
debug_assert(lock != nullptr, "Lock is nullptr.");
|
||||
DCHECK(lock != nullptr) << "Lock is nullptr.";
|
||||
auto status = lock_->Lock(tx, engine);
|
||||
|
||||
if (status != LockStatus::Acquired) {
|
||||
@ -64,4 +64,4 @@ class LockStore {
|
||||
private:
|
||||
std::vector<LockHolder> locks_;
|
||||
};
|
||||
}
|
||||
} // namespace tx
|
||||
|
@ -4,9 +4,9 @@
|
||||
#include <iostream>
|
||||
#include <vector>
|
||||
|
||||
#include "glog/logging.h"
|
||||
#include "transactions/type.hpp"
|
||||
#include "utils/algorithm.hpp"
|
||||
#include "utils/assert.hpp"
|
||||
|
||||
namespace tx {
|
||||
|
||||
@ -42,9 +42,8 @@ class Snapshot {
|
||||
*/
|
||||
void insert(transaction_id_t id) {
|
||||
transaction_ids_.push_back(id);
|
||||
debug_assert(
|
||||
std::is_sorted(transaction_ids_.begin(), transaction_ids_.end()),
|
||||
"Snapshot must be sorted");
|
||||
DCHECK(std::is_sorted(transaction_ids_.begin(), transaction_ids_.end()))
|
||||
<< "Snapshot must be sorted";
|
||||
}
|
||||
|
||||
/** Removes the given transaction id from this Snapshot.
|
||||
@ -58,12 +57,12 @@ class Snapshot {
|
||||
}
|
||||
|
||||
transaction_id_t front() const {
|
||||
debug_assert(transaction_ids_.size(), "Snapshot.front() on empty Snapshot");
|
||||
DCHECK(transaction_ids_.size()) << "Snapshot.front() on empty Snapshot";
|
||||
return transaction_ids_.front();
|
||||
}
|
||||
|
||||
transaction_id_t back() const {
|
||||
debug_assert(transaction_ids_.size(), "Snapshot.back() on empty Snapshot");
|
||||
DCHECK(transaction_ids_.size()) << "Snapshot.back() on empty Snapshot";
|
||||
return transaction_ids_.back();
|
||||
}
|
||||
|
||||
@ -88,4 +87,4 @@ class Snapshot {
|
||||
private:
|
||||
std::vector<transaction_id_t> transaction_ids_;
|
||||
};
|
||||
}
|
||||
} // namespace tx
|
||||
|
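One practical consequence of the Snapshot change above: when NDEBUG is defined, glog's DCHECK does not evaluate its condition at run time, so an O(n) invariant check such as std::is_sorted adds no cost to release builds. A standalone sketch of the same idea (the vector and values are illustrative):

#include <algorithm>
#include <cstdint>
#include <vector>

#include "glog/logging.h"

void Insert(std::vector<uint64_t> &ids, uint64_t id) {
  ids.push_back(id);
  // Debug builds: scans the whole vector and aborts if the invariant breaks.
  // Release builds (NDEBUG): the is_sorted call is not executed at all.
  DCHECK(std::is_sorted(ids.begin(), ids.end())) << "ids must stay sorted";
}

int main() {
  std::vector<uint64_t> ids;
  for (uint64_t i = 1; i <= 5; ++i) Insert(ids, i);
  return 0;
}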
@ -20,89 +20,6 @@
|
||||
std::cerr << "ASSERT: " << message << std::endl; \
|
||||
std::cerr << stacktrace.dump();
|
||||
#else
|
||||
#define __handle_assert_message(message) \
|
||||
#define __handle_assert_message(message) \
|
||||
std::cerr << "ASSERT: " << message << std::endl;
|
||||
#endif
|
||||
|
||||
/**
|
||||
* Always check that the condition is satisfied, otherwise abort the program.
|
||||
*
|
||||
* Unlike @c debug_assert, @c permanent_assert is always active. A good use-case
|
||||
* for this type of assert is during unit testing, because assert has to be
|
||||
* active regardless of the build type.
|
||||
*
|
||||
* @param condition Expression which has to evaluate to @c true.
|
||||
* @param message Message that is to be displayed before aborting, if the
|
||||
* evaluated @c condition is @c false.
|
||||
*
|
||||
* @sa permanent_fail
|
||||
* @sa debug_assert
|
||||
* @sa debug_fail
|
||||
*/
|
||||
#define permanent_assert(condition, message) \
|
||||
if (!(condition)) { \
|
||||
std::ostringstream s; \
|
||||
s << message; \
|
||||
__handle_assert_message(s.str()); \
|
||||
std::abort(); \
|
||||
}
|
||||
|
||||
/**
|
||||
* Always abort the program with given message.
|
||||
*
|
||||
* Unlike @c debug_fail, @c permanent_fail is always active. This should be used
|
||||
* like @c permanent_assert, but when the condition cannot be a simple
|
||||
* expression.
|
||||
*
|
||||
* @param message Message to display before aborting.
|
||||
*
|
||||
* @sa permanent_assert
|
||||
* @sa debug_assert
|
||||
* @sa debug_fail
|
||||
*/
|
||||
#define permanent_fail(message) \
|
||||
{ \
|
||||
std::ostringstream s; \
|
||||
s << message; \
|
||||
__handle_assert_message(s.str()); \
|
||||
std::abort(); \
|
||||
}
|
||||
|
||||
/**
|
||||
* @def debug_assert(condition, message)
|
||||
* Check that the condition is satisfied, otherwise abort the program.
|
||||
*
|
||||
* This is like @c permanent_assert, but the @c NDEBUG define controls
|
||||
* whether this assertion is active. With this define, @c debug_assert will do
|
||||
* nothing. Therefore, this is more like the standard C @c assert facility and
|
||||
* it should be used as such. For example, validating pre and post conditions of
|
||||
* a function.
|
||||
*
|
||||
* @sa debug_fail
|
||||
* @sa permanent_assert
|
||||
* @sa permanent_fail
|
||||
*/
|
||||
|
||||
/**
|
||||
* @def debug_fail(message)
|
||||
* Abort the program with given message.
|
||||
*
|
||||
* This is like @c permanent_fail, but the @c NDEBUG define controls
|
||||
* whether this assertion is active. With this define, @c debug_fail will do
|
||||
* nothing. This should be used like @c debug_assert, but when the condition
|
||||
* cannot be a simple expression.
|
||||
*
|
||||
* @sa debug_assert
|
||||
* @sa permanent_assert
|
||||
* @sa permanent_fail
|
||||
*/
|
||||
|
||||
#ifndef NDEBUG
|
||||
#define debug_assert(condition, message) permanent_assert(condition, message)
|
||||
#define debug_fail(message) permanent_fail(message)
|
||||
#else
|
||||
#define debug_assert(condition, message) \
|
||||
{}
|
||||
#define debug_fail(message) \
|
||||
{}
|
||||
#endif
|
||||
|
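For readers following the refactor, the utils/assert.hpp macros removed above map onto glog essentially one-for-one, and the streaming message style carries over because the old macros already built their messages with an ostringstream. A sketch of the intended correspondence (the function and values are illustrative, not taken from the sources):

#include "glog/logging.h"

void Correspondence(int size, bool recoverable) {
  // permanent_assert(cond, msg)  ->  CHECK(cond) << msg;   active in all builds
  CHECK(size >= 0) << "Size must be non-negative, got " << size;

  // debug_assert(cond, msg)      ->  DCHECK(cond) << msg;  no-op when NDEBUG is defined
  DCHECK(size < 1024) << "Unexpectedly large size: " << size;

  if (!recoverable) {
    // permanent_fail(msg)        ->  LOG(FATAL) << msg;    always logs and aborts
    LOG(FATAL) << "Unrecoverable state";
  }
  // debug_fail(msg)              ->  DLOG(FATAL) << msg;   aborts only in debug builds
}

int main() {
  Correspondence(10, true);
  return 0;
}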
@ -25,7 +25,6 @@ namespace fs = std::experimental::filesystem;
|
||||
#include <glog/logging.h>
|
||||
|
||||
#include "utils/algorithm.hpp"
|
||||
#include "utils/assert.hpp"
|
||||
#include "utils/exceptions.hpp"
|
||||
#include "utils/likely.hpp"
|
||||
#include "utils/underlying_cast.hpp"
|
||||
@ -45,7 +44,7 @@ void set_non_blocking(int fd) {
|
||||
if (UNLIKELY(status == -1))
|
||||
throw BasicException("Can't set NON_BLOCK flag to file descriptor");
|
||||
}
|
||||
}
|
||||
} // namespace linux_os
|
||||
|
||||
/**
|
||||
* Goes from first to last item in a container, if an element satisfying the
|
||||
@ -149,8 +148,7 @@ struct FSEventBase {
|
||||
struct WatchDescriptor : public FSEventBase {
|
||||
WatchDescriptor(const fs::path &directory, const FSEventType type)
|
||||
: FSEventBase(directory, type) {
|
||||
debug_assert(fs::is_directory(path),
|
||||
"The path parameter should be directory");
|
||||
DCHECK(fs::is_directory(path)) << "The path parameter should be a directory";
|
||||
}
|
||||
};
|
||||
|
||||
@ -356,10 +354,10 @@ class FSWatcher {
|
||||
// TODO: figure out why (it is not easy)
|
||||
if (((p - buffer_) + in_event_length) > IN_BUFF_LEN) break;
|
||||
// here should be an assertion
|
||||
// debug_assert(in_event_length <= IN_BUFF_SLOT_LEN,
|
||||
// DCHECK(in_event_length <= IN_BUFF_SLOT_LEN) <<
|
||||
// "Inotify event length cannot be bigger
|
||||
// than "
|
||||
// "Inotify slot length");
|
||||
// "Inotify slot length";
|
||||
|
||||
// skip if in_event is undefined OR is equal to IN_IGNORED
|
||||
if ((in_event->len == 0 && in_event->mask == 0) ||
|
||||
@ -471,4 +469,4 @@ class FSWatcher {
|
||||
*/
|
||||
char *buffer_[IN_BUFF_LEN];
|
||||
};
|
||||
}
|
||||
} // namespace utils
|
||||
|
@ -1,9 +1,10 @@
|
||||
#pragma once
|
||||
|
||||
#include <ext/aligned_buffer.h>
|
||||
#include "glog/logging.h"
|
||||
|
||||
#include <cstring>
|
||||
#include <utility>
|
||||
#include "utils/assert.hpp"
|
||||
|
||||
// Optional object storage. It may or may not
|
||||
// hold an object of type T.
|
||||
@ -85,7 +86,7 @@ class Option {
|
||||
bool is_present() const { return initialized; }
|
||||
|
||||
T &get() noexcept {
|
||||
debug_assert(initialized, "Not initialized.");
|
||||
DCHECK(initialized) << "Not initialized.";
|
||||
return *data._M_ptr();
|
||||
}
|
||||
|
||||
@ -107,7 +108,7 @@ class Option {
|
||||
}
|
||||
|
||||
const T &get() const noexcept {
|
||||
debug_assert(initialized, "Not initialized.");
|
||||
DCHECK(initialized) << "Not initialized.";
|
||||
return *data._M_ptr();
|
||||
}
|
||||
|
||||
@ -148,7 +149,7 @@ class Option {
|
||||
}
|
||||
|
||||
T take() {
|
||||
debug_assert(initialized, "Not initialized.");
|
||||
DCHECK(initialized) << "Not initialized.";
|
||||
initialized = false;
|
||||
return std::move(*data._M_ptr());
|
||||
}
|
||||
|
@ -2,20 +2,20 @@
|
||||
|
||||
#include <utility>
|
||||
|
||||
#include "utils/assert.hpp"
|
||||
#include "glog/logging.h"
|
||||
|
||||
#include <ext/aligned_buffer.h>
|
||||
|
||||
/**
|
||||
* @class Placeholder
|
||||
*
|
||||
* @brief
|
||||
* Placeholder is used to allocate memory for an object on heap providing
|
||||
* methods for setting and getting the object and making sure that the
|
||||
* object is initialized.
|
||||
*
|
||||
* @tparam T type of object to be wrapped in the placeholder
|
||||
*/
|
||||
* @class Placeholder
|
||||
*
|
||||
* @brief
|
||||
* Placeholder is used to allocate memory for an object on heap providing
|
||||
* methods for setting and getting the object and making sure that the
|
||||
* object is initialized.
|
||||
*
|
||||
* @tparam T type of object to be wrapped in the placeholder
|
||||
*/
|
||||
|
||||
template <class T>
|
||||
class Placeholder {
|
||||
@ -38,7 +38,7 @@ class Placeholder {
|
||||
bool is_initialized() { return initialized; }
|
||||
|
||||
T &get() noexcept {
|
||||
debug_assert(initialized, "Placeholder object not initialized");
|
||||
DCHECK(initialized) << "Placeholder object not initialized";
|
||||
return *data._M_ptr();
|
||||
}
|
||||
|
||||
@ -46,7 +46,7 @@ class Placeholder {
|
||||
* @return const reference to object.
|
||||
*/
|
||||
const T &get() const noexcept {
|
||||
debug_assert(initialized, "Placeholder object not initialized");
|
||||
DCHECK(initialized) << "Placeholder object not initialized";
|
||||
return *data._M_ptr();
|
||||
}
|
||||
|
||||
@ -56,7 +56,7 @@ class Placeholder {
|
||||
* @param T& item reference to the item initialized in allocated memory
|
||||
*/
|
||||
void set(const T &item) {
|
||||
debug_assert(!initialized, "Placeholder object already initialized");
|
||||
DCHECK(!initialized) << "Placeholder object already initialized";
|
||||
new (data._M_addr()) T(item);
|
||||
initialized = true;
|
||||
}
|
||||
@ -67,7 +67,7 @@ class Placeholder {
|
||||
* @param T&& rvalue reference to the item which is moved to allocated memory
|
||||
*/
|
||||
void set(T &&item) {
|
||||
debug_assert(!initialized, "Placeholder object already initialized");
|
||||
DCHECK(!initialized) << "Placeholder object already initialized";
|
||||
new (data._M_addr()) T(std::move(item));
|
||||
initialized = true;
|
||||
}
|
||||
@ -81,7 +81,7 @@ class Placeholder {
|
||||
*/
|
||||
template <class... Args>
|
||||
void emplace(Args &&... args) {
|
||||
debug_assert(!initialized, "Placeholder object already initialized");
|
||||
DCHECK(!initialized) << "Placeholder object already initialized";
|
||||
new (data._M_addr()) T(args...);
|
||||
initialized = true;
|
||||
}
|
||||
|
@ -27,7 +27,7 @@ namespace utils {
|
||||
* in the [from, to) range.
|
||||
*/
|
||||
auto RandomIntGenerator(int from, int to) {
|
||||
permanent_assert(from < to, "Must have from < to");
|
||||
CHECK(from < to) << "Must have from < to";
|
||||
int range = to - from;
|
||||
return [from, range]() -> int { return rand() % range + from; };
|
||||
}
|
||||
@ -119,8 +119,8 @@ class RandomGraphGenerator {
|
||||
auto from =
|
||||
dba.Transfer(vertices_from[rand() % vertices_from.size()]);
|
||||
auto to = dba.Transfer(vertices_to[rand() % vertices_to.size()]);
|
||||
debug_assert(from, "From not visible in current GraphDbAccessor");
|
||||
debug_assert(to, "From not visible in current GraphDbAccessor");
|
||||
DCHECK(from) << "From not visible in current GraphDbAccessor";
|
||||
DCHECK(to) << "From not visible in current GraphDbAccessor";
|
||||
dba.InsertEdge(from.value(), to.value(), edge_type);
|
||||
NotifyProgressListeners();
|
||||
},
|
||||
@ -195,7 +195,7 @@ class RandomGraphGenerator {
|
||||
*/
|
||||
void Map(std::function<void(GraphDbAccessor &)> f, int count,
|
||||
int thread_count, int elements_per_commit) {
|
||||
debug_assert(thread_count > 0, "Can't work on less then 1 thread");
|
||||
DCHECK(thread_count > 0) << "Can't work with fewer than 1 thread";
|
||||
|
||||
// split count across thread_count
|
||||
int count_per_thread = count / thread_count;
|
||||
@ -228,4 +228,4 @@ class RandomGraphGenerator {
|
||||
for (auto &thread : threads) thread.join();
|
||||
}
|
||||
};
|
||||
}
|
||||
} // namespace utils
|
||||
|
@ -4,9 +4,10 @@
|
||||
#include <chrono>
|
||||
#include <condition_variable>
|
||||
#include <ctime>
|
||||
#include <functional>
|
||||
#include <thread>
|
||||
|
||||
#include "utils/assert.hpp"
|
||||
#include "glog/logging.h"
|
||||
|
||||
/**
|
||||
* Class used to run scheduled function execution.
|
||||
@ -25,8 +26,8 @@ class Scheduler {
|
||||
template <typename TRep, typename TPeriod>
|
||||
void Run(const std::chrono::duration<TRep, TPeriod> &pause,
|
||||
const std::function<void()> &f) {
|
||||
debug_assert(is_working_ == false, "Thread already running.");
|
||||
debug_assert(pause > std::chrono::seconds(0), "Pause is invalid.");
|
||||
DCHECK(is_working_ == false) << "Thread already running.";
|
||||
DCHECK(pause > std::chrono::seconds(0)) << "Pause is invalid.";
|
||||
|
||||
is_working_ = true;
|
||||
thread_ = std::thread([this, pause, f]() {
|
||||
|
@ -1,3 +1,4 @@
|
||||
#include <iostream>
|
||||
#include <random>
|
||||
#include <thread>
|
||||
|
||||
|
@ -11,7 +11,6 @@
|
||||
#include "data_structures/concurrent/concurrent_map.hpp"
|
||||
#include "data_structures/concurrent/concurrent_set.hpp"
|
||||
#include "data_structures/concurrent/skiplist.hpp"
|
||||
#include "utils/assert.hpp"
|
||||
|
||||
// NOTE: this file is highly coupled to data_structures
|
||||
// TODO: REFACTOR
|
||||
@ -44,8 +43,7 @@ template <typename S>
|
||||
void check_present_same(typename S::Accessor &acc, size_t data,
|
||||
std::vector<size_t> &owned) {
|
||||
for (auto num : owned) {
|
||||
permanent_assert(acc.find(num)->second == data,
|
||||
"My data is present and my");
|
||||
CHECK(acc.find(num)->second == data) << "My data is present and mine";
|
||||
}
|
||||
}
|
||||
|
||||
@ -61,8 +59,8 @@ template <typename S>
|
||||
void check_size_list(S &acc, long long size) {
|
||||
// check size
|
||||
|
||||
permanent_assert(acc.size() == size,
|
||||
"Size should be " << size << ", but size is " << acc.size());
|
||||
CHECK(acc.size() == size)
|
||||
<< "Size should be " << size << ", but size is " << acc.size();
|
||||
|
||||
// check count
|
||||
|
||||
@ -71,16 +69,16 @@ void check_size_list(S &acc, long long size) {
|
||||
for ([[gnu::unused]] auto elem : acc) {
|
||||
++iterator_counter;
|
||||
}
|
||||
permanent_assert(static_cast<int64_t>(iterator_counter) == size,
|
||||
"Iterator count should be " << size << ", but size is "
|
||||
<< iterator_counter);
|
||||
CHECK(static_cast<int64_t>(iterator_counter) == size)
|
||||
<< "Iterator count should be " << size << ", but size is "
|
||||
<< iterator_counter;
|
||||
}
|
||||
template <typename S>
|
||||
void check_size(typename S::Accessor &acc, long long size) {
|
||||
// check size
|
||||
|
||||
permanent_assert(acc.size() == size,
|
||||
"Size should be " << size << ", but size is " << acc.size());
|
||||
CHECK(acc.size() == size)
|
||||
<< "Size should be " << size << ", but size is " << acc.size();
|
||||
|
||||
// check count
|
||||
|
||||
@ -89,9 +87,9 @@ void check_size(typename S::Accessor &acc, long long size) {
|
||||
for ([[gnu::unused]] auto elem : acc) {
|
||||
++iterator_counter;
|
||||
}
|
||||
permanent_assert(static_cast<int64_t>(iterator_counter) == size,
|
||||
"Iterator count should be " << size << ", but size is "
|
||||
<< iterator_counter);
|
||||
CHECK(static_cast<int64_t>(iterator_counter) == size)
|
||||
<< "Iterator count should be " << size << ", but size is "
|
||||
<< iterator_counter;
|
||||
}
|
||||
|
||||
// Checks if order in the list is maintained. It expects map
|
||||
@ -110,16 +108,14 @@ void check_order(typename S::Accessor &acc) {
|
||||
|
||||
void check_zero(size_t key_range, long array[], const char *str) {
|
||||
for (int i = 0; i < static_cast<int>(key_range); i++) {
|
||||
permanent_assert(array[i] == 0,
|
||||
str << " doesn't hold it's guarantees. It has " << array[i]
|
||||
<< " extra elements.");
|
||||
CHECK(array[i] == 0) << str << " doesn't hold its guarantees. It has "
|
||||
<< array[i] << " extra elements.";
|
||||
}
|
||||
}
|
||||
|
||||
void check_set(DynamicBitset<> &db, std::vector<bool> &set) {
|
||||
for (int i = 0; i < static_cast<int>(set.size()); i++) {
|
||||
permanent_assert(!(set[i] ^ db.at(i)),
|
||||
"Set constraints aren't fullfilled.");
|
||||
CHECK(!(set[i] ^ db.at(i))) << "Set constraints aren't fulfilled.";
|
||||
}
|
||||
}
|
||||
|
||||
|
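Because CHECK returns a stream, messages like "Size should be " << size in the hunks above port over without restructuring. glog also offers comparison forms such as CHECK_EQ that print both operand values on failure; a small sketch of how the size checks could alternatively be written (not something this commit does):

#include "glog/logging.h"

void CheckSize(long long actual, long long expected) {
  // On failure glog prints both operands, e.g. "(3 vs. 5)", plus the message.
  CHECK_EQ(actual, expected) << "Container size mismatch";
}

int main() {
  CheckSize(5, 5);
  return 0;
}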
@ -15,7 +15,7 @@ constexpr size_t no_insert_for_one_delete = 1;
|
||||
int main(int argc, char **argv) {
|
||||
google::InitGoogleLogging(argv[0]);
|
||||
ConcurrentList<std::pair<int, int>> list;
|
||||
permanent_assert(list.size() == 0, "The list isn't empty");
|
||||
CHECK(list.size() == 0) << "The list isn't empty";
|
||||
|
||||
auto futures =
|
||||
run<std::pair<long long, long long>>(THREADS_NO, [&](auto index) mutable {
|
||||
@ -47,7 +47,7 @@ int main(int argc, char **argv) {
|
||||
} else {
|
||||
for (auto &v : list) {
|
||||
if (v.first == num) {
|
||||
permanent_assert(v.second == data, "Data is invalid");
|
||||
CHECK(v.second == data) << "Data is invalid";
|
||||
break;
|
||||
}
|
||||
}
|
||||
@ -69,7 +69,7 @@ int main(int argc, char **argv) {
|
||||
sums -= e.second;
|
||||
}
|
||||
|
||||
permanent_assert(sums == 0, "Same values aren't present");
|
||||
CHECK(sums == 0) << "Same values aren't present";
|
||||
check_size_list<ConcurrentList<std::pair<int, int>>>(list, counters);
|
||||
|
||||
std::this_thread::sleep_for(1s);
|
||||
|
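The test main above now calls google::InitGoogleLogging(argv[0]). CHECK and LOG work even before initialization (output goes to stderr), but initializing once at startup is the usual pattern, and several of the changed test mains pair it with gflags parsing. A minimal sketch of such a main:

#include "gflags/gflags.h"
#include "glog/logging.h"

int main(int argc, char **argv) {
  gflags::ParseCommandLineFlags(&argc, &argv, true);
  google::InitGoogleLogging(argv[0]);

  CHECK(argc >= 1) << "argv must contain at least the program name";
  LOG(INFO) << "Logging initialized for " << argv[0];
  return 0;
}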
@ -23,9 +23,8 @@ void test_lock(int) {
|
||||
std::unique_lock<Futex> guard(futex);
|
||||
x++;
|
||||
std::this_thread::sleep_for(std::chrono::milliseconds(dis(gen)));
|
||||
permanent_assert(x == 1,
|
||||
"Other thread shouldn't be able to "
|
||||
"change the value of x");
|
||||
CHECK(x == 1) << "Other thread shouldn't be able to "
|
||||
"change the value of x";
|
||||
x--;
|
||||
}
|
||||
std::this_thread::sleep_for(std::chrono::milliseconds(dis(gen)));
|
||||
|
@ -30,8 +30,8 @@ int main(int, char **argv) {
|
||||
// get skiplist size
|
||||
{
|
||||
auto accessor = skiplist.access();
|
||||
permanent_assert(accessor.size() == THREADS_NO * elems_per_thread,
|
||||
"all elements in skiplist");
|
||||
CHECK(accessor.size() == THREADS_NO * elems_per_thread)
|
||||
<< "all elements in skiplist";
|
||||
}
|
||||
|
||||
for (size_t thread_i = 0; thread_i < THREADS_NO; ++thread_i) {
|
||||
@ -39,7 +39,7 @@ int main(int, char **argv) {
|
||||
[&skiplist](size_t start, size_t end) {
|
||||
auto accessor = skiplist.access();
|
||||
for (size_t elem_i = start; elem_i < end; ++elem_i) {
|
||||
permanent_assert(accessor.remove(elem_i) == true, "");
|
||||
CHECK(accessor.remove(elem_i) == true) << "";
|
||||
}
|
||||
},
|
||||
thread_i * elems_per_thread,
|
||||
@ -53,8 +53,8 @@ int main(int, char **argv) {
|
||||
// check size
|
||||
{
|
||||
auto accessor = skiplist.access();
|
||||
permanent_assert(accessor.size() == 0, "Size should be 0, but size is "
|
||||
<< accessor.size());
|
||||
CHECK(accessor.size() == 0)
|
||||
<< "Size should be 0, but size is " << accessor.size();
|
||||
}
|
||||
|
||||
// check count
|
||||
@ -65,11 +65,12 @@ int main(int, char **argv) {
|
||||
++iterator_counter;
|
||||
cout << elem.first << " ";
|
||||
}
|
||||
permanent_assert(iterator_counter == 0, "deleted elements");
|
||||
CHECK(iterator_counter == 0) << "deleted elements";
|
||||
}
|
||||
|
||||
{
|
||||
auto accessor = skiplist.access();
|
||||
check_order<map_t>(accessor);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
@ -35,8 +35,8 @@ int main(int, char **argv) {
|
||||
// get skiplist size
|
||||
{
|
||||
auto accessor = skiplist.access();
|
||||
permanent_assert(accessor.size() == THREADS_NO * elems_per_thread,
|
||||
"all elements in skiplist");
|
||||
CHECK(accessor.size() == THREADS_NO * elems_per_thread)
|
||||
<< "all elements in skiplist";
|
||||
}
|
||||
|
||||
for (size_t thread_i = 0; thread_i < THREADS_NO; ++thread_i) {
|
||||
@ -44,7 +44,7 @@ int main(int, char **argv) {
|
||||
[&skiplist](size_t start, size_t end) {
|
||||
auto accessor = skiplist.access();
|
||||
for (size_t elem_i = start; elem_i < end; ++elem_i) {
|
||||
permanent_assert(accessor.remove(elem_i) == true, "");
|
||||
CHECK(accessor.remove(elem_i) == true) << "";
|
||||
}
|
||||
},
|
||||
thread_i * elems_per_thread,
|
||||
@ -58,8 +58,8 @@ int main(int, char **argv) {
|
||||
// check size
|
||||
{
|
||||
auto accessor = skiplist.access();
|
||||
permanent_assert(accessor.size() == 0, "Size should be 0, but size is "
|
||||
<< accessor.size());
|
||||
CHECK(accessor.size() == 0)
|
||||
<< "Size should be 0, but size is " << accessor.size();
|
||||
}
|
||||
|
||||
// check count
|
||||
@ -70,6 +70,7 @@ int main(int, char **argv) {
|
||||
++iterator_counter;
|
||||
cout << elem.first << " ";
|
||||
}
|
||||
permanent_assert(iterator_counter == 0, "deleted elements");
|
||||
CHECK(iterator_counter == 0) << "deleted elements";
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
@ -58,7 +58,8 @@ int main(int argc, char **argv) {
|
||||
for (auto &e : accessor) {
|
||||
sums -= e.second;
|
||||
}
|
||||
permanent_assert(sums == 0, "Aproximetly Same values are present");
|
||||
CHECK(sums == 0) << "Aproximetly Same values are present";
|
||||
check_size<map_t>(accessor, counters);
|
||||
check_order<map_t>(accessor);
|
||||
return 0;
|
||||
}
|
||||
|
@ -26,7 +26,7 @@ int main(int argc, char **argv) {
|
||||
do {
|
||||
if (owned.size() != 0 && rand_op()) {
|
||||
auto rem = rand() % owned.size();
|
||||
permanent_assert(acc.remove(owned[rem]), "Owned data removed");
|
||||
CHECK(acc.remove(owned[rem])) << "Owned data removed";
|
||||
owned.erase(owned.begin() + rem);
|
||||
downcount--;
|
||||
} else {
|
||||
@ -46,4 +46,5 @@ int main(int argc, char **argv) {
|
||||
}
|
||||
check_size<map_t>(accessor, count);
|
||||
check_order<map_t>(accessor);
|
||||
return 0;
|
||||
}
|
||||
|
@ -56,7 +56,8 @@ int main(int argc, char **argv) {
|
||||
for (auto &e : accessor) {
|
||||
sums -= e.second;
|
||||
}
|
||||
permanent_assert(sums == 0, "Aproximetly Same values are present");
|
||||
CHECK(sums == 0) << "Aproximetly Same values are present";
|
||||
check_size<map_t>(accessor, counters);
|
||||
check_order<map_t>(accessor);
|
||||
return 0;
|
||||
}
|
||||
|
@ -50,9 +50,9 @@ int main(int argc, char **argv) {
|
||||
|
||||
auto accessor = skiplist.access();
|
||||
for (int i = 0; i < key_range; i++) {
|
||||
permanent_assert(set[i] == 0 || set[i] == 1 ||
|
||||
(set[i] == 1) ^ accessor.contains(std::to_string(i)),
|
||||
"Set doesn't hold it's guarantees.");
|
||||
CHECK(set[i] == 0 || set[i] == 1 ||
|
||||
(set[i] == 1) ^ accessor.contains(std::to_string(i)))
|
||||
<< "Set doesn't hold it's guarantees.";
|
||||
}
|
||||
|
||||
for (auto &e : accessor) {
|
||||
@ -60,4 +60,5 @@ int main(int argc, char **argv) {
|
||||
}
|
||||
|
||||
check_zero(key_range, set, "Set");
|
||||
return 0;
|
||||
}
|
||||
|
@ -43,8 +43,8 @@ int main(int argc, char **argv) {
|
||||
}
|
||||
} else {
|
||||
auto value = acc.find(num);
|
||||
permanent_assert(value == acc.end() || value->second == data,
|
||||
"Data is invalid");
|
||||
CHECK(value == acc.end() || value->second == data)
|
||||
<< "Data is invalid";
|
||||
}
|
||||
}
|
||||
|
||||
@ -62,7 +62,7 @@ int main(int argc, char **argv) {
|
||||
for (auto &e : accessor) {
|
||||
sums -= e.second;
|
||||
}
|
||||
permanent_assert(sums == 0, "Same values aren't present");
|
||||
CHECK(sums == 0) << "Same values aren't present";
|
||||
check_size<map_t>(accessor, counters);
|
||||
check_order<map_t>(accessor);
|
||||
}
|
||||
|
@ -4,8 +4,9 @@
|
||||
#include <thread>
|
||||
#include <vector>
|
||||
|
||||
#include "glog/logging.h"
|
||||
|
||||
#include "threading/sync/spinlock.hpp"
|
||||
#include "utils/assert.hpp"
|
||||
|
||||
int x = 0;
|
||||
SpinLock lock;
|
||||
@ -19,10 +20,8 @@ void test_lock() {
|
||||
|
||||
std::this_thread::sleep_for(25ms);
|
||||
|
||||
permanent_assert(
|
||||
x < 2,
|
||||
"x always has to be less than 2 (other "
|
||||
"threads shouldn't be able to change the x simultaneously");
|
||||
CHECK(x < 2) << "x always has to be less than 2 (other "
|
||||
"threads shouldn't be able to change the x simultaneously";
|
||||
x--;
|
||||
}
|
||||
}
|
||||
|
@ -1,8 +1,9 @@
|
||||
#include <thread>
|
||||
#include <vector>
|
||||
|
||||
#include "glog/logging.h"
|
||||
|
||||
#include "transactions/engine.hpp"
|
||||
#include "utils/assert.hpp"
|
||||
|
||||
int main() {
|
||||
// (try to) test correctness of the transaction life cycle
|
||||
@ -41,5 +42,6 @@ int main() {
|
||||
for (uint64_t i = 1; i <= THREADS * TRANSACTIONS; ++i) sum_actual += i;
|
||||
|
||||
std::cout << sum_computed << " " << sum_actual << std::endl;
|
||||
permanent_assert(sum_computed == sum_actual, "sums have to be the same");
|
||||
CHECK(sum_computed == sum_actual) << "sums have to be the same";
|
||||
return 0;
|
||||
}
|
||||
|
@ -142,7 +142,7 @@ class GraphState {
|
||||
// Gets the ID of a random node that has the given label.
|
||||
int64_t RandomNode(const std::string &label) {
|
||||
auto found = label_nodes_.find(label);
|
||||
permanent_assert(found != label_nodes_.end(), "Label not found");
|
||||
CHECK(found != label_nodes_.end()) << "Label not found";
|
||||
return found->second[rand_(gen_) * found->second.size()];
|
||||
}
|
||||
|
||||
@ -167,7 +167,7 @@ class ValueGenerator {
|
||||
std::unordered_map<std::string, query::TypedValue> props;
|
||||
if (config.is_null()) return props;
|
||||
|
||||
permanent_assert(config.is_object(), "Properties config must be a dict");
|
||||
CHECK(config.is_object()) << "Properties config must be a dict";
|
||||
for (auto it = config.begin(); it != config.end(); it++) {
|
||||
auto value = MakeValue(it.value());
|
||||
if (value) props.emplace(it.key(), *value);
|
||||
@ -193,7 +193,7 @@ class ValueGenerator {
|
||||
else if (type == "randstring")
|
||||
return TypedValue(RandString(param));
|
||||
else
|
||||
permanent_fail("Unknown value type");
|
||||
LOG(FATAL) << "Unknown value type";
|
||||
} else
|
||||
return Primitive(config);
|
||||
}
|
||||
@ -204,7 +204,7 @@ class ValueGenerator {
|
||||
if (config.is_number_float()) return config.get<double>();
|
||||
if (config.is_boolean()) return config.get<bool>();
|
||||
|
||||
permanent_fail("Unsupported primitive type");
|
||||
LOG(FATAL) << "Unsupported primitive type";
|
||||
}
|
||||
|
||||
int64_t Counter(const std::string &name) {
|
||||
@ -218,12 +218,11 @@ class ValueGenerator {
|
||||
}
|
||||
|
||||
int64_t RandInt(const json &range) {
|
||||
permanent_assert(range.is_array() && range.size() == 2,
|
||||
"RandInt value gen config must be a list with 2 elements");
|
||||
CHECK(range.is_array() && range.size() == 2)
|
||||
<< "RandInt value gen config must be a list with 2 elements";
|
||||
auto from = MakeValue(range[0])->ValueInt();
|
||||
auto to = MakeValue(range[1])->ValueInt();
|
||||
permanent_assert(from < to,
|
||||
"RandInt lower range must be lesser then upper range");
|
||||
CHECK(from < to) << "RandInt lower range must be lesser then upper range";
|
||||
return (int64_t)(rand_(gen_) * (to - from)) + from;
|
||||
}
|
||||
|
||||
@ -244,9 +243,8 @@ class ValueGenerator {
|
||||
bool Bernoulli(double p) { return rand_(gen_) < p; }
|
||||
|
||||
std::experimental::optional<TypedValue> Optional(const json &config) {
|
||||
permanent_assert(
|
||||
config.is_array() && config.size() == 2,
|
||||
"Optional value gen config must be a list with 2 elements");
|
||||
CHECK(config.is_array() && config.size() == 2)
|
||||
<< "Optional value gen config must be a list with 2 elements";
|
||||
return Bernoulli(config[0]) ? MakeValue(config[1])
|
||||
: std::experimental::nullopt;
|
||||
}
|
||||
@ -285,18 +283,16 @@ int main(int argc, char **argv) {
|
||||
|
||||
// Create nodes
|
||||
const auto &nodes_config = config["nodes"];
|
||||
permanent_assert(
|
||||
nodes_config.is_array() && nodes_config.size() > 0,
|
||||
"Generator config must have 'nodes' array with at least one element");
|
||||
CHECK(nodes_config.is_array() && nodes_config.size() > 0)
|
||||
<< "Generator config must have 'nodes' array with at least one element";
|
||||
for (const auto &node_config : config["nodes"]) {
|
||||
permanent_assert(node_config.is_object(), "Node config must be a dict");
|
||||
CHECK(node_config.is_object()) << "Node config must be a dict";
|
||||
|
||||
for (int i = 0; i < node_config["count"]; i++) {
|
||||
const auto &labels_config = node_config["labels"];
|
||||
permanent_assert(labels_config.is_array(),
|
||||
"Must provide an array of node labels");
|
||||
permanent_assert(node_config.size() > 0,
|
||||
"Node labels array must contain at lest one element");
|
||||
CHECK(labels_config.is_array()) << "Must provide an array of node labels";
|
||||
CHECK(node_config.size() > 0)
|
||||
<< "Node labels array must contain at lest one element";
|
||||
auto node_bolt_id = writer.WriteNode(
|
||||
labels_config,
|
||||
value_generator.MakeProperties(node_config["properties"]));
|
||||
@ -308,7 +304,7 @@ int main(int argc, char **argv) {
|
||||
|
||||
// Create edges
|
||||
for (const auto &edge_config : config["edges"]) {
|
||||
permanent_assert(edge_config.is_object(), "Edge config must be a dict");
|
||||
CHECK(edge_config.is_object()) << "Edge config must be a dict";
|
||||
const std::string &from = edge_config["from"];
|
||||
const std::string &to = edge_config["to"];
|
||||
for (int i = 0; i < edge_config["count"]; i++)
|
||||
|
@ -7,10 +7,11 @@
|
||||
#include <iostream>
|
||||
#include <thread>
|
||||
|
||||
#include "glog/logging.h"
|
||||
|
||||
#include <sys/ioctl.h>
|
||||
#include <unistd.h>
|
||||
|
||||
#include "utils/assert.hpp"
|
||||
#include "utils/random/xorshift128plus.hpp"
|
||||
|
||||
static thread_local Xorshift128plus rnd;
|
||||
@ -44,7 +45,7 @@ int main(void) {
|
||||
auto max = std::accumulate(
|
||||
buckets.begin(), buckets.end(), 0u,
|
||||
[](auto& acc, auto& x) { return std::max(acc, x.load()); });
|
||||
debug_assert(max != 0u, "max is 0.");
|
||||
DCHECK(max != 0u) << "max is 0.";
|
||||
|
||||
std::cout << std::fixed;
|
||||
|
||||
|
@ -125,7 +125,7 @@ class GraphSession {
|
||||
Execute(fmt::format("UNWIND RANGE(1, {}) AS r CREATE (n:{} {{id: "
|
||||
"counter(\"vertex{}\")}}) RETURN min(n.id)",
|
||||
vertices_count, indexed_label_, id_));
|
||||
permanent_assert(ret.records.size() == 1, "Vertices creation failed!");
|
||||
CHECK(ret.records.size() == 1) << "Vertices creation failed!";
|
||||
uint64_t min_id = ret.records[0][0].ValueInt();
|
||||
for (uint64_t i = 0; i < vertices_count; ++i) {
|
||||
vertices_.insert(min_id + i);
|
||||
@ -164,9 +164,8 @@ class GraphSession {
|
||||
void CreateEdges(uint64_t edges_count) {
|
||||
if (edges_count == 0) return;
|
||||
auto edges_per_node = (double)edges_count / vertices_.size();
|
||||
permanent_assert(
|
||||
std::abs(edges_per_node - (int64_t)edges_per_node) < 0.0001,
|
||||
"Edges per node not a whole number");
|
||||
CHECK(std::abs(edges_per_node - (int64_t)edges_per_node) < 0.0001)
|
||||
<< "Edges per node not a whole number";
|
||||
|
||||
auto ret = Execute(fmt::format(
|
||||
"MATCH (a:{0}) WITH a "
|
||||
@ -176,7 +175,7 @@ class GraphSession {
|
||||
"min(e.id), count(e)",
|
||||
indexed_label_, (int64_t)edges_per_node - 1, vertices_.size(), id_));
|
||||
|
||||
permanent_assert(ret.records.size() == 1, "Failed to create edges");
|
||||
CHECK(ret.records.size() == 1) << "Failed to create edges";
|
||||
uint64_t min_id = ret.records[0][0].ValueInt();
|
||||
uint64_t count = ret.records[0][1].ValueInt();
|
||||
for (uint64_t i = 0; i < count; ++i) {
|
||||
@ -361,9 +360,8 @@ int main(int argc, char **argv) {
|
||||
gflags::ParseCommandLineFlags(&argc, &argv, true);
|
||||
google::InitGoogleLogging(argv[0]);
|
||||
|
||||
permanent_assert(FLAGS_vertex_count > 0,
|
||||
"Vertex count must be greater than 0!");
|
||||
permanent_assert(FLAGS_edge_count > 0, "Edge count must be greater than 0!");
|
||||
CHECK(FLAGS_vertex_count > 0) << "Vertex count must be greater than 0!";
|
||||
CHECK(FLAGS_edge_count > 0) << "Edge count must be greater than 0!";
|
||||
|
||||
LOG(INFO) << "Starting Memgraph long running test";
|
||||
|
||||
|
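The hunk above validates command-line flags with CHECK immediately after parsing, so misconfiguration fails fast in release builds too. A compact standalone sketch of that startup pattern, reusing the flag names visible in the hunk (the DEFINE_int32 defaults and descriptions are assumptions):

#include "gflags/gflags.h"
#include "glog/logging.h"

DEFINE_int32(vertex_count, 0, "Number of vertices to create");
DEFINE_int32(edge_count, 0, "Number of edges to create");

int main(int argc, char **argv) {
  gflags::ParseCommandLineFlags(&argc, &argv, true);
  google::InitGoogleLogging(argv[0]);

  CHECK(FLAGS_vertex_count > 0) << "Vertex count must be greater than 0!";
  CHECK(FLAGS_edge_count > 0) << "Edge count must be greater than 0!";

  LOG(INFO) << "Starting with " << FLAGS_vertex_count << " vertices and "
            << FLAGS_edge_count << " edges";
  return 0;
}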
@ -19,43 +19,36 @@ TEST(ConcurrentMapSkiplist, Mix) {
|
||||
auto accessor = skiplist.access();
|
||||
|
||||
// insert 10
|
||||
permanent_assert(accessor.insert(1, 10).second == true, "add first element");
|
||||
EXPECT_TRUE(accessor.insert(1, 10).second);
|
||||
|
||||
// try insert 10 again (should fail)
|
||||
permanent_assert(accessor.insert(1, 10).second == false,
|
||||
"add the same element, should fail");
|
||||
EXPECT_FALSE(accessor.insert(1, 10).second);
|
||||
|
||||
// insert 20
|
||||
permanent_assert(accessor.insert(2, 20).second == true,
|
||||
"insert new unique element");
|
||||
EXPECT_TRUE(accessor.insert(2, 20).second);
|
||||
|
||||
print_skiplist(accessor);
|
||||
|
||||
// value at key 3 shouldn't exist
|
||||
permanent_assert((accessor.find(3) == accessor.end()) == true,
|
||||
"try to find element which doesn't exist");
|
||||
EXPECT_TRUE(accessor.find(3) == accessor.end());
|
||||
|
||||
// value at key 2 should exist
|
||||
permanent_assert((accessor.find(2) != accessor.end()) == true,
|
||||
"find iterator");
|
||||
EXPECT_TRUE(accessor.find(2) != accessor.end());
|
||||
|
||||
// at key 2 is 20 (true)
|
||||
permanent_assert(accessor.find(2)->second == 20, "find element");
|
||||
EXPECT_EQ(accessor.find(2)->second, 20);
|
||||
|
||||
// removed existing (1)
|
||||
permanent_assert(accessor.remove(1) == true, "try to remove element");
|
||||
EXPECT_TRUE(accessor.remove(1));
|
||||
|
||||
// removed non-existing (3)
|
||||
permanent_assert(accessor.remove(3) == false,
|
||||
"try to remove element which doesn't exist");
|
||||
EXPECT_FALSE(accessor.remove(3));
|
||||
|
||||
// insert (1, 10)
|
||||
permanent_assert(accessor.insert(1, 10).second == true,
|
||||
"insert unique element");
|
||||
EXPECT_TRUE(accessor.insert(1, 10).second);
|
||||
|
||||
// insert (4, 40)
|
||||
permanent_assert(accessor.insert(4, 40).second == true,
|
||||
"insert unique element");
|
||||
EXPECT_TRUE(accessor.insert(4, 40).second);
|
||||
|
||||
print_skiplist(accessor);
|
||||
}
|
||||
|
@ -16,32 +16,38 @@ TEST(ConcurrentSet, Mix) {
|
||||
|
||||
auto accessor = set.access();
|
||||
|
||||
permanent_assert(accessor.insert(1).second == true,
|
||||
"added non-existing 1? (true)");
|
||||
// added non-existing 1? (true)
|
||||
EXPECT_TRUE(accessor.insert(1).second);
|
||||
|
||||
permanent_assert(accessor.insert(1).second == false,
|
||||
"added already existing 1? (false)");
|
||||
// added already existing 1? (false)
|
||||
EXPECT_FALSE(accessor.insert(1).second);
|
||||
|
||||
permanent_assert(accessor.insert(2).second == true,
|
||||
"added non-existing 2? (true)");
|
||||
// added non-existing 2? (true)
|
||||
EXPECT_TRUE(accessor.insert(2).second);
|
||||
|
||||
permanent_assert(accessor.find(3) == accessor.end(),
|
||||
"item 3 doesn't exist? (true)");
|
||||
// item 3 doesn't exist? (true)
|
||||
EXPECT_EQ(accessor.find(3), accessor.end());
|
||||
|
||||
permanent_assert(accessor.contains(3) == false, "item 3 exists? (false)");
|
||||
// item 3 exists? (false)
|
||||
EXPECT_FALSE(accessor.contains(3));
|
||||
|
||||
permanent_assert(accessor.find(2) != accessor.end(), "item 2 exists? (true)");
|
||||
// item 2 exists? (true)
|
||||
EXPECT_NE(accessor.find(2), accessor.end());
|
||||
|
||||
permanent_assert(*accessor.find(2) == 2, "find item 2");
|
||||
// find item 2
|
||||
EXPECT_EQ(*accessor.find(2), 2);
|
||||
|
||||
permanent_assert(accessor.remove(1) == true, "removed existing 1? (true)");
|
||||
// removed existing 1? (true)
|
||||
EXPECT_TRUE(accessor.remove(1));
|
||||
|
||||
permanent_assert(accessor.remove(3) == false,
|
||||
"try to remove non existing element");
|
||||
// try to remove non existing element
|
||||
EXPECT_FALSE(accessor.remove(3));
|
||||
|
||||
permanent_assert(accessor.insert(1).second == true, "add 1 again");
|
||||
// add 1 again
|
||||
EXPECT_TRUE(accessor.insert(1).second);
|
||||
|
||||
permanent_assert(accessor.insert(4).second == true, "add 4");
|
||||
// add 4
|
||||
EXPECT_TRUE(accessor.insert(4).second);
|
||||
|
||||
print_skiplist(accessor);
|
||||
}
|
||||
|
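In the unit tests above the commit goes a step further: instead of CHECK, permanent_assert becomes gtest's EXPECT_* macros, which record failures without aborting the test binary. A minimal sketch of that conversion (a plain std::map stands in for the concurrent containers):

#include <map>

#include "gtest/gtest.h"

TEST(AssertMigration, MapInsertAndFind) {
  std::map<int, int> m;

  // permanent_assert(insert(...).second == true, "...")  ->  EXPECT_TRUE(...)
  EXPECT_TRUE(m.insert({1, 10}).second);
  EXPECT_FALSE(m.insert({1, 10}).second);  // duplicate key must be rejected

  // permanent_assert(find(...)->second == 10, "...")  ->  EXPECT_EQ(...)
  EXPECT_EQ(m.find(1)->second, 10);
  EXPECT_EQ(m.find(2), m.end());
}

int main(int argc, char **argv) {
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}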
@ -2,13 +2,13 @@
|
||||
#include <experimental/filesystem>
|
||||
|
||||
#include "gflags/gflags.h"
|
||||
#include "glog/logging.h"
|
||||
#include "gtest/gtest.h"
|
||||
|
||||
#include "communication/bolt/v1/decoder/decoder.hpp"
|
||||
#include "database/dbms.hpp"
|
||||
#include "durability/file_reader_buffer.hpp"
|
||||
#include "durability/recovery.hpp"
|
||||
#include "utils/assert.hpp"
|
||||
|
||||
DECLARE_int32(snapshot_cycle_sec);
|
||||
|
||||
@ -88,8 +88,7 @@ void TakeSnapshot(Dbms &dbms, int snapshot_max_retained_) {
|
||||
std::string GetLatestSnapshot() {
|
||||
std::vector<fs::path> files =
|
||||
GetFilesFromDir(SNAPSHOTS_RECOVERY_DEFAULT_DB_DIR);
|
||||
permanent_assert(static_cast<int>(files.size()) == 1,
|
||||
"No snapshot files in folder.");
|
||||
CHECK(static_cast<int>(files.size()) == 1) << "No snapshot files in folder.";
|
||||
std::sort(files.rbegin(), files.rend());
|
||||
return files[0];
|
||||
}
|
||||
@ -180,8 +179,7 @@ TEST_F(RecoveryTest, TestEncodingAndDecoding) {
|
||||
edges.push_back(edge);
|
||||
edge_count++;
|
||||
}
|
||||
permanent_assert(static_cast<int>(edges.size()) == 2,
|
||||
"There should be two edges.");
|
||||
CHECK(static_cast<int>(edges.size()) == 2) << "There should be two edges.";
|
||||
|
||||
EXPECT_EQ(edge_count, 2);
|
||||
EXPECT_TRUE(edges[0].to() == edges[1].to());
|
||||
|
@ -2,10 +2,10 @@
|
||||
#include <memory>
|
||||
#include <vector>
|
||||
|
||||
#include "glog/logging.h"
|
||||
#include "gtest/gtest.h"
|
||||
|
||||
#include "data_structures/concurrent/skiplist.hpp"
|
||||
#include "utils/assert.hpp"
|
||||
|
||||
/* The following tests validate the SkipList::position_and_count estimation
|
||||
* functionality. That function has a tunable speed vs. accuracy. The tests
|
||||
@ -25,7 +25,7 @@ auto SkiplistRange(int count) {
|
||||
|
||||
auto Median(std::vector<int> &elements) {
|
||||
auto elem_size = elements.size();
|
||||
debug_assert(elem_size > 0, "Provide some elements to get median!");
|
||||
DCHECK(elem_size > 0) << "Provide some elements to get median!";
|
||||
std::sort(elements.begin(), elements.end());
|
||||
if (elem_size % 2)
|
||||
return elements[elem_size / 2];
|
||||
|