Prepare release v0.13.0

Reviewers: teon.banek, buda
Subscribers: pullbot
Differential Revision: https://phabricator.memgraph.io/D1667
parent: bdc2e8d647
commit: 1ec7f729d5
@@ -1,11 +1,8 @@
- name: Binaries
archive:
- build_debug/memgraph
- build_debug/memgraph_distributed
- build_release/memgraph
- build_release/memgraph_distributed
- build_release/tools/src/mg_import_csv
- build_release/tools/src/mg_statsd
- config
filename: binaries.tar.gz
@@ -31,12 +31,8 @@
mkdir build_release
cd build_release
cmake -DCMAKE_BUILD_TYPE=release ..
TIMEOUT=1200 make -j$THREADS memgraph memgraph_distributed tools memgraph__macro_benchmark memgraph__stress memgraph__manual__card_fraud_generate_snapshot memgraph__feature_benchmark__kafka__benchmark

# Generate distributed card fraud dataset.
cd ../tests/distributed/card_fraud
./generate_dataset.sh
cd ../../..
TIMEOUT=1200 make -j$THREADS memgraph tools memgraph__macro_benchmark memgraph__stress
cd ..

# Checkout to parent commit and initialize.
cd ../parent
@@ -83,7 +79,3 @@
mkdir output
cd output
cpack -G DEB --config ../CPackConfig.cmake

# Generate distributed card fraud dataset.
cd ../../tests/distributed/card_fraud
./generate_dataset.sh
@@ -210,30 +210,6 @@ import_external_library(rocksdb STATIC
CXX=${CMAKE_CXX_COMPILER}
INSTALL_COMMAND true)

# Setup Cap'n Proto
ExternalProject_Add(capnproto-proj
PREFIX ${CMAKE_CURRENT_SOURCE_DIR}/capnproto
SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/capnproto
BINARY_DIR ${CMAKE_CURRENT_SOURCE_DIR}/capnproto
CONFIGURE_COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/capnproto/configure
--prefix=${CMAKE_CURRENT_SOURCE_DIR}/capnproto/local
--enable-shared=no --silent
CC=${CMAKE_C_COMPILER} CXX=${CMAKE_CXX_COMPILER}
BUILD_COMMAND make -j${NPROC} check)
set(CAPNP_INCLUDE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/capnproto/local/include
CACHE FILEPATH "Path to capnproto include directory" FORCE)
set(CAPNP_LIBRARY ${CMAKE_CURRENT_SOURCE_DIR}/capnproto/local/lib/libcapnp.a
CACHE FILEPATH "Path to capnproto library" FORCE)
set(KJ_LIBRARY ${CMAKE_CURRENT_SOURCE_DIR}/capnproto/local/lib/libkj.a
CACHE FILEPATH "Path to kj library (used by capnproto)" FORCE)
import_library(capnp STATIC ${CAPNP_LIBRARY} capnproto-proj)
import_library(kj STATIC ${KJ_LIBRARY} capnproto-proj)
set(CAPNP_EXE ${CMAKE_CURRENT_SOURCE_DIR}/capnproto/local/bin/capnp
CACHE FILEPATH "Path to capnproto executable" FORCE)
set(CAPNP_CXX_EXE ${CMAKE_CURRENT_SOURCE_DIR}/capnproto/local/bin/capnpc-c++
CACHE FILEPATH "Path to capnproto c++ plugin executable" FORCE)
mark_as_advanced(CAPNP_INCLUDE_DIR CAPNP_LIBRARY KJ_LIBRARY CAPNP_EXE CAPNP_CXX_EXE)

# Setup librdkafka.
import_external_library(librdkafka STATIC
${CMAKE_CURRENT_SOURCE_DIR}/librdkafka/lib/librdkafka.a
@@ -125,15 +125,3 @@ clone git://deps.memgraph.io/zlib.git zlib $zlib_tag

rocksdb_tag="dbd8fa09b823826dd2a30bc119dad7a6fa9a4c6d" # v5.11.3 Mar 12, 2018
clone git://deps.memgraph.io/rocksdb.git rocksdb $rocksdb_tag

# Cap'n Proto serialization (and RPC) lib
wget -nv http://deps.memgraph.io/capnproto-c++-0.6.1.tar.gz -O capnproto.tar.gz
tar -xzf capnproto.tar.gz
rm -rf capnproto
mv capnproto-c++-0.6.1 capnproto
rm capnproto.tar.gz

# kafka
kafka_tag="c319b4e987d0bc4fe4f01cf91419d90b62061655" # Mar 8, 2018
# git clone https://github.com/edenhill/librdkafka.git
clone git://deps.memgraph.io/librdkafka.git librdkafka $kafka_tag
@ -4,12 +4,9 @@
|
||||
add_subdirectory(lisp)
|
||||
add_subdirectory(utils)
|
||||
add_subdirectory(requests)
|
||||
add_subdirectory(integrations)
|
||||
add_subdirectory(io)
|
||||
add_subdirectory(telemetry)
|
||||
add_subdirectory(communication)
|
||||
add_subdirectory(stats)
|
||||
add_subdirectory(auth)
|
||||
|
||||
# ----------------------------------------------------------------------------
|
||||
# Memgraph Single Node
|
||||
@ -24,12 +21,10 @@ set(mg_single_node_sources
|
||||
durability/single_node/recovery.cpp
|
||||
durability/single_node/snapshooter.cpp
|
||||
durability/single_node/wal.cpp
|
||||
glue/auth.cpp
|
||||
glue/communication.cpp
|
||||
query/common.cpp
|
||||
query/frontend/ast/ast.cpp
|
||||
query/frontend/ast/cypher_main_visitor.cpp
|
||||
query/frontend/semantic/required_privileges.cpp
|
||||
query/frontend/semantic/symbol_generator.cpp
|
||||
query/frontend/stripped.cpp
|
||||
query/interpret/awesome_memgraph_functions.cpp
|
||||
@ -60,8 +55,8 @@ add_lcp_single_node(query/plan/operator.lcp)
|
||||
add_custom_target(generate_lcp_single_node DEPENDS ${generated_lcp_single_node_files})
|
||||
|
||||
set(MG_SINGLE_NODE_LIBS stdc++fs Threads::Threads fmt cppitertools
|
||||
antlr_opencypher_parser_lib dl glog gflags capnp kj
|
||||
mg-utils mg-io mg-integrations-kafka mg-requests mg-communication mg-auth mg-stats)
|
||||
antlr_opencypher_parser_lib dl glog gflags
|
||||
mg-utils mg-io mg-requests mg-communication)
|
||||
|
||||
if (USE_LTALLOC)
|
||||
list(APPEND MG_SINGLE_NODE_LIBS ltalloc)
|
||||
@ -83,161 +78,6 @@ target_compile_definitions(mg-single-node PUBLIC MG_SINGLE_NODE)
|
||||
# END Memgraph Single Node
|
||||
# ----------------------------------------------------------------------------
|
||||
|
||||
# ----------------------------------------------------------------------------
|
||||
# Memgraph Distributed
|
||||
# ----------------------------------------------------------------------------
|
||||
|
||||
set(mg_distributed_sources
|
||||
database/distributed/distributed_counters.cpp
|
||||
database/distributed/distributed_graph_db.cpp
|
||||
distributed/bfs_rpc_clients.cpp
|
||||
distributed/bfs_subcursor.cpp
|
||||
distributed/cluster_discovery_master.cpp
|
||||
distributed/cluster_discovery_worker.cpp
|
||||
distributed/coordination.cpp
|
||||
distributed/coordination_master.cpp
|
||||
distributed/coordination_worker.cpp
|
||||
distributed/data_manager.cpp
|
||||
distributed/data_rpc_clients.cpp
|
||||
distributed/data_rpc_server.cpp
|
||||
distributed/dgp/partitioner.cpp
|
||||
distributed/dgp/vertex_migrator.cpp
|
||||
distributed/durability_rpc_master.cpp
|
||||
distributed/durability_rpc_worker.cpp
|
||||
distributed/dynamic_worker.cpp
|
||||
distributed/index_rpc_server.cpp
|
||||
distributed/plan_consumer.cpp
|
||||
distributed/plan_dispatcher.cpp
|
||||
distributed/produce_rpc_server.cpp
|
||||
distributed/pull_rpc_clients.cpp
|
||||
distributed/updates_rpc_clients.cpp
|
||||
distributed/updates_rpc_server.cpp
|
||||
query/distributed_interpreter.cpp
|
||||
query/plan/distributed.cpp
|
||||
query/plan/distributed_ops.cpp
|
||||
query/plan/distributed_pretty_print.cpp
|
||||
storage/distributed/concurrent_id_mapper_master.cpp
|
||||
storage/distributed/concurrent_id_mapper_worker.cpp
|
||||
transactions/distributed/engine_master.cpp
|
||||
transactions/distributed/engine_worker.cpp
|
||||
data_structures/concurrent/skiplist_gc.cpp
|
||||
database/distributed/config.cpp
|
||||
database/distributed/graph_db_accessor.cpp
|
||||
durability/distributed/state_delta.cpp
|
||||
durability/distributed/paths.cpp
|
||||
durability/distributed/recovery.cpp
|
||||
durability/distributed/snapshooter.cpp
|
||||
durability/distributed/wal.cpp
|
||||
glue/auth.cpp
|
||||
glue/communication.cpp
|
||||
query/common.cpp
|
||||
query/frontend/ast/ast.cpp
|
||||
query/frontend/ast/cypher_main_visitor.cpp
|
||||
query/frontend/semantic/required_privileges.cpp
|
||||
query/frontend/semantic/symbol_generator.cpp
|
||||
query/frontend/stripped.cpp
|
||||
query/interpret/awesome_memgraph_functions.cpp
|
||||
query/interpreter.cpp
|
||||
query/plan/operator.cpp
|
||||
query/plan/preprocess.cpp
|
||||
query/plan/pretty_print.cpp
|
||||
query/plan/rule_based_planner.cpp
|
||||
query/plan/variable_start_planner.cpp
|
||||
query/repl.cpp
|
||||
query/serialization.cpp
|
||||
query/typed_value.cpp
|
||||
storage/common/property_value.cpp
|
||||
storage/common/property_value_store.cpp
|
||||
storage/distributed/edge_accessor.cpp
|
||||
storage/distributed/record_accessor.cpp
|
||||
storage/distributed/serialization.cpp
|
||||
storage/distributed/vertex_accessor.cpp
|
||||
storage/locking/record_lock.cpp
|
||||
memgraph_init.cpp
|
||||
transactions/distributed/engine_single_node.cpp
|
||||
)
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
|
||||
define_add_capnp(mg_distributed_sources generated_capnp_files)
|
||||
|
||||
define_add_lcp(add_lcp_distributed mg_distributed_sources generated_lcp_distributed_files)
|
||||
|
||||
add_lcp_distributed(durability/distributed/state_delta.lcp)
|
||||
add_lcp_distributed(database/distributed/counters_rpc_messages.lcp CAPNP_SCHEMA @0x95a2c3ea3871e945)
|
||||
add_capnp(database/distributed/counters_rpc_messages.capnp)
|
||||
add_lcp_distributed(database/distributed/serialization.lcp CAPNP_SCHEMA @0xdea01657b3563887
|
||||
DEPENDS durability/distributed/state_delta.lcp)
|
||||
add_capnp(database/distributed/serialization.capnp)
|
||||
add_lcp_distributed(distributed/bfs_rpc_messages.lcp CAPNP_SCHEMA @0x8e508640b09b6d2a)
|
||||
add_capnp(distributed/bfs_rpc_messages.capnp)
|
||||
add_lcp_distributed(distributed/coordination_rpc_messages.lcp CAPNP_SCHEMA @0x93df0c4703cf98fb)
|
||||
add_capnp(distributed/coordination_rpc_messages.capnp)
|
||||
add_lcp_distributed(distributed/data_rpc_messages.lcp CAPNP_SCHEMA @0xc1c8a341ba37aaf5)
|
||||
add_capnp(distributed/data_rpc_messages.capnp)
|
||||
add_lcp_distributed(distributed/durability_rpc_messages.lcp CAPNP_SCHEMA @0xf5e53bc271e2163d)
|
||||
add_capnp(distributed/durability_rpc_messages.capnp)
|
||||
add_lcp_distributed(distributed/index_rpc_messages.lcp CAPNP_SCHEMA @0xa8aab46862945bd6)
|
||||
add_capnp(distributed/index_rpc_messages.capnp)
|
||||
add_lcp_distributed(distributed/plan_rpc_messages.lcp CAPNP_SCHEMA @0xfcbc48dc9f106d28)
|
||||
add_capnp(distributed/plan_rpc_messages.capnp)
|
||||
add_lcp_distributed(distributed/pull_produce_rpc_messages.lcp CAPNP_SCHEMA @0xa78a9254a73685bd
|
||||
DEPENDS transactions/distributed/serialization.lcp)
|
||||
add_capnp(distributed/pull_produce_rpc_messages.capnp)
|
||||
add_lcp_distributed(distributed/storage_gc_rpc_messages.lcp CAPNP_SCHEMA @0xd705663dfe36cf81)
|
||||
add_capnp(distributed/storage_gc_rpc_messages.capnp)
|
||||
add_lcp_distributed(distributed/token_sharing_rpc_messages.lcp CAPNP_SCHEMA @0x8f295db54ec4caec)
|
||||
add_capnp(distributed/token_sharing_rpc_messages.capnp)
|
||||
add_lcp_distributed(distributed/updates_rpc_messages.lcp CAPNP_SCHEMA @0x82d5f38d73c7b53a)
|
||||
add_capnp(distributed/updates_rpc_messages.capnp)
|
||||
add_lcp_distributed(distributed/dynamic_worker_rpc_messages.lcp CAPNP_SCHEMA @0x8c53f6c9a0c71b05)
|
||||
add_capnp(distributed/dynamic_worker_rpc_messages.capnp)
|
||||
|
||||
# distributed_ops.lcp is leading the capnp code generation, so we don't need
|
||||
# to generate any capnp for operator.lcp
|
||||
add_lcp_distributed(query/frontend/ast/ast.lcp)
|
||||
add_lcp_distributed(query/frontend/ast/ast_serialization.lcp CAPNP_SCHEMA @0xb107d3d6b4b1600b
|
||||
DEPENDS query/frontend/ast/ast.lcp)
|
||||
add_capnp(query/frontend/ast/ast_serialization.capnp)
|
||||
add_lcp_distributed(query/plan/operator.lcp)
|
||||
add_lcp_distributed(query/plan/distributed_ops.lcp CAPNP_SCHEMA @0xe5cae8d045d30c42
|
||||
DEPENDS query/plan/operator.lcp)
|
||||
add_capnp(query/plan/distributed_ops.capnp)
|
||||
|
||||
add_lcp_distributed(storage/distributed/concurrent_id_mapper_rpc_messages.lcp CAPNP_SCHEMA @0xa6068dae93d225dd)
|
||||
add_capnp(storage/distributed/concurrent_id_mapper_rpc_messages.capnp)
|
||||
add_lcp_distributed(transactions/distributed/engine_rpc_messages.lcp CAPNP_SCHEMA @0xde02b7c49180cad5
|
||||
DEPENDS transactions/distributed/serialization.lcp)
|
||||
add_capnp(transactions/distributed/engine_rpc_messages.capnp)
|
||||
|
||||
add_custom_target(generate_lcp_distributed DEPENDS ${generated_lcp_distributed_files})
|
||||
|
||||
# Registering capnp must come after registering lcp files.
|
||||
|
||||
add_capnp(communication/rpc/messages.capnp)
|
||||
add_capnp(durability/distributed/serialization.capnp)
|
||||
add_capnp(query/frontend/semantic/symbol.capnp)
|
||||
add_capnp(query/serialization.capnp)
|
||||
add_capnp(storage/distributed/serialization.capnp)
|
||||
|
||||
add_custom_target(generate_capnp DEPENDS generate_lcp_distributed ${generated_capnp_files})
|
||||
|
||||
set(MG_DISTRIBUTED_LIBS stdc++fs Threads::Threads fmt cppitertools
|
||||
antlr_opencypher_parser_lib dl glog gflags capnp kj
|
||||
mg-utils mg-io mg-integrations-kafka mg-requests mg-communication mg-auth mg-stats)
|
||||
|
||||
# STATIC library used by memgraph executables
|
||||
add_library(mg-distributed STATIC ${mg_distributed_sources})
|
||||
target_link_libraries(mg-distributed ${MG_DISTRIBUTED_LIBS})
|
||||
add_dependencies(mg-distributed generate_opencypher_parser)
|
||||
add_dependencies(mg-distributed generate_lcp_distributed)
|
||||
add_dependencies(mg-distributed generate_capnp)
|
||||
target_compile_definitions(mg-distributed PUBLIC MG_DISTRIBUTED)
|
||||
|
||||
# ----------------------------------------------------------------------------
|
||||
# END Memgraph Distributed
|
||||
# ----------------------------------------------------------------------------
|
||||
|
||||
string(TOLOWER ${CMAKE_BUILD_TYPE} lower_build_type)
|
||||
|
||||
# STATIC library used to store key-value pairs
|
||||
@ -311,18 +151,3 @@ install(
|
||||
${CMAKE_BINARY_DIR}/tests/manual/bolt_client
|
||||
WORKING_DIRECTORY ${examples})")
|
||||
install(DIRECTORY ${examples}/build/ DESTINATION share/memgraph/examples)
|
||||
|
||||
|
||||
# memgraph distributed main executable
|
||||
add_executable(memgraph_distributed memgraph_distributed.cpp)
|
||||
target_link_libraries(memgraph_distributed mg-distributed kvstore_lib telemetry_lib)
|
||||
set_target_properties(memgraph_distributed PROPERTIES
|
||||
# Set the executable output name to include version information.
|
||||
OUTPUT_NAME "memgraph_distributed-${memgraph_VERSION}-${COMMIT_HASH}_${CMAKE_BUILD_TYPE}"
|
||||
# Output the executable in main binary dir.
|
||||
RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR})
|
||||
# Create symlink to the built executable.
|
||||
add_custom_command(TARGET memgraph_distributed POST_BUILD
|
||||
COMMAND ${CMAKE_COMMAND} -E create_symlink $<TARGET_FILE:memgraph_distributed> ${CMAKE_BINARY_DIR}/memgraph_distributed
|
||||
BYPRODUCTS ${CMAKE_BINARY_DIR}/memgraph_distributed
|
||||
COMMENT Creating symlink to memgraph distributed executable)
|
||||
|
@@ -4,24 +4,9 @@ set(communication_src_files
client.cpp
context.cpp
helpers.cpp
init.cpp
rpc/client.cpp
rpc/protocol.cpp
rpc/server.cpp)

# TODO: Extract data_structures to library
set(communication_src_files ${communication_src_files}
${CMAKE_SOURCE_DIR}/src/data_structures/concurrent/skiplist_gc.cpp)

define_add_capnp(communication_src_files communication_capnp_files)

add_capnp(rpc/messages.capnp)

add_custom_target(generate_communication_capnp DEPENDS ${communication_capnp_files})
init.cpp)

add_library(mg-communication STATIC ${communication_src_files})
target_link_libraries(mg-communication Threads::Threads mg-utils mg-io fmt glog gflags)
target_link_libraries(mg-communication ${OPENSSL_LIBRARIES})
target_include_directories(mg-communication SYSTEM PUBLIC ${OPENSSL_INCLUDE_DIR})
target_link_libraries(mg-communication capnp kj)
add_dependencies(mg-communication generate_communication_capnp)
@@ -60,10 +60,6 @@ class Session {
/** Aborts currently running query. */
virtual void Abort() = 0;

/** Return `true` if the user was successfully authenticated. */
virtual bool Authenticate(const std::string &username,
const std::string &password) = 0;

/**
* Executes the session after data has been read into the buffer.
* Goes through the bolt states in order to execute commands from the client.
@@ -60,38 +60,6 @@ State StateInitRun(Session &session) {
LOG(INFO) << fmt::format("Client connected '{}'", client_name.ValueString())
<< std::endl;

// Get authentication data.
std::string username, password;
auto &data = metadata.ValueMap();
if (!data.count("scheme")) {
LOG(WARNING) << "The client didn't supply authentication information!";
return State::Close;
}
if (data["scheme"].ValueString() == "basic") {
if (!data.count("principal") || !data.count("credentials")) {
LOG(WARNING) << "The client didn't supply authentication information!";
return State::Close;
}
username = data["principal"].ValueString();
password = data["credentials"].ValueString();
} else if (data["scheme"].ValueString() != "none") {
LOG(WARNING) << "Unsupported authentication scheme: "
<< data["scheme"].ValueString();
return State::Close;
}

// Authenticate the user.
if (!session.Authenticate(username, password)) {
if (!session.encoder_.MessageFailure(
{{"code", "Memgraph.ClientError.Security.Unauthenticated"},
{"message", "Authentication failure"}})) {
DLOG(WARNING) << "Couldn't send failure message to the client!";
}
// Throw an exception to indicate to the network stack that the session
// should be closed and cleaned up.
throw SessionClosedException("The client is not authenticated!");
}

// Return success.
if (!session.encoder_.MessageSuccess()) {
DLOG(WARNING) << "Couldn't send success message to the client!";
@@ -65,10 +65,6 @@ bool VersionConsistency(const fs::path &durability_dir) {
return true;
}

bool DistributedVersionConsistency(const int64_t master_version) {
return durability::kVersion == master_version;
}

bool ContainsDurabilityFiles(const fs::path &durability_dir) {
for (const auto &durability_type : {kSnapshotDir, kWalDir}) {
auto recovery_dir = durability_dir / durability_type;
@@ -74,15 +74,6 @@ bool ReadSnapshotSummary(HashedFileReader &buffer, int64_t &vertex_count,
bool VersionConsistency(
const std::experimental::filesystem::path &durability_dir);

/**
* Checks whether the current memgraph binary (on a worker) is
* version consistent with the cluster master.
*
* @param master_version - Version of the master.
* @return - True if versions match.
*/
bool DistributedVersionConsistency(const int64_t master_version);

/**
* Checks whether the durability directory contains snapshot
* or write-ahead log file.
@@ -4,13 +4,5 @@ set(io_src_files
network/socket.cpp
network/utils.cpp)

define_add_capnp(io_src_files io_capnp_files)

add_capnp(network/endpoint.capnp)

add_custom_target(generate_io_capnp DEPENDS ${io_capnp_files})

add_library(mg-io STATIC ${io_src_files})
target_link_libraries(mg-io stdc++fs Threads::Threads fmt glog mg-utils)
target_link_libraries(mg-io capnp kj)
add_dependencies(mg-io generate_io_capnp)
@@ -24,18 +24,6 @@ Endpoint::Endpoint(const std::string &address, uint16_t port)
CHECK(family_ != 0) << "Not a valid IPv4 or IPv6 address: " << address;
}

void Save(const Endpoint &endpoint, capnp::Endpoint::Builder *builder) {
builder->setAddress(endpoint.address());
builder->setPort(endpoint.port());
builder->setFamily(endpoint.family());
}

void Load(Endpoint *endpoint, const capnp::Endpoint::Reader &reader) {
endpoint->address_ = reader.getAddress();
endpoint->port_ = reader.getPort();
endpoint->family_ = reader.getFamily();
}

bool Endpoint::operator==(const Endpoint &other) const {
return address_ == other.address_ && port_ == other.port_ &&
family_ == other.family_;
@@ -5,7 +5,6 @@
#include <iostream>
#include <string>

#include "io/network/endpoint.capnp.h"
#include "utils/exceptions.hpp"

namespace io::network {
@@ -33,8 +32,4 @@ class Endpoint {
unsigned char family_{0};
};

void Save(const Endpoint &endpoint, capnp::Endpoint::Builder *builder);

void Load(Endpoint *endpoint, const capnp::Endpoint::Reader &reader);

} // namespace io::network
@@ -11,8 +11,6 @@

#include "communication/server.hpp"
#include "database/single_node/graph_db.hpp"
#include "integrations/kafka/exceptions.hpp"
#include "integrations/kafka/streams.hpp"
#include "memgraph_init.hpp"
#include "query/exceptions.hpp"
#include "telemetry/telemetry.hpp"
@@ -48,25 +46,6 @@ void SingleNodeMain() {
query::Interpreter interpreter;
SessionData session_data{&db, &interpreter};

integrations::kafka::Streams kafka_streams{
std::experimental::filesystem::path(FLAGS_durability_directory) /
"streams",
[&session_data](
const std::string &query,
const std::map<std::string, communication::bolt::Value> &params) {
KafkaStreamWriter(session_data, query, params);
}};

try {
// Recover possible streams.
kafka_streams.Recover();
} catch (const integrations::kafka::KafkaStreamException &e) {
LOG(ERROR) << e.what();
}

session_data.interpreter->auth_ = &session_data.auth;
session_data.interpreter->kafka_streams_ = &kafka_streams;

ServerContext context;
std::string service_name = "Bolt";
if (FLAGS_key_file != "" && FLAGS_cert_file != "") {
@@ -3,11 +3,9 @@
#include <glog/logging.h>

#include "config.hpp"
#include "glue/auth.hpp"
#include "glue/communication.hpp"
#include "query/exceptions.hpp"
#include "requests/requests.hpp"
#include "stats/stats.hpp"
#include "utils/signals.hpp"
#include "utils/sysinfo/memory.hpp"
#include "utils/terminate_handler.hpp"
@@ -28,8 +26,7 @@ BoltSession::BoltSession(SessionData *data, const io::network::Endpoint &,
: communication::bolt::Session<communication::InputStream,
communication::OutputStream>(input_stream,
output_stream),
transaction_engine_(data->db, data->interpreter),
auth_(&data->auth) {}
transaction_engine_(data->db, data->interpreter) {}

using TEncoder =
communication::bolt::Session<communication::InputStream,
@@ -42,21 +39,7 @@ std::vector<std::string> BoltSession::Interpret(
for (const auto &kv : params)
params_pv.emplace(kv.first, glue::ToPropertyValue(kv.second));
try {
auto result = transaction_engine_.Interpret(query, params_pv);
if (user_) {
const auto &permissions = user_->GetPermissions();
for (const auto &privilege : result.second) {
if (permissions.Has(glue::PrivilegeToPermission(privilege)) !=
auth::PermissionLevel::GRANT) {
transaction_engine_.Abort();
throw communication::bolt::ClientError(
"You are not authorized to execute this query! Please contact "
"your database administrator.");
}
}
}
return result.first;

return transaction_engine_.Interpret(query, params_pv);
} catch (const query::QueryException &e) {
// Wrap QueryException into ClientError, because we want to allow the
// client to fix their query.
@@ -83,13 +66,6 @@ std::map<std::string, communication::bolt::Value> BoltSession::PullAll(

void BoltSession::Abort() { transaction_engine_.Abort(); }

bool BoltSession::Authenticate(const std::string &username,
const std::string &password) {
if (!auth_->HasUsers()) return true;
user_ = auth_->Authenticate(username, password);
return !!user_;
}

BoltSession::TypedValueResultStream::TypedValueResultStream(TEncoder *encoder)
: encoder_(encoder) {}

@@ -103,24 +79,6 @@ void BoltSession::TypedValueResultStream::Result(
encoder_->MessageRecord(decoded_values);
}

void KafkaStreamWriter(
SessionData &session_data, const std::string &query,
const std::map<std::string, communication::bolt::Value> &params) {
auto dba = session_data.db->Access();
KafkaResultStream stream;
std::map<std::string, PropertyValue> params_pv;
for (const auto &kv : params)
params_pv.emplace(kv.first, glue::ToPropertyValue(kv.second));
try {
(*session_data.interpreter)(query, *dba, params_pv, false).PullAll(stream);
dba->Commit();
} catch (const utils::BasicException &e) {
LOG(WARNING) << "[Kafka] query execution failed with an exception: "
<< e.what();
dba->Abort();
}
};

// Needed to correctly handle memgraph destruction from a signal handler.
// Without having some sort of a flag, it is possible that a signal is handled
// when we are exiting main, inside destructors of database::GraphDb and
@@ -175,9 +133,6 @@ int WithInit(int argc, char **argv,
// Unhandled exception handler init.
std::set_terminate(&utils::TerminateHandler);

stats::InitStatsLogging(get_stats_prefix());
utils::OnScopeExit stop_stats([] { stats::StopStatsLogging(); });

// Initialize the communication library.
communication::Init();
|
||||
|
||||
#include <gflags/gflags.h>
|
||||
|
||||
#include "auth/auth.hpp"
|
||||
#include "communication/bolt/v1/session.hpp"
|
||||
#include "communication/init.hpp"
|
||||
#include "communication/session.hpp"
|
||||
@ -23,8 +22,6 @@ DECLARE_string(durability_directory);
|
||||
struct SessionData {
|
||||
database::GraphDb *db{nullptr};
|
||||
query::Interpreter *interpreter{nullptr};
|
||||
auth::Auth auth{
|
||||
std::experimental::filesystem::path(FLAGS_durability_directory) / "auth"};
|
||||
};
|
||||
|
||||
class BoltSession final
|
||||
@ -47,9 +44,6 @@ class BoltSession final
|
||||
|
||||
void Abort() override;
|
||||
|
||||
bool Authenticate(const std::string &username,
|
||||
const std::string &password) override;
|
||||
|
||||
private:
|
||||
/// Wrapper around TEncoder which converts TypedValue to Value
|
||||
/// before forwarding the calls to original TEncoder.
|
||||
@ -64,8 +58,6 @@ class BoltSession final
|
||||
};
|
||||
|
||||
query::TransactionEngine transaction_engine_;
|
||||
auth::Auth *auth_;
|
||||
std::experimental::optional<auth::User> user_;
|
||||
};
|
||||
|
||||
/// Class that implements ResultStream API for Kafka.
|
||||
@ -77,11 +69,6 @@ class KafkaResultStream {
|
||||
void Result(const std::vector<query::TypedValue> &) {}
|
||||
};
|
||||
|
||||
/// Writes data streamed from kafka to memgraph.
|
||||
void KafkaStreamWriter(
|
||||
SessionData &session_data, const std::string &query,
|
||||
const std::map<std::string, communication::bolt::Value> ¶ms);
|
||||
|
||||
/// Set up signal handlers and register `shutdown` on SIGTERM and SIGINT.
|
||||
/// In most cases you don't have to call this. If you are using a custom server
|
||||
/// startup function for `WithInit`, then you probably need to use this to
|
||||
|
@ -8,7 +8,6 @@
|
||||
#include "glog/logging.h"
|
||||
|
||||
#include "query/exceptions.hpp"
|
||||
#include "utils/serialization.hpp"
|
||||
#include "utils/string.hpp"
|
||||
|
||||
namespace query {
|
||||
|
@ -5,14 +5,6 @@
|
||||
#include "query/frontend/semantic/symbol_table.hpp"
|
||||
#include "query/parameters.hpp"
|
||||
|
||||
namespace auth {
|
||||
class Auth;
|
||||
} // namespace auth
|
||||
|
||||
namespace integrations::kafka {
|
||||
class Streams;
|
||||
} // namespace integrations::kafka
|
||||
|
||||
namespace query {
|
||||
|
||||
struct EvaluationContext {
|
||||
@ -36,9 +28,6 @@ class Context {
|
||||
bool is_index_created_ = false;
|
||||
SymbolTable symbol_table_;
|
||||
EvaluationContext evaluation_context_;
|
||||
|
||||
auth::Auth *auth_ = nullptr;
|
||||
integrations::kafka::Streams *kafka_streams_ = nullptr;
|
||||
};
|
||||
|
||||
struct ParsingContext {
|
||||
|
@ -2175,343 +2175,6 @@ cpp<#
|
||||
cpp<#)
|
||||
(:serialize :capnp))
|
||||
|
||||
(lcp:define-class auth-query (clause)
|
||||
((action "Action" :scope :public)
|
||||
(user "std::string" :scope :public)
|
||||
(role "std::string" :scope :public)
|
||||
(user-or-role "std::string" :scope :public)
|
||||
(password "Expression *" :initval "nullptr" :scope :public
|
||||
:capnp-type "Tree" :capnp-init nil
|
||||
:capnp-save #'save-ast-pointer
|
||||
:capnp-load (load-ast-pointer "Expression *"))
|
||||
(privileges "std::vector<Privilege>" :scope :public
|
||||
:capnp-save (lambda (builder member capnp-name)
|
||||
#>cpp
|
||||
for (size_t i = 0; i < ${member}.size(); ++i) {
|
||||
switch (${member}[i]) {
|
||||
case AuthQuery::Privilege::CREATE:
|
||||
${builder}.set(i, capnp::AuthQuery::Privilege::CREATE);
|
||||
break;
|
||||
case AuthQuery::Privilege::DELETE:
|
||||
${builder}.set(i, capnp::AuthQuery::Privilege::DELETE);
|
||||
break;
|
||||
case AuthQuery::Privilege::MATCH:
|
||||
${builder}.set(i, capnp::AuthQuery::Privilege::MATCH);
|
||||
break;
|
||||
case AuthQuery::Privilege::MERGE:
|
||||
${builder}.set(i, capnp::AuthQuery::Privilege::MERGE);
|
||||
break;
|
||||
case AuthQuery::Privilege::SET:
|
||||
${builder}.set(i, capnp::AuthQuery::Privilege::SET);
|
||||
break;
|
||||
case AuthQuery::Privilege::REMOVE:
|
||||
${builder}.set(i, capnp::AuthQuery::Privilege::REMOVE);
|
||||
break;
|
||||
case AuthQuery::Privilege::INDEX:
|
||||
${builder}.set(i, capnp::AuthQuery::Privilege::INDEX);
|
||||
break;
|
||||
case AuthQuery::Privilege::AUTH:
|
||||
${builder}.set(i, capnp::AuthQuery::Privilege::AUTH);
|
||||
break;
|
||||
case AuthQuery::Privilege::STREAM:
|
||||
${builder}.set(i, capnp::AuthQuery::Privilege::STREAM);
|
||||
break;
|
||||
}
|
||||
}
|
||||
cpp<#)
|
||||
:capnp-load (lambda (reader member capnp-name)
|
||||
#>cpp
|
||||
${member}.resize(${reader}.size());
|
||||
size_t i = 0;
|
||||
for (const auto &val : ${reader}) {
|
||||
switch (val) {
|
||||
case capnp::AuthQuery::Privilege::CREATE:
|
||||
${member}[i] = AuthQuery::Privilege::CREATE;
|
||||
break;
|
||||
case capnp::AuthQuery::Privilege::DELETE:
|
||||
${member}[i] = AuthQuery::Privilege::DELETE;
|
||||
break;
|
||||
case capnp::AuthQuery::Privilege::MATCH:
|
||||
${member}[i] = AuthQuery::Privilege::MATCH;
|
||||
break;
|
||||
case capnp::AuthQuery::Privilege::MERGE:
|
||||
${member}[i] = AuthQuery::Privilege::MERGE;
|
||||
break;
|
||||
case capnp::AuthQuery::Privilege::SET:
|
||||
${member}[i] = AuthQuery::Privilege::SET;
|
||||
break;
|
||||
case capnp::AuthQuery::Privilege::REMOVE:
|
||||
${member}[i] = AuthQuery::Privilege::REMOVE;
|
||||
break;
|
||||
case capnp::AuthQuery::Privilege::INDEX:
|
||||
${member}[i] = AuthQuery::Privilege::INDEX;
|
||||
break;
|
||||
case capnp::AuthQuery::Privilege::AUTH:
|
||||
${member}[i] = AuthQuery::Privilege::AUTH;
|
||||
break;
|
||||
case capnp::AuthQuery::Privilege::STREAM:
|
||||
${member}[i] = AuthQuery::Privilege::STREAM;
|
||||
break;
|
||||
}
|
||||
++i;
|
||||
}
|
||||
cpp<#)))
|
||||
(:public
|
||||
(lcp:define-enum action
|
||||
(create-role drop-role show-roles create-user
|
||||
set-password drop-user show-users set-role
|
||||
clear-role grant-privilege deny-privilege
|
||||
revoke-privilege show-privileges
|
||||
show-role-for-user show-users-for-role)
|
||||
(:serialize :capnp))
|
||||
(lcp:define-enum privilege
|
||||
(create delete match merge set remove index auth stream)
|
||||
(:serialize :capnp))
|
||||
#>cpp
|
||||
AuthQuery() = default;
|
||||
|
||||
DEFVISITABLE(TreeVisitor<TypedValue>);
|
||||
DEFVISITABLE(HierarchicalTreeVisitor);
|
||||
|
||||
AuthQuery *Clone(AstStorage &storage) const override {
|
||||
return storage.Create<AuthQuery>(
|
||||
action_, user_, role_, user_or_role_,
|
||||
password_ ? password_->Clone(storage) : nullptr, privileges_);
|
||||
}
|
||||
cpp<#)
|
||||
(:protected
|
||||
#>cpp
|
||||
explicit AuthQuery(int uid) : Clause(uid) {}
|
||||
|
||||
explicit AuthQuery(int uid, Action action, std::string user, std::string role,
|
||||
std::string user_or_role, Expression *password,
|
||||
std::vector<Privilege> privileges)
|
||||
: Clause(uid),
|
||||
action_(action),
|
||||
user_(user),
|
||||
role_(role),
|
||||
user_or_role_(user_or_role),
|
||||
password_(password),
|
||||
privileges_(privileges) {}
|
||||
cpp<#)
|
||||
(:private
|
||||
#>cpp
|
||||
friend class AstStorage;
|
||||
cpp<#)
|
||||
(:serialize :capnp))
|
||||
|
||||
#>cpp
|
||||
// Constant that holds all available privileges.
|
||||
const std::vector<AuthQuery::Privilege> kPrivilegesAll = {
|
||||
AuthQuery::Privilege::CREATE, AuthQuery::Privilege::DELETE,
|
||||
AuthQuery::Privilege::MATCH, AuthQuery::Privilege::MERGE,
|
||||
AuthQuery::Privilege::SET, AuthQuery::Privilege::REMOVE,
|
||||
AuthQuery::Privilege::INDEX, AuthQuery::Privilege::AUTH,
|
||||
AuthQuery::Privilege::STREAM};
|
||||
cpp<#
|
||||
|
||||
(lcp:define-class create-stream (clause)
|
||||
((stream-name "std::string" :scope :public)
|
||||
(stream-uri "Expression *" :scope :public
|
||||
:capnp-type "Tree" :capnp-init nil
|
||||
:capnp-save #'save-ast-pointer
|
||||
:capnp-load (load-ast-pointer "Expression *"))
|
||||
(stream-topic "Expression *" :scope :public
|
||||
:capnp-type "Tree" :capnp-init nil
|
||||
:capnp-save #'save-ast-pointer
|
||||
:capnp-load (load-ast-pointer "Expression *"))
|
||||
(transform-uri "Expression *" :scope :public
|
||||
:capnp-type "Tree" :capnp-init nil
|
||||
:capnp-save #'save-ast-pointer
|
||||
:capnp-load (load-ast-pointer "Expression *"))
|
||||
(batch-interval-in-ms "Expression *" :scope :public
|
||||
:capnp-type "Tree" :capnp-init nil
|
||||
:capnp-save #'save-ast-pointer
|
||||
:capnp-load (load-ast-pointer "Expression *"))
|
||||
(batch-size "Expression *" :scope :public
|
||||
:capnp-type "Tree" :capnp-init nil
|
||||
:capnp-save #'save-ast-pointer
|
||||
:capnp-load (load-ast-pointer "Expression *")))
|
||||
(:public
|
||||
#>cpp
|
||||
CreateStream() = default;
|
||||
|
||||
DEFVISITABLE(TreeVisitor<TypedValue>);
|
||||
DEFVISITABLE(HierarchicalTreeVisitor);
|
||||
|
||||
CreateStream *Clone(AstStorage &storage) const override {
|
||||
return storage.Create<CreateStream>(
|
||||
stream_name_, stream_uri_->Clone(storage),
|
||||
stream_topic_->Clone(storage), transform_uri_->Clone(storage),
|
||||
batch_interval_in_ms_ ? batch_interval_in_ms_->Clone(storage) : nullptr,
|
||||
batch_size_ ? batch_size_->Clone(storage) : nullptr);
|
||||
}
|
||||
cpp<#)
|
||||
(:protected
|
||||
#>cpp
|
||||
explicit CreateStream(int uid) : Clause(uid) {}
|
||||
CreateStream(int uid, std::string stream_name, Expression *stream_uri,
|
||||
Expression *stream_topic, Expression *transform_uri,
|
||||
Expression *batch_interval_in_ms, Expression *batch_size)
|
||||
: Clause(uid),
|
||||
stream_name_(std::move(stream_name)),
|
||||
stream_uri_(stream_uri),
|
||||
stream_topic_(stream_topic),
|
||||
transform_uri_(transform_uri),
|
||||
batch_interval_in_ms_(batch_interval_in_ms),
|
||||
batch_size_(batch_size) {}
|
||||
cpp<#)
|
||||
(:private
|
||||
#>cpp
|
||||
friend class AstStorage;
|
||||
cpp<#)
|
||||
(:serialize :capnp))
|
||||
|
||||
(lcp:define-class drop-stream (clause)
|
||||
((stream-name "std::string" :scope :public))
|
||||
(:public
|
||||
#>cpp
|
||||
DropStream() = default;
|
||||
|
||||
DEFVISITABLE(TreeVisitor<TypedValue>);
|
||||
DEFVISITABLE(HierarchicalTreeVisitor);
|
||||
|
||||
DropStream *Clone(AstStorage &storage) const override {
|
||||
return storage.Create<DropStream>(stream_name_);
|
||||
}
|
||||
cpp<#)
|
||||
(:protected
|
||||
#>cpp
|
||||
explicit DropStream(int uid) : Clause(uid) {}
|
||||
DropStream(int uid, std::string stream_name)
|
||||
: Clause(uid), stream_name_(std::move(stream_name)) {}
|
||||
cpp<#)
|
||||
(:private
|
||||
#>cpp
|
||||
friend class AstStorage;
|
||||
cpp<#)
|
||||
(:serialize :capnp))
|
||||
|
||||
(lcp:define-class show-streams (clause)
|
||||
()
|
||||
(:public
|
||||
#>cpp
|
||||
ShowStreams() = default;
|
||||
|
||||
DEFVISITABLE(TreeVisitor<TypedValue>);
|
||||
DEFVISITABLE(HierarchicalTreeVisitor);
|
||||
|
||||
ShowStreams *Clone(AstStorage &storage) const override {
|
||||
return storage.Create<ShowStreams>();
|
||||
}
|
||||
cpp<#)
|
||||
(:protected
|
||||
#>cpp
|
||||
explicit ShowStreams(int uid) : Clause(uid) {}
|
||||
cpp<#)
|
||||
(:private
|
||||
#>cpp
|
||||
friend class AstStorage;
|
||||
cpp<#)
|
||||
(:serialize :capnp))
|
||||
|
||||
(lcp:define-class start-stop-stream (clause)
|
||||
((stream-name "std::string" :scope :public)
|
||||
(is-start :bool :scope :public)
|
||||
(limit-batches "Expression *" :scope :public
|
||||
:capnp-type "Tree" :capnp-init nil
|
||||
:capnp-save #'save-ast-pointer
|
||||
:capnp-load (load-ast-pointer "Expression *")))
|
||||
(:public
|
||||
#>cpp
|
||||
StartStopStream() = default;
|
||||
|
||||
DEFVISITABLE(TreeVisitor<TypedValue>);
|
||||
DEFVISITABLE(HierarchicalTreeVisitor);
|
||||
|
||||
StartStopStream *Clone(AstStorage &storage) const override {
|
||||
return storage.Create<StartStopStream>(
|
||||
stream_name_, is_start_,
|
||||
limit_batches_ ? limit_batches_->Clone(storage) : nullptr);
|
||||
}
|
||||
cpp<#)
|
||||
(:protected
|
||||
#>cpp
|
||||
explicit StartStopStream(int uid) : Clause(uid) {}
|
||||
StartStopStream(int uid, std::string stream_name, bool is_start,
|
||||
Expression *limit_batches)
|
||||
: Clause(uid),
|
||||
stream_name_(std::move(stream_name)),
|
||||
is_start_(is_start),
|
||||
limit_batches_(limit_batches) {}
|
||||
cpp<#)
|
||||
(:private
|
||||
#>cpp
|
||||
friend class AstStorage;
|
||||
cpp<#)
|
||||
(:serialize :capnp))
|
||||
|
||||
(lcp:define-class start-stop-all-streams (clause)
|
||||
((is-start :bool :scope :public))
|
||||
(:public
|
||||
#>cpp
|
||||
StartStopAllStreams() = default;
|
||||
|
||||
DEFVISITABLE(TreeVisitor<TypedValue>);
|
||||
DEFVISITABLE(HierarchicalTreeVisitor);
|
||||
|
||||
StartStopAllStreams *Clone(AstStorage &storage) const override {
|
||||
return storage.Create<StartStopAllStreams>(is_start_);
|
||||
}
|
||||
cpp<#)
|
||||
(:protected
|
||||
#>cpp
|
||||
explicit StartStopAllStreams(int uid) : Clause(uid) {}
|
||||
StartStopAllStreams(int uid, bool is_start)
|
||||
: Clause(uid), is_start_(is_start) {}
|
||||
|
||||
cpp<#)
|
||||
(:private
|
||||
#>cpp
|
||||
friend class AstStorage;
|
||||
cpp<#)
|
||||
(:serialize :capnp))
|
||||
|
||||
(lcp:define-class test-stream (clause)
|
||||
((stream-name "std::string" :scope :public)
|
||||
(limit-batches "Expression *" :scope :public
|
||||
:capnp-type "Tree" :capnp-init nil
|
||||
:capnp-save #'save-ast-pointer
|
||||
:capnp-load (load-ast-pointer "Expression *")))
|
||||
(:public
|
||||
#>cpp
|
||||
TestStream() = default;
|
||||
|
||||
DEFVISITABLE(TreeVisitor<TypedValue>);
|
||||
DEFVISITABLE(HierarchicalTreeVisitor);
|
||||
|
||||
TestStream *Clone(AstStorage &storage) const override {
|
||||
return storage.Create<TestStream>(
|
||||
stream_name_,
|
||||
limit_batches_ ? limit_batches_->Clone(storage) : nullptr);
|
||||
}
|
||||
|
||||
cpp<#)
|
||||
(:protected
|
||||
#>cpp
|
||||
explicit TestStream(int uid) : Clause(uid) {}
|
||||
TestStream(int uid, std::string stream_name, Expression *limit_batches)
|
||||
: Clause(uid),
|
||||
stream_name_(std::move(stream_name)),
|
||||
limit_batches_(limit_batches) {}
|
||||
|
||||
cpp<#)
|
||||
(:private
|
||||
#>cpp
|
||||
friend class AstStorage;
|
||||
cpp<#)
|
||||
(:serialize :capnp))
|
||||
|
||||
#>cpp
|
||||
#undef CLONE_BINARY_EXPRESSION
|
||||
#undef CLONE_UNARY_EXPRESSION
|
||||
|
@@ -62,13 +62,6 @@ class Merge;
class Unwind;
class CreateIndex;
class CreateUniqueIndex;
class AuthQuery;
class CreateStream;
class DropStream;
class ShowStreams;
class StartStopStream;
class StartStopAllStreams;
class TestStream;

using TreeCompositeVisitor = ::utils::CompositeVisitor<
Query, SingleQuery, CypherUnion, NamedExpression, OrOperator, XorOperator,
@@ -84,9 +77,7 @@ using TreeCompositeVisitor = ::utils::CompositeVisitor<

using TreeLeafVisitor =
::utils::LeafVisitor<Identifier, PrimitiveLiteral, ParameterLookup,
CreateIndex, CreateUniqueIndex, AuthQuery,
CreateStream, DropStream, ShowStreams, StartStopStream,
StartStopAllStreams, TestStream>;
CreateIndex, CreateUniqueIndex>;

class HierarchicalTreeVisitor : public TreeCompositeVisitor,
public TreeLeafVisitor {
@@ -109,8 +100,6 @@ using TreeVisitor = ::utils::Visitor<
Aggregation, Function, Reduce, Extract, All, Single, ParameterLookup,
Create, Match, Return, With, Pattern, NodeAtom, EdgeAtom, Delete, Where,
SetProperty, SetProperties, SetLabels, RemoveProperty, RemoveLabels, Merge,
Unwind, Identifier, PrimitiveLiteral, CreateIndex, CreateUniqueIndex,
AuthQuery, CreateStream, DropStream, ShowStreams, StartStopStream,
StartStopAllStreams, TestStream>;
Unwind, Identifier, PrimitiveLiteral, CreateIndex, CreateUniqueIndex>;

} // namespace query
|
@ -72,40 +72,6 @@ antlrcpp::Any CypherMainVisitor::visitIndexQuery(
|
||||
return query_;
|
||||
}
|
||||
|
||||
antlrcpp::Any CypherMainVisitor::visitAuthQuery(
|
||||
MemgraphCypher::AuthQueryContext *ctx) {
|
||||
query_ = storage_->Create<Query>();
|
||||
query_->single_query_ = storage_->Create<SingleQuery>();
|
||||
CHECK(ctx->children.size() == 1)
|
||||
<< "AuthQuery should have exactly one child!";
|
||||
query_->single_query_->clauses_.push_back(
|
||||
ctx->children[0]->accept(this).as<AuthQuery *>());
|
||||
return query_;
|
||||
}
|
||||
|
||||
antlrcpp::Any CypherMainVisitor::visitStreamQuery(
|
||||
MemgraphCypher::StreamQueryContext *ctx) {
|
||||
query_ = storage_->Create<Query>();
|
||||
query_->single_query_ = storage_->Create<SingleQuery>();
|
||||
Clause *clause = nullptr;
|
||||
if (ctx->createStream()) {
|
||||
clause = ctx->createStream()->accept(this).as<CreateStream *>();
|
||||
} else if (ctx->dropStream()) {
|
||||
clause = ctx->dropStream()->accept(this).as<DropStream *>();
|
||||
} else if (ctx->showStreams()) {
|
||||
clause = ctx->showStreams()->accept(this).as<ShowStreams *>();
|
||||
} else if (ctx->startStopStream()) {
|
||||
clause = ctx->startStopStream()->accept(this).as<StartStopStream *>();
|
||||
} else if (ctx->startStopAllStreams()) {
|
||||
clause =
|
||||
ctx->startStopAllStreams()->accept(this).as<StartStopAllStreams *>();
|
||||
} else if (ctx->testStream()) {
|
||||
clause = ctx->testStream()->accept(this).as<TestStream *>();
|
||||
}
|
||||
query_->single_query_->clauses_ = {clause};
|
||||
return query_;
|
||||
}
|
||||
|
||||
antlrcpp::Any CypherMainVisitor::visitCypherUnion(
|
||||
MemgraphCypher::CypherUnionContext *ctx) {
|
||||
bool distinct = !ctx->ALL();
|
||||
@ -293,352 +259,6 @@ antlrcpp::Any CypherMainVisitor::visitCreateUniqueIndex(
|
||||
dba_->Label(ctx->labelName()->accept(this)), properties);
|
||||
}
|
||||
|
||||
/**
|
||||
* @return std::string
|
||||
*/
|
||||
antlrcpp::Any CypherMainVisitor::visitUserOrRoleName(
|
||||
MemgraphCypher::UserOrRoleNameContext *ctx) {
|
||||
std::string value = ctx->symbolicName()->accept(this).as<std::string>();
|
||||
const std::regex NAME_REGEX("[a-zA-Z0-9_.+-]+");
|
||||
if (!std::regex_match(value, NAME_REGEX)) {
|
||||
throw SyntaxException("Invalid user or role name.");
|
||||
}
|
||||
return value;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return AuthQuery*
|
||||
*/
|
||||
antlrcpp::Any CypherMainVisitor::visitCreateRole(
|
||||
MemgraphCypher::CreateRoleContext *ctx) {
|
||||
AuthQuery *auth = storage_->Create<AuthQuery>();
|
||||
auth->action_ = AuthQuery::Action::CREATE_ROLE;
|
||||
auth->role_ = ctx->role->accept(this).as<std::string>();
|
||||
return auth;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return AuthQuery*
|
||||
*/
|
||||
antlrcpp::Any CypherMainVisitor::visitDropRole(
|
||||
MemgraphCypher::DropRoleContext *ctx) {
|
||||
AuthQuery *auth = storage_->Create<AuthQuery>();
|
||||
auth->action_ = AuthQuery::Action::DROP_ROLE;
|
||||
auth->role_ = ctx->role->accept(this).as<std::string>();
|
||||
return auth;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return AuthQuery*
|
||||
*/
|
||||
antlrcpp::Any CypherMainVisitor::visitShowRoles(
|
||||
MemgraphCypher::ShowRolesContext *ctx) {
|
||||
AuthQuery *auth = storage_->Create<AuthQuery>();
|
||||
auth->action_ = AuthQuery::Action::SHOW_ROLES;
|
||||
return auth;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return AuthQuery*
|
||||
*/
|
||||
antlrcpp::Any CypherMainVisitor::visitCreateUser(
|
||||
MemgraphCypher::CreateUserContext *ctx) {
|
||||
AuthQuery *auth = storage_->Create<AuthQuery>();
|
||||
auth->action_ = AuthQuery::Action::CREATE_USER;
|
||||
auth->user_ = ctx->user->accept(this).as<std::string>();
|
||||
if (ctx->password) {
|
||||
if (!ctx->password->StringLiteral() && !ctx->literal()->CYPHERNULL()) {
|
||||
throw SyntaxException("Password should be a string literal or null.");
|
||||
}
|
||||
auth->password_ = ctx->password->accept(this);
|
||||
}
|
||||
return auth;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return AuthQuery*
|
||||
*/
|
||||
antlrcpp::Any CypherMainVisitor::visitSetPassword(
|
||||
MemgraphCypher::SetPasswordContext *ctx) {
|
||||
AuthQuery *auth = storage_->Create<AuthQuery>();
|
||||
auth->action_ = AuthQuery::Action::SET_PASSWORD;
|
||||
auth->user_ = ctx->user->accept(this).as<std::string>();
|
||||
if (!ctx->password->StringLiteral() && !ctx->literal()->CYPHERNULL()) {
|
||||
throw SyntaxException("Password should be a string literal or null.");
|
||||
}
|
||||
auth->password_ = ctx->password->accept(this);
|
||||
return auth;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return AuthQuery*
|
||||
*/
|
||||
antlrcpp::Any CypherMainVisitor::visitDropUser(
|
||||
MemgraphCypher::DropUserContext *ctx) {
|
||||
AuthQuery *auth = storage_->Create<AuthQuery>();
|
||||
auth->action_ = AuthQuery::Action::DROP_USER;
|
||||
auth->user_ = ctx->user->accept(this).as<std::string>();
|
||||
return auth;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return AuthQuery*
|
||||
*/
|
||||
antlrcpp::Any CypherMainVisitor::visitShowUsers(
|
||||
MemgraphCypher::ShowUsersContext *ctx) {
|
||||
AuthQuery *auth = storage_->Create<AuthQuery>();
|
||||
auth->action_ = AuthQuery::Action::SHOW_USERS;
|
||||
return auth;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return AuthQuery*
|
||||
*/
|
||||
antlrcpp::Any CypherMainVisitor::visitSetRole(
|
||||
MemgraphCypher::SetRoleContext *ctx) {
|
||||
AuthQuery *auth = storage_->Create<AuthQuery>();
|
||||
auth->action_ = AuthQuery::Action::SET_ROLE;
|
||||
auth->user_ = ctx->user->accept(this).as<std::string>();
|
||||
auth->role_ = ctx->role->accept(this).as<std::string>();
|
||||
return auth;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return AuthQuery*
|
||||
*/
|
||||
antlrcpp::Any CypherMainVisitor::visitClearRole(
|
||||
MemgraphCypher::ClearRoleContext *ctx) {
|
||||
AuthQuery *auth = storage_->Create<AuthQuery>();
|
||||
auth->action_ = AuthQuery::Action::CLEAR_ROLE;
|
||||
auth->user_ = ctx->user->accept(this).as<std::string>();
|
||||
return auth;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return AuthQuery*
|
||||
*/
|
||||
antlrcpp::Any CypherMainVisitor::visitGrantPrivilege(
|
||||
MemgraphCypher::GrantPrivilegeContext *ctx) {
|
||||
AuthQuery *auth = storage_->Create<AuthQuery>();
|
||||
auth->action_ = AuthQuery::Action::GRANT_PRIVILEGE;
|
||||
auth->user_or_role_ = ctx->userOrRole->accept(this).as<std::string>();
|
||||
if (ctx->privilegeList()) {
|
||||
for (auto *privilege : ctx->privilegeList()->privilege()) {
|
||||
auth->privileges_.push_back(privilege->accept(this));
|
||||
}
|
||||
} else {
|
||||
/* grant all privileges */
|
||||
auth->privileges_ = kPrivilegesAll;
|
||||
}
|
||||
return auth;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return AuthQuery*
|
||||
*/
|
||||
antlrcpp::Any CypherMainVisitor::visitDenyPrivilege(
|
||||
MemgraphCypher::DenyPrivilegeContext *ctx) {
|
||||
AuthQuery *auth = storage_->Create<AuthQuery>();
|
||||
auth->action_ = AuthQuery::Action::DENY_PRIVILEGE;
|
||||
auth->user_or_role_ = ctx->userOrRole->accept(this).as<std::string>();
|
||||
if (ctx->privilegeList()) {
|
||||
for (auto *privilege : ctx->privilegeList()->privilege()) {
|
||||
auth->privileges_.push_back(privilege->accept(this));
|
||||
}
|
||||
} else {
|
||||
/* deny all privileges */
|
||||
auth->privileges_ = kPrivilegesAll;
|
||||
}
|
||||
return auth;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return AuthQuery*
|
||||
*/
|
||||
antlrcpp::Any CypherMainVisitor::visitRevokePrivilege(
|
||||
MemgraphCypher::RevokePrivilegeContext *ctx) {
|
||||
AuthQuery *auth = storage_->Create<AuthQuery>();
|
||||
auth->action_ = AuthQuery::Action::REVOKE_PRIVILEGE;
|
||||
auth->user_or_role_ = ctx->userOrRole->accept(this).as<std::string>();
|
||||
if (ctx->privilegeList()) {
|
||||
for (auto *privilege : ctx->privilegeList()->privilege()) {
|
||||
auth->privileges_.push_back(privilege->accept(this));
|
||||
}
|
||||
} else {
|
||||
/* revoke all privileges */
|
||||
auth->privileges_ = kPrivilegesAll;
|
||||
}
|
||||
return auth;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return AuthQuery::Privilege
|
||||
*/
|
||||
antlrcpp::Any CypherMainVisitor::visitPrivilege(
|
||||
MemgraphCypher::PrivilegeContext *ctx) {
|
||||
if (ctx->CREATE()) return AuthQuery::Privilege::CREATE;
|
||||
if (ctx->DELETE()) return AuthQuery::Privilege::DELETE;
|
||||
if (ctx->MATCH()) return AuthQuery::Privilege::MATCH;
|
||||
if (ctx->MERGE()) return AuthQuery::Privilege::MERGE;
|
||||
if (ctx->SET()) return AuthQuery::Privilege::SET;
|
||||
if (ctx->REMOVE()) return AuthQuery::Privilege::REMOVE;
|
||||
if (ctx->INDEX()) return AuthQuery::Privilege::INDEX;
|
||||
if (ctx->AUTH()) return AuthQuery::Privilege::AUTH;
|
||||
if (ctx->STREAM()) return AuthQuery::Privilege::STREAM;
|
||||
LOG(FATAL) << "Should not get here - unknown privilege!";
|
||||
}
|
||||
|
||||
/**
|
||||
* @return AuthQuery*
|
||||
*/
|
||||
antlrcpp::Any CypherMainVisitor::visitShowPrivileges(
|
||||
MemgraphCypher::ShowPrivilegesContext *ctx) {
|
||||
AuthQuery *auth = storage_->Create<AuthQuery>();
|
||||
auth->action_ = AuthQuery::Action::SHOW_PRIVILEGES;
|
||||
auth->user_or_role_ = ctx->userOrRole->accept(this).as<std::string>();
|
||||
return auth;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return AuthQuery*
|
||||
*/
|
||||
antlrcpp::Any CypherMainVisitor::visitShowRoleForUser(
|
||||
MemgraphCypher::ShowRoleForUserContext *ctx) {
|
||||
AuthQuery *auth = storage_->Create<AuthQuery>();
|
||||
auth->action_ = AuthQuery::Action::SHOW_ROLE_FOR_USER;
|
||||
auth->user_ = ctx->user->accept(this).as<std::string>();
|
||||
return auth;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return AuthQuery*
|
||||
*/
|
||||
antlrcpp::Any CypherMainVisitor::visitShowUsersForRole(
|
||||
MemgraphCypher::ShowUsersForRoleContext *ctx) {
|
||||
AuthQuery *auth = storage_->Create<AuthQuery>();
|
||||
auth->action_ = AuthQuery::Action::SHOW_USERS_FOR_ROLE;
|
||||
auth->role_ = ctx->role->accept(this).as<std::string>();
|
||||
return auth;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return CreateStream*
|
||||
*/
|
||||
antlrcpp::Any CypherMainVisitor::visitCreateStream(
|
||||
MemgraphCypher::CreateStreamContext *ctx) {
|
||||
std::string stream_name(ctx->streamName()->getText());
|
||||
if (!ctx->streamUri->StringLiteral()) {
|
||||
throw SyntaxException("Stream URI should be a string literal.");
|
||||
}
|
||||
Expression *stream_uri = ctx->streamUri->accept(this);
|
||||
|
||||
if (!ctx->streamTopic->StringLiteral()) {
|
||||
throw SyntaxException("Topic should be a string literal.");
|
||||
}
|
||||
Expression *stream_topic = ctx->streamTopic->accept(this);
|
||||
|
||||
if (!ctx->transformUri->StringLiteral()) {
|
||||
throw SyntaxException("Transform URI should be a string literal.");
|
||||
}
|
||||
Expression *transform_uri = ctx->transformUri->accept(this);
|
||||
|
||||
Expression *batch_interval_in_ms = nullptr;
|
||||
if (ctx->batchIntervalOption()) {
|
||||
batch_interval_in_ms = ctx->batchIntervalOption()->accept(this);
|
||||
}
|
||||
|
||||
  Expression *batch_size = nullptr;

  if (ctx->batchSizeOption()) {
    batch_size = ctx->batchSizeOption()->accept(this);
  }

  return storage_->Create<CreateStream>(stream_name, stream_uri, stream_topic,
                                        transform_uri, batch_interval_in_ms,
                                        batch_size);
}

/**
 * @return Expression*
 */
antlrcpp::Any CypherMainVisitor::visitBatchIntervalOption(
    MemgraphCypher::BatchIntervalOptionContext *ctx) {
  if (!ctx->literal()->numberLiteral() ||
      !ctx->literal()->numberLiteral()->integerLiteral()) {
    throw SyntaxException("Batch interval should be an integer.");
  }
  return ctx->literal()->accept(this);
}

/**
 * @return Expression*
 */
antlrcpp::Any CypherMainVisitor::visitBatchSizeOption(
    MemgraphCypher::BatchSizeOptionContext *ctx) {
  if (!ctx->literal()->numberLiteral() ||
      !ctx->literal()->numberLiteral()->integerLiteral()) {
    throw SyntaxException("Batch size should be an integer.");
  }
  return ctx->literal()->accept(this);
}

/**
 * @return DropStream*
 */
antlrcpp::Any CypherMainVisitor::visitDropStream(
    MemgraphCypher::DropStreamContext *ctx) {
  return storage_->Create<DropStream>(
      std::string(ctx->streamName()->getText()));
}

/**
 * @return ShowStreams*
 */
antlrcpp::Any CypherMainVisitor::visitShowStreams(
    MemgraphCypher::ShowStreamsContext *ctx) {
  return storage_->Create<ShowStreams>();
}

/**
 * @return StartStopStream*
 */
antlrcpp::Any CypherMainVisitor::visitStartStopStream(
    MemgraphCypher::StartStopStreamContext *ctx) {
  std::string stream_name(std::string(ctx->streamName()->getText()));
  bool is_start = static_cast<bool>(ctx->START());
  Expression *limit_batches = nullptr;

  if (ctx->limitBatchesOption()) {
    if (!is_start) {
      throw SyntaxException("STOP STREAM can't set batch limit.");
    }
    limit_batches = ctx->limitBatchesOption()->accept(this);
  }

  return storage_->Create<StartStopStream>(stream_name, is_start,
                                           limit_batches);
}

/**
 * @return Expression*
 */
antlrcpp::Any CypherMainVisitor::visitLimitBatchesOption(
    MemgraphCypher::LimitBatchesOptionContext *ctx) {
  if (!ctx->literal()->numberLiteral() ||
      !ctx->literal()->numberLiteral()->integerLiteral()) {
    throw SyntaxException("Batch limit should be an integer.");
  }
  return ctx->literal()->accept(this);
}

/*
 * @return StartStopAllStreams*
 */
antlrcpp::Any CypherMainVisitor::visitStartStopAllStreams(
    MemgraphCypher::StartStopAllStreamsContext *ctx) {
  bool is_start = static_cast<bool>(ctx->START());
  return storage_->Create<StartStopAllStreams>(is_start);
}

antlrcpp::Any CypherMainVisitor::visitCypherReturn(
    MemgraphCypher::CypherReturnContext *ctx) {
  auto *return_clause = storage_->Create<Return>();
@ -649,21 +269,6 @@ antlrcpp::Any CypherMainVisitor::visitCypherReturn(
  return return_clause;
}

/**
 * @return TestStream*
 */
antlrcpp::Any CypherMainVisitor::visitTestStream(
    MemgraphCypher::TestStreamContext *ctx) {
  std::string stream_name(std::string(ctx->streamName()->getText()));
  Expression *limit_batches = nullptr;

  if (ctx->limitBatchesOption()) {
    limit_batches = ctx->limitBatchesOption()->accept(this);
  }

  return storage_->Create<TestStream>(stream_name, limit_batches);
}

antlrcpp::Any CypherMainVisitor::visitReturnBody(
    MemgraphCypher::ReturnBodyContext *ctx) {
  ReturnBody body;
@ -152,17 +152,6 @@ class CypherMainVisitor : public antlropencypher::MemgraphCypherBaseVisitor {
  antlrcpp::Any visitExplainQuery(
      MemgraphCypher::ExplainQueryContext *ctx) override;

  /**
   * @return Query*
   */
  antlrcpp::Any visitAuthQuery(MemgraphCypher::AuthQueryContext *ctx) override;

  /**
   * @return Query*
   */
  antlrcpp::Any visitStreamQuery(
      MemgraphCypher::StreamQueryContext *ctx) override;

  /**
   * @return CypherUnion*
   */
@ -191,28 +180,6 @@ class CypherMainVisitor : public antlropencypher::MemgraphCypherBaseVisitor {
   */
  antlrcpp::Any visitCreate(MemgraphCypher::CreateContext *ctx) override;

  /**
   * @return std::string
   */
  antlrcpp::Any visitUserOrRoleName(
      MemgraphCypher::UserOrRoleNameContext *ctx) override;

  /**
   * @return AuthQuery*
   */
  antlrcpp::Any visitCreateRole(
      MemgraphCypher::CreateRoleContext *ctx) override;

  /**
   * @return AuthQuery*
   */
  antlrcpp::Any visitDropRole(MemgraphCypher::DropRoleContext *ctx) override;

  /**
   * @return AuthQuery*
   */
  antlrcpp::Any visitShowRoles(MemgraphCypher::ShowRolesContext *ctx) override;

  /**
   * @return CreateIndex*
   */
@ -225,124 +192,6 @@ class CypherMainVisitor : public antlropencypher::MemgraphCypherBaseVisitor {
  antlrcpp::Any visitCreateUniqueIndex(
      MemgraphCypher::CreateUniqueIndexContext *ctx) override;

  /**
   * @return AuthQuery*
   */
  antlrcpp::Any visitCreateUser(
      MemgraphCypher::CreateUserContext *ctx) override;

  /**
   * @return AuthQuery*
   */
  antlrcpp::Any visitSetPassword(
      MemgraphCypher::SetPasswordContext *ctx) override;

  /**
   * @return AuthQuery*
   */
  antlrcpp::Any visitDropUser(MemgraphCypher::DropUserContext *ctx) override;

  /**
   * @return AuthQuery*
   */
  antlrcpp::Any visitShowUsers(MemgraphCypher::ShowUsersContext *ctx) override;

  /**
   * @return AuthQuery*
   */
  antlrcpp::Any visitSetRole(MemgraphCypher::SetRoleContext *ctx) override;

  /**
   * @return AuthQuery*
   */
  antlrcpp::Any visitClearRole(MemgraphCypher::ClearRoleContext *ctx) override;

  /**
   * @return AuthQuery*
   */
  antlrcpp::Any visitGrantPrivilege(
      MemgraphCypher::GrantPrivilegeContext *ctx) override;

  /**
   * @return AuthQuery*
   */
  antlrcpp::Any visitDenyPrivilege(
      MemgraphCypher::DenyPrivilegeContext *ctx) override;

  /**
   * @return AuthQuery*
   */
  antlrcpp::Any visitRevokePrivilege(
      MemgraphCypher::RevokePrivilegeContext *ctx) override;

  /**
   * @return AuthQuery::Privilege
   */
  antlrcpp::Any visitPrivilege(MemgraphCypher::PrivilegeContext *ctx) override;

  /**
   * @return AuthQuery*
   */
  antlrcpp::Any visitShowPrivileges(
      MemgraphCypher::ShowPrivilegesContext *ctx) override;

  /**
   * @return AuthQuery*
   */
  antlrcpp::Any visitShowRoleForUser(
      MemgraphCypher::ShowRoleForUserContext *ctx) override;

  /**
   * @return AuthQuery*
   */
  antlrcpp::Any visitShowUsersForRole(
      MemgraphCypher::ShowUsersForRoleContext *ctx) override;

  /**
   * @return CreateStream*
   */
  antlrcpp::Any visitCreateStream(
      MemgraphCypher::CreateStreamContext *ctx) override;

  antlrcpp::Any visitBatchIntervalOption(
      MemgraphCypher::BatchIntervalOptionContext *ctx) override;

  antlrcpp::Any visitBatchSizeOption(
      MemgraphCypher::BatchSizeOptionContext *ctx) override;

  /**
   * @return DropStream*
   */
  antlrcpp::Any visitDropStream(
      MemgraphCypher::DropStreamContext *ctx) override;

  /**
   * @return ShowStreams*
   */
  antlrcpp::Any visitShowStreams(
      MemgraphCypher::ShowStreamsContext *ctx) override;

  /**
   * @return StartStopStream*
   */
  antlrcpp::Any visitStartStopStream(
      MemgraphCypher::StartStopStreamContext *ctx) override;

  /**
   * @return StartStopAllStreams*
   */
  antlrcpp::Any visitStartStopAllStreams(
      MemgraphCypher::StartStopAllStreamsContext *ctx) override;

  antlrcpp::Any visitLimitBatchesOption(
      MemgraphCypher::LimitBatchesOptionContext *ctx) override;

  /**
   * @return TestStream*
   */
  antlrcpp::Any visitTestStream(
      MemgraphCypher::TestStreamContext *ctx) override;

  /**
   * @return Return*
   */
@ -5,134 +5,3 @@ parser grammar MemgraphCypher ;
options { tokenVocab=MemgraphCypherLexer; }

import Cypher ;

memgraphCypherKeyword : cypherKeyword
                      | ALTER
                      | AUTH
                      | BATCH
                      | BATCHES
                      | CLEAR
                      | DATA
                      | DENY
                      | DROP
                      | FOR
                      | FROM
                      | GRANT
                      | IDENTIFIED
                      | INTERVAL
                      | K_TEST
                      | KAFKA
                      | LOAD
                      | PASSWORD
                      | PRIVILEGES
                      | REVOKE
                      | ROLE
                      | ROLES
                      | SIZE
                      | START
                      | STOP
                      | STREAM
                      | STREAMS
                      | TO
                      | TOPIC
                      | TRANSFORM
                      | USER
                      | USERS
                      ;

symbolicName : UnescapedSymbolicName
             | EscapedSymbolicName
             | memgraphCypherKeyword
             ;

query : cypherQuery
      | indexQuery
      | explainQuery
      | authQuery
      | streamQuery
      ;

authQuery : createRole
          | dropRole
          | showRoles
          | createUser
          | setPassword
          | dropUser
          | showUsers
          | setRole
          | clearRole
          | grantPrivilege
          | denyPrivilege
          | revokePrivilege
          | showPrivileges
          | showRoleForUser
          | showUsersForRole
          ;

userOrRoleName : symbolicName ;

createRole : CREATE ROLE role=userOrRoleName ;

dropRole : DROP ROLE role=userOrRoleName ;

showRoles : SHOW ROLES ;

createUser : CREATE USER user=userOrRoleName
             ( IDENTIFIED BY password=literal )? ;

setPassword : SET PASSWORD FOR user=userOrRoleName TO password=literal;

dropUser : DROP USER user=userOrRoleName ;

showUsers : SHOW USERS ;

setRole : SET ROLE FOR user=userOrRoleName TO role=userOrRoleName;

clearRole : CLEAR ROLE FOR user=userOrRoleName ;

grantPrivilege : GRANT ( ALL PRIVILEGES | privileges=privilegeList ) TO userOrRole=userOrRoleName ;

denyPrivilege : DENY ( ALL PRIVILEGES | privileges=privilegeList ) TO userOrRole=userOrRoleName ;

revokePrivilege : REVOKE ( ALL PRIVILEGES | privileges=privilegeList ) FROM userOrRole=userOrRoleName ;

privilege : CREATE | DELETE | MATCH | MERGE | SET
          | REMOVE | INDEX | AUTH | STREAM ;

privilegeList : privilege ( ',' privilege )* ;

showPrivileges : SHOW PRIVILEGES FOR userOrRole=userOrRoleName ;

showRoleForUser : SHOW ROLE FOR user=userOrRoleName ;

showUsersForRole : SHOW USERS FOR role=userOrRoleName ;

streamQuery : createStream
            | dropStream
            | showStreams
            | startStopStream
            | startStopAllStreams
            | testStream
            ;

streamName : symbolicName ;

createStream : CREATE STREAM streamName AS LOAD DATA KAFKA
               streamUri=literal WITH TOPIC streamTopic=literal WITH TRANSFORM
               transformUri=literal ( batchIntervalOption )? ( batchSizeOption )? ;

batchIntervalOption : BATCH INTERVAL literal ;

batchSizeOption : BATCH SIZE literal ;

dropStream : DROP STREAM streamName ;

showStreams : SHOW STREAMS ;

startStopStream : ( START | STOP ) STREAM streamName ( limitBatchesOption )? ;

limitBatchesOption : LIMIT limitBatches=literal BATCHES ;

startStopAllStreams : ( START | STOP ) ALL STREAMS ;

testStream : K_TEST STREAM streamName ( limitBatchesOption )? ;
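(For orientation only: a query accepted by the removed createStream / startStopStream rules above would have looked roughly like the following. The stream name, broker address, topic, and transform script URI are made-up placeholders, not values taken from this diff.)

    CREATE STREAM mystream AS
      LOAD DATA KAFKA '127.0.0.1:9092'
      WITH TOPIC 'my_topic'
      WITH TRANSFORM 'http://127.0.0.1/transform.py'
      BATCH INTERVAL 100 BATCH SIZE 10;
    START STREAM mystream LIMIT 5 BATCHES;
    SHOW STREAMS;
    STOP ALL STREAMS;
    DROP STREAM mystream;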
@ -9,36 +9,3 @@
lexer grammar MemgraphCypherLexer ;

import CypherLexer ;

ALTER : A L T E R ;
AUTH : A U T H ;
BATCH : B A T C H ;
BATCHES : B A T C H E S ;
CLEAR : C L E A R ;
DATA : D A T A ;
DENY : D E N Y ;
DROP : D R O P ;
FOR : F O R ;
FROM : F R O M ;
GRANT : G R A N T ;
GRANTS : G R A N T S ;
IDENTIFIED : I D E N T I F I E D ;
INTERVAL : I N T E R V A L ;
K_TEST : T E S T ;
KAFKA : K A F K A ;
LOAD : L O A D ;
PASSWORD : P A S S W O R D ;
PRIVILEGES : P R I V I L E G E S ;
REVOKE : R E V O K E ;
ROLE : R O L E ;
ROLES : R O L E S ;
SIZE : S I Z E ;
START : S T A R T ;
STOP : S T O P ;
STREAM : S T R E A M ;
STREAMS : S T R E A M S ;
TO : T O ;
TOPIC : T O P I C ;
TRANSFORM : T R A N S F O R M ;
USER : U S E R ;
USERS : U S E R S ;
@ -222,20 +222,6 @@ bool SymbolGenerator::Visit(CreateIndex &) { return true; }

bool SymbolGenerator::Visit(CreateUniqueIndex &) { return true; }

bool SymbolGenerator::Visit(AuthQuery &) { return true; }

bool SymbolGenerator::Visit(CreateStream &) { return true; }

bool SymbolGenerator::Visit(DropStream &) { return true; }

bool SymbolGenerator::Visit(ShowStreams &) { return true; }

bool SymbolGenerator::Visit(StartStopStream &) { return true; }

bool SymbolGenerator::Visit(StartStopAllStreams &) { return true; }

bool SymbolGenerator::Visit(TestStream &) { return true; }

// Expressions

SymbolGenerator::ReturnType SymbolGenerator::Visit(Identifier &ident) {
@ -48,13 +48,6 @@ class SymbolGenerator : public HierarchicalTreeVisitor {
  bool PostVisit(Match &) override;
  bool Visit(CreateIndex &) override;
  bool Visit(CreateUniqueIndex &) override;
  bool Visit(AuthQuery &) override;
  bool Visit(CreateStream &) override;
  bool Visit(DropStream &) override;
  bool Visit(ShowStreams &) override;
  bool Visit(StartStopStream &) override;
  bool Visit(StartStopAllStreams &) override;
  bool Visit(TestStream &) override;

  // Expressions
  ReturnType Visit(Identifier &) override;
@ -57,13 +57,6 @@ class ExpressionEvaluator : public TreeVisitor<TypedValue> {
  BLOCK_VISIT(Unwind);
  BLOCK_VISIT(CreateIndex);
  BLOCK_VISIT(CreateUniqueIndex);
  BLOCK_VISIT(AuthQuery);
  BLOCK_VISIT(CreateStream);
  BLOCK_VISIT(DropStream);
  BLOCK_VISIT(ShowStreams);
  BLOCK_VISIT(StartStopStream);
  BLOCK_VISIT(StartStopAllStreams);
  BLOCK_VISIT(TestStream);

#undef BLOCK_VISIT
@ -6,7 +6,6 @@
#include "query/exceptions.hpp"
#include "query/frontend/ast/cypher_main_visitor.hpp"
#include "query/frontend/opencypher/parser.hpp"
#include "query/frontend/semantic/required_privileges.hpp"
#include "query/frontend/semantic/symbol_generator.hpp"
#include "query/plan/planner.hpp"
#include "query/plan/vertex_count_cache.hpp"
@ -51,8 +50,6 @@ Interpreter::Results Interpreter::operator()(

  Context ctx(db_accessor);
  ctx.in_explicit_transaction_ = in_explicit_transaction;
  ctx.auth_ = auth_;
  ctx.kafka_streams_ = kafka_streams_;
  ctx.evaluation_context_ = evaluation_context;

  ParsingContext parsing_context;
@ -61,9 +58,6 @@ Interpreter::Results Interpreter::operator()(
  AstStorage ast_storage;
  Query *ast =
      QueryToAst(stripped, parsing_context, &ast_storage, &db_accessor);
  // TODO: Maybe cache required privileges to improve performance on very simple
  // queries.
  auto required_privileges = query::GetRequiredPrivileges(ast);
  auto frontend_time = frontend_timer.Elapsed();

  // Try to get a cached plan. Note that this local shared_ptr might be the only
@ -114,7 +108,7 @@ Interpreter::Results Interpreter::operator()(
  }

  return Results(std::move(ctx), plan, std::move(cursor), output_symbols,
                 header, summary, plan_cache_, required_privileges);
                 header, summary, plan_cache_);
}

std::shared_ptr<Interpreter::CachedPlan> Interpreter::AstToPlan(
@ -16,14 +16,6 @@
DECLARE_bool(query_cost_planner);
DECLARE_int32(query_plan_cache_ttl);

namespace auth {
class Auth;
} // namespace auth

namespace integrations::kafka {
class Streams;
} // namespace integrations::kafka

namespace query {

// TODO: Maybe this should move to query/plan/planner.
@ -74,8 +66,7 @@ class Interpreter {
  Results(Context ctx, std::shared_ptr<CachedPlan> plan,
          std::unique_ptr<query::plan::Cursor> cursor,
          std::vector<Symbol> output_symbols, std::vector<std::string> header,
          std::map<std::string, TypedValue> summary, PlanCacheT &plan_cache,
          std::vector<AuthQuery::Privilege> privileges)
          std::map<std::string, TypedValue> summary, PlanCacheT &plan_cache)
      : ctx_(std::move(ctx)),
        plan_(plan),
        cursor_(std::move(cursor)),
@ -83,8 +74,7 @@ class Interpreter {
        output_symbols_(output_symbols),
        header_(header),
        summary_(summary),
        plan_cache_(plan_cache),
        privileges_(std::move(privileges)) {}
        plan_cache_(plan_cache) {}

 public:
  Results(const Results &) = delete;
@ -140,10 +130,6 @@ class Interpreter {
  const std::vector<std::string> &header() { return header_; }
  const std::map<std::string, TypedValue> &summary() { return summary_; }

  const std::vector<AuthQuery::Privilege> &privileges() {
    return privileges_;
  }

 private:
  Context ctx_;
  std::shared_ptr<CachedPlan> plan_;
@ -157,8 +143,6 @@ class Interpreter {
  double execution_time_{0};
  // Gets invalidated after if an index has been built.
  PlanCacheT &plan_cache_;

  std::vector<AuthQuery::Privilege> privileges_;
};

Interpreter() = default;
@ -178,9 +162,6 @@ class Interpreter {
      const std::map<std::string, PropertyValue> &params,
      bool in_explicit_transaction);

  auth::Auth *auth_ = nullptr;
  integrations::kafka::Streams *kafka_streams_ = nullptr;

 protected:
  // high level tree -> logical plan
  // AstStorage and SymbolTable may be modified during planning. The created
@ -185,13 +185,6 @@ class CostEstimator : public HierarchicalLogicalOperatorVisitor {

  bool Visit(Once &) override { return true; }
  bool Visit(CreateIndex &) override { return true; }
  bool Visit(AuthHandler &) override { return true; }
  bool Visit(CreateStream &) override { return true; }
  bool Visit(DropStream &) override { return true; }
  bool Visit(ShowStreams &) override { return true; }
  bool Visit(StartStopStream &) override { return true; }
  bool Visit(StartStopAllStreams &) override { return true; }
  bool Visit(TestStream &) override { return true; }

  // TODO: Cost estimate PullRemote and ProduceRemote?
@ -13,13 +13,9 @@

#include "glog/logging.h"

#include "auth/auth.hpp"
#include "communication/result_stream_faker.hpp"
#include "database/graph_db_accessor.hpp"
#include "glue/auth.hpp"
#include "glue/communication.hpp"
#include "integrations/kafka/exceptions.hpp"
#include "integrations/kafka/streams.hpp"
#include "query/context.hpp"
#include "query/exceptions.hpp"
#include "query/frontend/ast/ast.hpp"
@ -3247,743 +3243,6 @@ std::unique_ptr<Cursor> Cartesian::MakeCursor(
|
||||
return std::make_unique<CartesianCursor>(*this, db);
|
||||
}
|
||||
|
||||
AuthHandler::AuthHandler(AuthQuery::Action action, std::string user,
|
||||
std::string role, std::string user_or_role,
|
||||
Expression *password,
|
||||
std::vector<AuthQuery::Privilege> privileges,
|
||||
Symbol user_symbol, Symbol role_symbol,
|
||||
Symbol privilege_symbol, Symbol effective_symbol,
|
||||
Symbol details_symbol)
|
||||
: action_(action),
|
||||
user_(user),
|
||||
role_(role),
|
||||
user_or_role_(user_or_role),
|
||||
password_(password),
|
||||
privileges_(privileges),
|
||||
user_symbol_(user_symbol),
|
||||
role_symbol_(role_symbol),
|
||||
privilege_symbol_(privilege_symbol),
|
||||
effective_symbol_(effective_symbol),
|
||||
details_symbol_(details_symbol) {}
|
||||
|
||||
bool AuthHandler::Accept(HierarchicalLogicalOperatorVisitor &visitor) {
|
||||
return visitor.Visit(*this);
|
||||
}
|
||||
|
||||
std::vector<Symbol> AuthHandler::OutputSymbols(const SymbolTable &) const {
|
||||
switch (action_) {
|
||||
case AuthQuery::Action::SHOW_USERS:
|
||||
case AuthQuery::Action::SHOW_USERS_FOR_ROLE:
|
||||
return {user_symbol_};
|
||||
|
||||
case AuthQuery::Action::SHOW_ROLES:
|
||||
case AuthQuery::Action::SHOW_ROLE_FOR_USER:
|
||||
return {role_symbol_};
|
||||
|
||||
case AuthQuery::Action::SHOW_PRIVILEGES:
|
||||
return {privilege_symbol_, effective_symbol_, details_symbol_};
|
||||
|
||||
case AuthQuery::Action::CREATE_USER:
|
||||
case AuthQuery::Action::DROP_USER:
|
||||
case AuthQuery::Action::SET_PASSWORD:
|
||||
case AuthQuery::Action::CREATE_ROLE:
|
||||
case AuthQuery::Action::DROP_ROLE:
|
||||
case AuthQuery::Action::SET_ROLE:
|
||||
case AuthQuery::Action::CLEAR_ROLE:
|
||||
case AuthQuery::Action::GRANT_PRIVILEGE:
|
||||
case AuthQuery::Action::DENY_PRIVILEGE:
|
||||
case AuthQuery::Action::REVOKE_PRIVILEGE:
|
||||
return {};
|
||||
}
|
||||
}
|
||||
|
||||
class AuthHandlerCursor : public Cursor {
|
||||
public:
|
||||
AuthHandlerCursor(const AuthHandler &self) : self_(self) {}
|
||||
|
||||
std::vector<auth::Permission> GetAuthPermissions() {
|
||||
std::vector<auth::Permission> ret;
|
||||
for (const auto &privilege : self_.privileges_) {
|
||||
ret.push_back(glue::PrivilegeToPermission(privilege));
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
std::vector<std::tuple<std::string, std::string, std::string>>
|
||||
GetGrantsForAuthUser(const auth::User &user) {
|
||||
std::vector<std::tuple<std::string, std::string, std::string>> ret;
|
||||
const auto &permissions = user.GetPermissions();
|
||||
for (const auto &privilege : kPrivilegesAll) {
|
||||
auto permission = glue::PrivilegeToPermission(privilege);
|
||||
auto effective = permissions.Has(permission);
|
||||
if (permissions.Has(permission) != auth::PermissionLevel::NEUTRAL) {
|
||||
std::vector<std::string> description;
|
||||
auto user_level = user.permissions().Has(permission);
|
||||
if (user_level == auth::PermissionLevel::GRANT) {
|
||||
description.push_back("GRANTED TO USER");
|
||||
} else if (user_level == auth::PermissionLevel::DENY) {
|
||||
description.push_back("DENIED TO USER");
|
||||
}
|
||||
if (user.role()) {
|
||||
auto role_level = user.role()->permissions().Has(permission);
|
||||
if (role_level == auth::PermissionLevel::GRANT) {
|
||||
description.push_back("GRANTED TO ROLE");
|
||||
} else if (role_level == auth::PermissionLevel::DENY) {
|
||||
description.push_back("DENIED TO ROLE");
|
||||
}
|
||||
}
|
||||
ret.push_back({auth::PermissionToString(permission),
|
||||
auth::PermissionLevelToString(effective),
|
||||
utils::Join(description, ", ")});
|
||||
}
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
std::vector<std::tuple<std::string, std::string, std::string>>
|
||||
GetGrantsForAuthRole(const auth::Role &role) {
|
||||
std::vector<std::tuple<std::string, std::string, std::string>> ret;
|
||||
const auto &permissions = role.permissions();
|
||||
for (const auto &privilege : kPrivilegesAll) {
|
||||
auto permission = glue::PrivilegeToPermission(privilege);
|
||||
auto effective = permissions.Has(permission);
|
||||
if (effective != auth::PermissionLevel::NEUTRAL) {
|
||||
std::string description;
|
||||
if (effective == auth::PermissionLevel::GRANT) {
|
||||
description = "GRANTED TO ROLE";
|
||||
} else if (effective == auth::PermissionLevel::DENY) {
|
||||
description = "DENIED TO ROLE";
|
||||
}
|
||||
ret.push_back({auth::PermissionToString(permission),
|
||||
auth::PermissionLevelToString(effective), description});
|
||||
}
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
bool Pull(Frame &frame, Context &ctx) override {
|
||||
if (ctx.in_explicit_transaction_) {
|
||||
throw UserModificationInMulticommandTxException();
|
||||
}
|
||||
|
||||
ExpressionEvaluator evaluator(&frame, ctx.symbol_table_,
|
||||
ctx.evaluation_context_, &ctx.db_accessor_,
|
||||
GraphView::OLD);
|
||||
std::experimental::optional<std::string> password;
|
||||
if (self_.password_) {
|
||||
auto password_tv = self_.password_->Accept(evaluator);
|
||||
if (!password_tv.IsString() && !password_tv.IsNull()) {
|
||||
throw QueryRuntimeException(
|
||||
"Expected string or null for password, got {}.",
|
||||
password_tv.type());
|
||||
}
|
||||
if (password_tv.IsString()) {
|
||||
password = password_tv.ValueString();
|
||||
}
|
||||
}
|
||||
|
||||
auto &auth = *ctx.auth_;
|
||||
|
||||
switch (self_.action_) {
|
||||
case AuthQuery::Action::CREATE_USER: {
|
||||
std::lock_guard<std::mutex> lock(auth.WithLock());
|
||||
auto user = auth.AddUser(self_.user_, password);
|
||||
if (!user) {
|
||||
throw QueryRuntimeException("User or role '{}' already exists.",
|
||||
self_.user_);
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
case AuthQuery::Action::DROP_USER: {
|
||||
std::lock_guard<std::mutex> lock(auth.WithLock());
|
||||
auto user = auth.GetUser(self_.user_);
|
||||
if (!user) {
|
||||
throw QueryRuntimeException("User '{}' doesn't exist.", self_.user_);
|
||||
}
|
||||
if (!auth.RemoveUser(self_.user_)) {
|
||||
throw QueryRuntimeException("Couldn't remove user '{}'.",
|
||||
self_.user_);
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
case AuthQuery::Action::SET_PASSWORD: {
|
||||
std::lock_guard<std::mutex> lock(auth.WithLock());
|
||||
auto user = auth.GetUser(self_.user_);
|
||||
if (!user) {
|
||||
throw QueryRuntimeException("User '{}' doesn't exist.", self_.user_);
|
||||
}
|
||||
user->UpdatePassword(password);
|
||||
auth.SaveUser(*user);
|
||||
return false;
|
||||
}
|
||||
|
||||
case AuthQuery::Action::CREATE_ROLE: {
|
||||
std::lock_guard<std::mutex> lock(auth.WithLock());
|
||||
auto role = auth.AddRole(self_.role_);
|
||||
if (!role) {
|
||||
throw QueryRuntimeException("User or role '{}' already exists.",
|
||||
self_.role_);
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
case AuthQuery::Action::DROP_ROLE: {
|
||||
std::lock_guard<std::mutex> lock(auth.WithLock());
|
||||
auto role = auth.GetRole(self_.role_);
|
||||
if (!role) {
|
||||
throw QueryRuntimeException("Role '{}' doesn't exist.", self_.role_);
|
||||
}
|
||||
if (!auth.RemoveRole(self_.role_)) {
|
||||
throw QueryRuntimeException("Couldn't remove role '{}'.",
|
||||
self_.role_);
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
case AuthQuery::Action::SHOW_USERS: {
|
||||
if (!users_) {
|
||||
std::lock_guard<std::mutex> lock(auth.WithLock());
|
||||
users_.emplace(auth.AllUsers());
|
||||
users_it_ = users_->begin();
|
||||
}
|
||||
|
||||
if (users_it_ == users_->end()) return false;
|
||||
|
||||
frame[self_.user_symbol_] = users_it_->username();
|
||||
users_it_++;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
case AuthQuery::Action::SHOW_ROLES: {
|
||||
if (!roles_) {
|
||||
std::lock_guard<std::mutex> lock(auth.WithLock());
|
||||
roles_.emplace(auth.AllRoles());
|
||||
roles_it_ = roles_->begin();
|
||||
}
|
||||
|
||||
if (roles_it_ == roles_->end()) return false;
|
||||
|
||||
frame[self_.role_symbol_] = roles_it_->rolename();
|
||||
roles_it_++;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
case AuthQuery::Action::SET_ROLE: {
|
||||
std::lock_guard<std::mutex> lock(auth.WithLock());
|
||||
auto user = auth.GetUser(self_.user_);
|
||||
if (!user) {
|
||||
throw QueryRuntimeException("User '{}' doesn't exist.", self_.user_);
|
||||
}
|
||||
auto role = auth.GetRole(self_.role_);
|
||||
if (!role) {
|
||||
throw QueryRuntimeException("Role '{}' doesn't exist.", self_.role_);
|
||||
}
|
||||
if (user->role()) {
|
||||
throw QueryRuntimeException(
|
||||
"User '{}' is already a member of role '{}'.", self_.user_,
|
||||
user->role()->rolename());
|
||||
}
|
||||
user->SetRole(*role);
|
||||
auth.SaveUser(*user);
|
||||
return false;
|
||||
}
|
||||
|
||||
case AuthQuery::Action::CLEAR_ROLE: {
|
||||
std::lock_guard<std::mutex> lock(auth.WithLock());
|
||||
auto user = auth.GetUser(self_.user_);
|
||||
if (!user) {
|
||||
throw QueryRuntimeException("User '{}' doesn't exist.", self_.user_);
|
||||
}
|
||||
user->ClearRole();
|
||||
auth.SaveUser(*user);
|
||||
return false;
|
||||
}
|
||||
|
||||
case AuthQuery::Action::GRANT_PRIVILEGE:
|
||||
case AuthQuery::Action::DENY_PRIVILEGE:
|
||||
case AuthQuery::Action::REVOKE_PRIVILEGE: {
|
||||
std::lock_guard<std::mutex> lock(auth.WithLock());
|
||||
auto user = auth.GetUser(self_.user_or_role_);
|
||||
auto role = auth.GetRole(self_.user_or_role_);
|
||||
if (!user && !role) {
|
||||
throw QueryRuntimeException("User or role '{}' doesn't exist.",
|
||||
self_.user_or_role_);
|
||||
}
|
||||
auto permissions = GetAuthPermissions();
|
||||
if (user) {
|
||||
for (const auto &permission : permissions) {
|
||||
// TODO (mferencevic): should we first check that the privilege
|
||||
// is granted/denied/revoked before unconditionally
|
||||
// granting/denying/revoking it?
|
||||
if (self_.action_ == AuthQuery::Action::GRANT_PRIVILEGE) {
|
||||
user->permissions().Grant(permission);
|
||||
} else if (self_.action_ == AuthQuery::Action::DENY_PRIVILEGE) {
|
||||
user->permissions().Deny(permission);
|
||||
} else {
|
||||
user->permissions().Revoke(permission);
|
||||
}
|
||||
}
|
||||
auth.SaveUser(*user);
|
||||
} else {
|
||||
for (const auto &permission : permissions) {
|
||||
// TODO (mferencevic): should we first check that the privilege
|
||||
// is granted/denied/revoked before unconditionally
|
||||
// granting/denying/revoking it?
|
||||
if (self_.action_ == AuthQuery::Action::GRANT_PRIVILEGE) {
|
||||
role->permissions().Grant(permission);
|
||||
} else if (self_.action_ == AuthQuery::Action::DENY_PRIVILEGE) {
|
||||
role->permissions().Deny(permission);
|
||||
} else {
|
||||
role->permissions().Revoke(permission);
|
||||
}
|
||||
}
|
||||
auth.SaveRole(*role);
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
case AuthQuery::Action::SHOW_PRIVILEGES: {
|
||||
if (!grants_) {
|
||||
std::lock_guard<std::mutex> lock(auth.WithLock());
|
||||
auto user = auth.GetUser(self_.user_or_role_);
|
||||
auto role = auth.GetRole(self_.user_or_role_);
|
||||
if (!user && !role) {
|
||||
throw QueryRuntimeException("User or role '{}' doesn't exist.",
|
||||
self_.user_or_role_);
|
||||
}
|
||||
if (user) {
|
||||
grants_.emplace(GetGrantsForAuthUser(*user));
|
||||
} else {
|
||||
grants_.emplace(GetGrantsForAuthRole(*role));
|
||||
}
|
||||
grants_it_ = grants_->begin();
|
||||
}
|
||||
|
||||
if (grants_it_ == grants_->end()) return false;
|
||||
|
||||
frame[self_.privilege_symbol_] = std::get<0>(*grants_it_);
|
||||
frame[self_.effective_symbol_] = std::get<1>(*grants_it_);
|
||||
frame[self_.details_symbol_] = std::get<2>(*grants_it_);
|
||||
grants_it_++;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
case AuthQuery::Action::SHOW_ROLE_FOR_USER: {
|
||||
if (returned_role_for_user_) return false;
|
||||
std::lock_guard<std::mutex> lock(auth.WithLock());
|
||||
auto user = auth.GetUser(self_.user_);
|
||||
if (!user) {
|
||||
throw QueryRuntimeException("User '{}' doesn't exist.", self_.user_);
|
||||
}
|
||||
if (user->role()) {
|
||||
frame[self_.role_symbol_] = user->role()->rolename();
|
||||
} else {
|
||||
frame[self_.role_symbol_] = TypedValue::Null;
|
||||
}
|
||||
returned_role_for_user_ = true;
|
||||
return true;
|
||||
}
|
||||
|
||||
case AuthQuery::Action::SHOW_USERS_FOR_ROLE: {
|
||||
if (!users_) {
|
||||
std::lock_guard<std::mutex> lock(auth.WithLock());
|
||||
auto role = auth.GetRole(self_.role_);
|
||||
if (!role) {
|
||||
throw QueryRuntimeException("Role '{}' doesn't exist.",
|
||||
self_.role_);
|
||||
}
|
||||
users_.emplace(auth.AllUsersForRole(self_.role_));
|
||||
users_it_ = users_->begin();
|
||||
}
|
||||
|
||||
if (users_it_ == users_->end()) return false;
|
||||
|
||||
frame[self_.user_symbol_] = users_it_->username();
|
||||
users_it_++;
|
||||
|
||||
return true;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void Shutdown() override {}
|
||||
|
||||
void Reset() override {
|
||||
LOG(FATAL) << "AuthHandler cursor should never be reset";
|
||||
}
|
||||
|
||||
private:
|
||||
const AuthHandler &self_;
|
||||
std::experimental::optional<std::vector<auth::User>> users_;
|
||||
std::vector<auth::User>::iterator users_it_;
|
||||
std::experimental::optional<std::vector<auth::Role>> roles_;
|
||||
std::vector<auth::Role>::iterator roles_it_;
|
||||
std::experimental::optional<
|
||||
std::vector<std::tuple<std::string, std::string, std::string>>>
|
||||
grants_;
|
||||
std::vector<std::tuple<std::string, std::string, std::string>>::iterator
|
||||
grants_it_;
|
||||
bool returned_role_for_user_{false};
|
||||
};
|
||||
|
||||
std::unique_ptr<Cursor> AuthHandler::MakeCursor(
|
||||
database::GraphDbAccessor &db) const {
|
||||
return std::make_unique<AuthHandlerCursor>(*this);
|
||||
}
|
||||
|
||||
WITHOUT_SINGLE_INPUT(AuthHandler)
|
||||
|
||||
CreateStream::CreateStream(std::string stream_name, Expression *stream_uri,
|
||||
Expression *stream_topic, Expression *transform_uri,
|
||||
Expression *batch_interval_in_ms,
|
||||
Expression *batch_size)
|
||||
: stream_name_(std::move(stream_name)),
|
||||
stream_uri_(stream_uri),
|
||||
stream_topic_(stream_topic),
|
||||
transform_uri_(transform_uri),
|
||||
batch_interval_in_ms_(batch_interval_in_ms),
|
||||
batch_size_(batch_size) {}
|
||||
|
||||
WITHOUT_SINGLE_INPUT(CreateStream)
|
||||
|
||||
class CreateStreamCursor : public Cursor {
|
||||
using StreamInfo = integrations::kafka::StreamInfo;
|
||||
|
||||
public:
|
||||
CreateStreamCursor(const CreateStream &self, database::GraphDbAccessor &)
|
||||
: self_(self) {}
|
||||
|
||||
bool Pull(Frame &frame, Context &ctx) override {
|
||||
if (ctx.in_explicit_transaction_) {
|
||||
throw StreamClauseInMulticommandTxException();
|
||||
}
|
||||
ExpressionEvaluator evaluator(&frame, ctx.symbol_table_,
|
||||
ctx.evaluation_context_, &ctx.db_accessor_,
|
||||
GraphView::OLD);
|
||||
|
||||
TypedValue stream_uri = self_.stream_uri_->Accept(evaluator);
|
||||
TypedValue stream_topic = self_.stream_topic_->Accept(evaluator);
|
||||
TypedValue transform_uri = self_.transform_uri_->Accept(evaluator);
|
||||
|
||||
std::experimental::optional<int64_t> batch_interval_in_ms, batch_size;
|
||||
|
||||
if (self_.batch_interval_in_ms_) {
|
||||
batch_interval_in_ms =
|
||||
self_.batch_interval_in_ms_->Accept(evaluator).Value<int64_t>();
|
||||
}
|
||||
if (self_.batch_size_) {
|
||||
batch_size = self_.batch_size_->Accept(evaluator).Value<int64_t>();
|
||||
}
|
||||
|
||||
try {
|
||||
StreamInfo info;
|
||||
info.stream_name = self_.stream_name_;
|
||||
info.stream_uri = stream_uri.Value<std::string>();
|
||||
info.stream_topic = stream_topic.Value<std::string>();
|
||||
info.transform_uri = transform_uri.Value<std::string>();
|
||||
info.batch_interval_in_ms = batch_interval_in_ms;
|
||||
info.batch_size = batch_size;
|
||||
|
||||
ctx.kafka_streams_->Create(info);
|
||||
} catch (const integrations::kafka::KafkaStreamException &e) {
|
||||
throw QueryRuntimeException(e.what());
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
void Shutdown() override {}
|
||||
|
||||
void Reset() override { throw utils::NotYetImplemented("Create Stream"); }
|
||||
|
||||
private:
|
||||
const CreateStream &self_;
|
||||
};
|
||||
|
||||
std::unique_ptr<Cursor> CreateStream::MakeCursor(
|
||||
database::GraphDbAccessor &db) const {
|
||||
return std::make_unique<CreateStreamCursor>(*this, db);
|
||||
}
|
||||
|
||||
DropStream::DropStream(std::string stream_name)
|
||||
: stream_name_(std::move(stream_name)) {}
|
||||
|
||||
WITHOUT_SINGLE_INPUT(DropStream)
|
||||
|
||||
class DropStreamCursor : public Cursor {
|
||||
public:
|
||||
DropStreamCursor(const DropStream &self, database::GraphDbAccessor &)
|
||||
: self_(self) {}
|
||||
|
||||
bool Pull(Frame &frame, Context &ctx) override {
|
||||
if (ctx.in_explicit_transaction_) {
|
||||
throw StreamClauseInMulticommandTxException();
|
||||
}
|
||||
|
||||
try {
|
||||
ctx.kafka_streams_->Drop(self_.stream_name_);
|
||||
} catch (const integrations::kafka::KafkaStreamException &e) {
|
||||
throw QueryRuntimeException(e.what());
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
void Shutdown() override {}
|
||||
|
||||
void Reset() override { throw utils::NotYetImplemented("Drop Stream"); }
|
||||
|
||||
private:
|
||||
const DropStream &self_;
|
||||
};
|
||||
|
||||
std::unique_ptr<Cursor> DropStream::MakeCursor(
|
||||
database::GraphDbAccessor &db) const {
|
||||
return std::make_unique<DropStreamCursor>(*this, db);
|
||||
}
|
||||
|
||||
ShowStreams::ShowStreams(Symbol name_symbol, Symbol uri_symbol,
|
||||
Symbol topic_symbol, Symbol transform_symbol,
|
||||
Symbol status_symbol)
|
||||
: name_symbol_(name_symbol),
|
||||
uri_symbol_(uri_symbol),
|
||||
topic_symbol_(topic_symbol),
|
||||
transform_symbol_(transform_symbol),
|
||||
status_symbol_(status_symbol) {}
|
||||
|
||||
WITHOUT_SINGLE_INPUT(ShowStreams)
|
||||
|
||||
std::vector<Symbol> ShowStreams::OutputSymbols(const SymbolTable &) const {
|
||||
return {name_symbol_, uri_symbol_, topic_symbol_, transform_symbol_,
|
||||
status_symbol_};
|
||||
}
|
||||
|
||||
class ShowStreamsCursor : public Cursor {
|
||||
public:
|
||||
ShowStreamsCursor(const ShowStreams &self, database::GraphDbAccessor &)
|
||||
: self_(self) {}
|
||||
|
||||
bool Pull(Frame &frame, Context &ctx) override {
|
||||
if (ctx.in_explicit_transaction_) {
|
||||
throw StreamClauseInMulticommandTxException();
|
||||
}
|
||||
|
||||
if (!is_initialized_) {
|
||||
streams_ = ctx.kafka_streams_->Show();
|
||||
streams_it_ = streams_.begin();
|
||||
is_initialized_ = true;
|
||||
}
|
||||
|
||||
if (streams_it_ == streams_.end()) return false;
|
||||
|
||||
frame[self_.name_symbol_] = streams_it_->stream_name;
|
||||
frame[self_.uri_symbol_] = streams_it_->stream_uri;
|
||||
frame[self_.topic_symbol_] = streams_it_->stream_topic;
|
||||
frame[self_.transform_symbol_] = streams_it_->transform_uri;
|
||||
frame[self_.status_symbol_] = streams_it_->stream_status;
|
||||
|
||||
streams_it_++;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
void Shutdown() override {}
|
||||
|
||||
void Reset() override { throw utils::NotYetImplemented("Show Streams"); }
|
||||
|
||||
private:
|
||||
const ShowStreams &self_;
|
||||
|
||||
bool is_initialized_ = false;
|
||||
using StreamStatus = integrations::kafka::StreamStatus;
|
||||
std::vector<StreamStatus> streams_;
|
||||
std::vector<StreamStatus>::iterator streams_it_ = streams_.begin();
|
||||
};
|
||||
|
||||
std::unique_ptr<Cursor> ShowStreams::MakeCursor(
|
||||
database::GraphDbAccessor &db) const {
|
||||
return std::make_unique<ShowStreamsCursor>(*this, db);
|
||||
}
|
||||
|
||||
StartStopStream::StartStopStream(std::string stream_name, bool is_start,
|
||||
Expression *limit_batches)
|
||||
: stream_name_(stream_name),
|
||||
is_start_(is_start),
|
||||
limit_batches_(limit_batches) {}
|
||||
|
||||
WITHOUT_SINGLE_INPUT(StartStopStream)
|
||||
|
||||
class StartStopStreamCursor : public Cursor {
|
||||
public:
|
||||
StartStopStreamCursor(const StartStopStream &self,
|
||||
database::GraphDbAccessor &)
|
||||
: self_(self) {}
|
||||
|
||||
bool Pull(Frame &frame, Context &ctx) override {
|
||||
if (ctx.in_explicit_transaction_) {
|
||||
throw StreamClauseInMulticommandTxException();
|
||||
}
|
||||
|
||||
ExpressionEvaluator evaluator(&frame, ctx.symbol_table_,
|
||||
ctx.evaluation_context_, &ctx.db_accessor_,
|
||||
GraphView::OLD);
|
||||
std::experimental::optional<int64_t> limit_batches;
|
||||
|
||||
if (self_.limit_batches_) {
|
||||
limit_batches = self_.limit_batches_->Accept(evaluator).Value<int64_t>();
|
||||
}
|
||||
|
||||
try {
|
||||
if (self_.is_start_) {
|
||||
ctx.kafka_streams_->Start(self_.stream_name_, limit_batches);
|
||||
} else {
|
||||
ctx.kafka_streams_->Stop(self_.stream_name_);
|
||||
}
|
||||
} catch (const integrations::kafka::KafkaStreamException &e) {
|
||||
throw QueryRuntimeException(e.what());
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
void Shutdown() override {}
|
||||
|
||||
void Reset() override { throw utils::NotYetImplemented("Start/Stop Stream"); }
|
||||
|
||||
private:
|
||||
const StartStopStream &self_;
|
||||
};
|
||||
|
||||
std::unique_ptr<Cursor> StartStopStream::MakeCursor(
|
||||
database::GraphDbAccessor &db) const {
|
||||
return std::make_unique<StartStopStreamCursor>(*this, db);
|
||||
}
|
||||
|
||||
StartStopAllStreams::StartStopAllStreams(bool is_start) : is_start_(is_start) {}
|
||||
|
||||
WITHOUT_SINGLE_INPUT(StartStopAllStreams)
|
||||
|
||||
class StartStopAllStreamsCursor : public Cursor {
|
||||
public:
|
||||
StartStopAllStreamsCursor(const StartStopAllStreams &self,
|
||||
database::GraphDbAccessor &)
|
||||
: self_(self) {}
|
||||
|
||||
bool Pull(Frame &frame, Context &ctx) override {
|
||||
if (ctx.in_explicit_transaction_) {
|
||||
throw StreamClauseInMulticommandTxException();
|
||||
}
|
||||
|
||||
try {
|
||||
if (self_.is_start_) {
|
||||
ctx.kafka_streams_->StartAll();
|
||||
} else {
|
||||
ctx.kafka_streams_->StopAll();
|
||||
}
|
||||
} catch (const integrations::kafka::KafkaStreamException &e) {
|
||||
throw QueryRuntimeException(e.what());
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
void Shutdown() override {}
|
||||
|
||||
void Reset() override {
|
||||
throw utils::NotYetImplemented("Start/Stop All Streams");
|
||||
}
|
||||
|
||||
private:
|
||||
const StartStopAllStreams &self_;
|
||||
};
|
||||
|
||||
std::unique_ptr<Cursor> StartStopAllStreams::MakeCursor(
|
||||
database::GraphDbAccessor &db) const {
|
||||
return std::make_unique<StartStopAllStreamsCursor>(*this, db);
|
||||
}
|
||||
|
||||
TestStream::TestStream(std::string stream_name, Expression *limit_batches,
|
||||
Symbol query_symbol, Symbol params_symbol)
|
||||
: stream_name_(stream_name),
|
||||
limit_batches_(limit_batches),
|
||||
query_symbol_(query_symbol),
|
||||
params_symbol_(params_symbol) {}
|
||||
|
||||
WITHOUT_SINGLE_INPUT(TestStream)
|
||||
|
||||
std::vector<Symbol> TestStream::OutputSymbols(const SymbolTable &) const {
|
||||
return {query_symbol_, params_symbol_};
|
||||
}
|
||||
|
||||
class TestStreamCursor : public Cursor {
|
||||
public:
|
||||
TestStreamCursor(const TestStream &self, database::GraphDbAccessor &)
|
||||
: self_(self) {}
|
||||
|
||||
bool Pull(Frame &frame, Context &ctx) override {
|
||||
if (ctx.in_explicit_transaction_) {
|
||||
throw StreamClauseInMulticommandTxException();
|
||||
}
|
||||
|
||||
if (!is_initialized_) {
|
||||
ExpressionEvaluator evaluator(&frame, ctx.symbol_table_,
|
||||
ctx.evaluation_context_, &ctx.db_accessor_,
|
||||
GraphView::OLD);
|
||||
std::experimental::optional<int64_t> limit_batches;
|
||||
|
||||
if (self_.limit_batches_) {
|
||||
limit_batches =
|
||||
self_.limit_batches_->Accept(evaluator).Value<int64_t>();
|
||||
}
|
||||
|
||||
try {
|
||||
auto results =
|
||||
ctx.kafka_streams_->Test(self_.stream_name_, limit_batches);
|
||||
for (const auto &result : results) {
|
||||
std::map<std::string, query::TypedValue> params_tv;
|
||||
for (const auto &kv : result.second) {
|
||||
params_tv.emplace(kv.first, glue::ToTypedValue(kv.second));
|
||||
}
|
||||
results_.emplace_back(result.first, params_tv);
|
||||
}
|
||||
} catch (const integrations::kafka::KafkaStreamException &e) {
|
||||
throw QueryRuntimeException(e.what());
|
||||
}
|
||||
results_it_ = results_.begin();
|
||||
is_initialized_ = true;
|
||||
}
|
||||
|
||||
if (results_it_ == results_.end()) return false;
|
||||
|
||||
frame[self_.query_symbol_] = results_it_->first;
|
||||
frame[self_.params_symbol_] = results_it_->second;
|
||||
results_it_++;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
void Shutdown() override {}
|
||||
|
||||
void Reset() override { throw utils::NotYetImplemented("Test Stream"); }
|
||||
|
||||
private:
|
||||
const TestStream &self_;
|
||||
|
||||
bool is_initialized_ = false;
|
||||
std::vector<std::pair<std::string, TypedValue>> results_;
|
||||
std::vector<std::pair<std::string, TypedValue>>::iterator results_it_ =
|
||||
results_.begin();
|
||||
};
|
||||
|
||||
std::unique_ptr<Cursor> TestStream::MakeCursor(
|
||||
database::GraphDbAccessor &db) const {
|
||||
return std::make_unique<TestStreamCursor>(*this, db);
|
||||
}
|
||||
|
||||
Explain::Explain(
|
||||
const std::shared_ptr<LogicalOperator> &input, const Symbol &output_symbol,
|
||||
const std::function<void(const database::GraphDbAccessor &,
|
||||
|
@ -97,13 +97,6 @@ class Distinct;
class CreateIndex;
class Union;
class Cartesian;
class AuthHandler;
class CreateStream;
class DropStream;
class ShowStreams;
class StartStopStream;
class StartStopAllStreams;
class TestStream;
class Explain;

using LogicalOperatorCompositeVisitor = ::utils::CompositeVisitor<
@ -116,9 +109,7 @@ using LogicalOperatorCompositeVisitor = ::utils::CompositeVisitor<
    OrderBy, Merge, Optional, Unwind, Distinct, Union, Cartesian, Explain>;

using LogicalOperatorLeafVisitor =
    ::utils::LeafVisitor<Once, CreateIndex, AuthHandler, CreateStream,
                         DropStream, ShowStreams, StartStopStream,
                         StartStopAllStreams, TestStream>;
    ::utils::LeafVisitor<Once, CreateIndex>;

/**
 * @brief Base class for hierarhical visitors of @c LogicalOperator class
@ -2003,137 +1994,6 @@ and returns true, once.")
cpp<#)
(:serialize :capnp))

(lcp:define-class auth-handler (logical-operator)
|
||||
((action "AuthQuery::Action" :scope :public
|
||||
:capnp-init nil
|
||||
:capnp-type "Ast.AuthQuery.Action"
|
||||
:capnp-save (lcp:capnp-save-enum "::query::capnp::AuthQuery::Action"
|
||||
"AuthQuery::Action"
|
||||
'(create-role drop-role show-roles
|
||||
create-user set-password
|
||||
drop-user show-users set-role
|
||||
clear-role grant-privilege
|
||||
deny-privilege revoke-privilege
|
||||
show-privileges show-role-for-user
|
||||
show-users-for-role))
|
||||
:capnp-load (lcp:capnp-load-enum "::query::capnp::AuthQuery::Action"
|
||||
"AuthQuery::Action"
|
||||
'(create-role drop-role show-roles
|
||||
create-user set-password
|
||||
drop-user show-users set-role
|
||||
clear-role grant-privilege
|
||||
deny-privilege revoke-privilege
|
||||
show-privileges show-role-for-user
|
||||
show-users-for-role)))
|
||||
(user "std::string" :scope :public)
|
||||
(role "std::string" :scope :public)
|
||||
(user-or-role "std::string" :scope :public)
|
||||
(password "Expression *" :scope :public
|
||||
:capnp-type "Ast.Tree" :capnp-init nil
|
||||
:capnp-save #'save-ast-pointer
|
||||
:capnp-load (load-ast-pointer "Expression *"))
|
||||
(privileges "std::vector<AuthQuery::Privilege>" :scope :public
|
||||
:capnp-type "List(Ast.AuthQuery.Privilege)"
|
||||
:capnp-save
|
||||
(lambda (builder member capnp-name)
|
||||
(declare (ignore capnp-name))
|
||||
#>cpp
|
||||
for (size_t i = 0; i < ${member}.size(); ++i) {
|
||||
switch (${member}[i]) {
|
||||
case AuthQuery::Privilege::CREATE:
|
||||
${builder}.set(i, query::capnp::AuthQuery::Privilege::CREATE);
|
||||
break;
|
||||
case AuthQuery::Privilege::DELETE:
|
||||
${builder}.set(i, query::capnp::AuthQuery::Privilege::DELETE);
|
||||
break;
|
||||
case AuthQuery::Privilege::MATCH:
|
||||
${builder}.set(i, query::capnp::AuthQuery::Privilege::MATCH);
|
||||
break;
|
||||
case AuthQuery::Privilege::MERGE:
|
||||
${builder}.set(i, query::capnp::AuthQuery::Privilege::MERGE);
|
||||
break;
|
||||
case AuthQuery::Privilege::SET:
|
||||
${builder}.set(i, query::capnp::AuthQuery::Privilege::SET);
|
||||
break;
|
||||
case AuthQuery::Privilege::REMOVE:
|
||||
${builder}.set(i, query::capnp::AuthQuery::Privilege::REMOVE);
|
||||
break;
|
||||
case AuthQuery::Privilege::INDEX:
|
||||
${builder}.set(i, query::capnp::AuthQuery::Privilege::INDEX);
|
||||
break;
|
||||
case AuthQuery::Privilege::AUTH:
|
||||
${builder}.set(i, query::capnp::AuthQuery::Privilege::AUTH);
|
||||
break;
|
||||
case AuthQuery::Privilege::STREAM:
|
||||
${builder}.set(i, query::capnp::AuthQuery::Privilege::STREAM);
|
||||
break;
|
||||
}
|
||||
}
|
||||
cpp<#)
|
||||
:capnp-load
|
||||
(lambda (reader member-name capnp-name)
|
||||
(declare (ignore capnp-name))
|
||||
#>cpp
|
||||
for (auto privilege : ${reader}) {
|
||||
switch (privilege) {
|
||||
case query::capnp::AuthQuery::Privilege::CREATE:
|
||||
${member-name}.push_back(AuthQuery::Privilege::CREATE);
|
||||
break;
|
||||
case query::capnp::AuthQuery::Privilege::DELETE:
|
||||
${member-name}.push_back(AuthQuery::Privilege::DELETE);
|
||||
break;
|
||||
case query::capnp::AuthQuery::Privilege::MATCH:
|
||||
${member-name}.push_back(AuthQuery::Privilege::MATCH);
|
||||
break;
|
||||
case query::capnp::AuthQuery::Privilege::MERGE:
|
||||
${member-name}.push_back(AuthQuery::Privilege::MERGE);
|
||||
break;
|
||||
case query::capnp::AuthQuery::Privilege::SET:
|
||||
${member-name}.push_back(AuthQuery::Privilege::SET);
|
||||
break;
|
||||
case query::capnp::AuthQuery::Privilege::REMOVE:
|
||||
${member-name}.push_back(AuthQuery::Privilege::REMOVE);
|
||||
break;
|
||||
case query::capnp::AuthQuery::Privilege::INDEX:
|
||||
${member-name}.push_back(AuthQuery::Privilege::INDEX);
|
||||
break;
|
||||
case query::capnp::AuthQuery::Privilege::AUTH:
|
||||
${member-name}.push_back(AuthQuery::Privilege::AUTH);
|
||||
break;
|
||||
case query::capnp::AuthQuery::Privilege::STREAM:
|
||||
${member-name}.push_back(AuthQuery::Privilege::STREAM);
|
||||
break;
|
||||
}
|
||||
}
|
||||
cpp<#))
|
||||
(user-symbol "Symbol" :scope :public)
|
||||
(role-symbol "Symbol" :scope :public)
|
||||
(privilege-symbol "Symbol" :scope :public)
|
||||
(effective-symbol "Symbol" :scope :public)
|
||||
(details-symbol "Symbol" :scope :public))
|
||||
(:public
|
||||
#>cpp
|
||||
AuthHandler() {}
|
||||
AuthHandler(AuthQuery::Action action, std::string user, std::string role,
|
||||
std::string user_or_role, Expression * password,
|
||||
std::vector<AuthQuery::Privilege> privileges,
|
||||
Symbol user_symbol, Symbol role_symbol,
|
||||
Symbol privilege_symbol, Symbol effective_symbol,
|
||||
Symbol details_symbol);
|
||||
|
||||
bool Accept(HierarchicalLogicalOperatorVisitor &visitor) override;
|
||||
std::unique_ptr<Cursor> MakeCursor(database::GraphDbAccessor & db)
|
||||
const override;
|
||||
std::vector<Symbol> OutputSymbols(const SymbolTable &) const override;
|
||||
virtual std::vector<Symbol> ModifiedSymbols(const SymbolTable &)
|
||||
const override { return {}; }
|
||||
|
||||
bool HasSingleInput() const override;
|
||||
std::shared_ptr<LogicalOperator> input() const override;
|
||||
void set_input(std::shared_ptr<LogicalOperator>) override;
|
||||
cpp<#)
|
||||
(:serialize :capnp))
|
||||
|
||||
(lcp:define-class unwind (logical-operator)
|
||||
((input "std::shared_ptr<LogicalOperator>" :scope :public
|
||||
:capnp-save #'save-operator-pointer
|
||||
@ -2374,196 +2234,6 @@ vectors of symbols used by each of the inputs.")
|
||||
cpp<#)
|
||||
(:serialize :capnp))
|
||||
|
||||
;;; KAFKA STREAM OPERATORS
|
||||
(lcp:define-class create-stream (logical-operator)
|
||||
((stream-name "std::string" :scope :public)
|
||||
(stream-uri "Expression *"
|
||||
:scope :public
|
||||
:capnp-type "Ast.Tree"
|
||||
:capnp-init nil
|
||||
:capnp-save #'save-ast-pointer
|
||||
:capnp-load (load-ast-pointer "Expression *"))
|
||||
(stream-topic "Expression *"
|
||||
:scope :public
|
||||
:capnp-type "Ast.Tree"
|
||||
:capnp-init nil
|
||||
:capnp-save #'save-ast-pointer
|
||||
:capnp-load (load-ast-pointer "Expression *"))
|
||||
(transform-uri "Expression *"
|
||||
:scope :public
|
||||
:capnp-type "Ast.Tree"
|
||||
:capnp-init nil
|
||||
:capnp-save #'save-ast-pointer
|
||||
:capnp-load (load-ast-pointer "Expression *"))
|
||||
(batch-interval-in-ms "Expression *"
|
||||
:scope :public
|
||||
:capnp-type "Ast.Tree"
|
||||
:capnp-init nil
|
||||
:capnp-save #'save-ast-pointer
|
||||
:capnp-load (load-ast-pointer "Expression *"))
|
||||
(batch-size "Expression *"
|
||||
:scope :public
|
||||
:capnp-type "Ast.Tree"
|
||||
:capnp-init nil
|
||||
:capnp-save #'save-ast-pointer
|
||||
:capnp-load (load-ast-pointer "Expression *")))
|
||||
(:documentation
|
||||
"Creates a new stream with given parameters that imports data.")
|
||||
(:public
|
||||
#>cpp
|
||||
CreateStream() {}
|
||||
CreateStream(std::string stream_name, Expression *stream_uri,
|
||||
Expression *stream_topic, Expression *transform_uri,
|
||||
Expression *batch_interval_in_ms, Expression *batch_size);
|
||||
DEFVISITABLE(HierarchicalLogicalOperatorVisitor);
|
||||
std::unique_ptr<Cursor> MakeCursor(
|
||||
database::GraphDbAccessor &db) const override;
|
||||
std::vector<Symbol> ModifiedSymbols(const SymbolTable &) const override {
|
||||
return {};
|
||||
}
|
||||
|
||||
bool HasSingleInput() const override;
|
||||
std::shared_ptr<LogicalOperator> input() const override;
|
||||
void set_input(std::shared_ptr<LogicalOperator>) override;
|
||||
cpp<#)
|
||||
(:serialize :capnp))
|
||||
|
||||
(lcp:define-class drop-stream (logical-operator)
|
||||
((stream-name "std::string" :scope :public))
|
||||
(:documentation
|
||||
"Stops and deletes a stream if it exists.")
|
||||
(:public
|
||||
#>cpp
|
||||
DropStream() {}
|
||||
explicit DropStream(std::string stream_name);
|
||||
DEFVISITABLE(HierarchicalLogicalOperatorVisitor);
|
||||
std::unique_ptr<Cursor> MakeCursor(
|
||||
database::GraphDbAccessor &db) const override;
|
||||
std::vector<Symbol> ModifiedSymbols(const SymbolTable &) const override {
|
||||
return {};
|
||||
}
|
||||
|
||||
bool HasSingleInput() const override;
|
||||
std::shared_ptr<LogicalOperator> input() const override;
|
||||
void set_input(std::shared_ptr<LogicalOperator>) override;
|
||||
cpp<#)
|
||||
(:serialize :capnp))
|
||||
|
||||
(lcp:define-class show-streams (logical-operator)
|
||||
((name-symbol "Symbol" :scope :public)
|
||||
(uri-symbol "Symbol" :scope :public)
|
||||
(topic-symbol "Symbol" :scope :public)
|
||||
(transform-symbol "Symbol" :scope :public)
|
||||
(status-symbol "Symbol" :scope :public))
|
||||
(:documentation
|
||||
"Shows all streams, started and stopped, that were configured.")
|
||||
(:public
|
||||
#>cpp
|
||||
ShowStreams() {}
|
||||
ShowStreams(Symbol name_symbol, Symbol endpoint_symbol, Symbol topic_symbol,
|
||||
Symbol transform_symbol, Symbol status_symbol);
|
||||
DEFVISITABLE(HierarchicalLogicalOperatorVisitor);
|
||||
std::unique_ptr<Cursor> MakeCursor(
|
||||
database::GraphDbAccessor &db) const override;
|
||||
std::vector<Symbol> OutputSymbols(const SymbolTable &) const override;
|
||||
std::vector<Symbol> ModifiedSymbols(const SymbolTable &) const override {
|
||||
return {};
|
||||
}
|
||||
|
||||
bool HasSingleInput() const override;
|
||||
std::shared_ptr<LogicalOperator> input() const override;
|
||||
void set_input(std::shared_ptr<LogicalOperator>) override;
|
||||
cpp<#)
|
||||
(:serialize :capnp))
|
||||
|
||||
(lcp:define-class start-stop-stream (logical-operator)
|
||||
((stream-name "std::string" :scope :public)
|
||||
(is-start :bool :scope :public)
|
||||
(limit-batches "Expression *"
|
||||
:scope :public
|
||||
:capnp-type "Ast.Tree"
|
||||
:capnp-init nil
|
||||
:capnp-save #'save-ast-pointer
|
||||
:capnp-load (load-ast-pointer "Expression *")))
|
||||
(:documentation
|
||||
"Starts or stops a stream.
|
||||
When starting a stream import, it can be limited by number of batches
|
||||
If a stream is already running, it wont limit the number of batches that this
|
||||
stream is importing.")
|
||||
(:public
|
||||
#>cpp
|
||||
StartStopStream() {}
|
||||
StartStopStream(std::string stream_name, bool is_start,
|
||||
Expression *limit_batches);
|
||||
DEFVISITABLE(HierarchicalLogicalOperatorVisitor);
|
||||
std::unique_ptr<Cursor> MakeCursor(
|
||||
database::GraphDbAccessor &db) const override;
|
||||
std::vector<Symbol> ModifiedSymbols(const SymbolTable &) const override {
|
||||
return {};
|
||||
}
|
||||
|
||||
bool HasSingleInput() const override;
|
||||
std::shared_ptr<LogicalOperator> input() const override;
|
||||
void set_input(std::shared_ptr<LogicalOperator>) override;
|
||||
cpp<#)
|
||||
(:serialize :capnp))
|
||||
|
||||
(lcp:define-class start-stop-all-streams (logical-operator)
|
||||
((stream-name "std::string" :scope :public)
|
||||
(is-start :bool :scope :public))
|
||||
(:documentation
|
||||
"Starts or stops all stream.")
|
||||
(:public
|
||||
#>cpp
|
||||
StartStopAllStreams() {}
|
||||
explicit StartStopAllStreams(bool is_start);
|
||||
DEFVISITABLE(HierarchicalLogicalOperatorVisitor);
|
||||
std::unique_ptr<Cursor> MakeCursor(
|
||||
database::GraphDbAccessor &db) const override;
|
||||
std::vector<Symbol> ModifiedSymbols(const SymbolTable &) const override {
|
||||
return {};
|
||||
}
|
||||
|
||||
bool HasSingleInput() const override;
|
||||
std::shared_ptr<LogicalOperator> input() const override;
|
||||
void set_input(std::shared_ptr<LogicalOperator>) override;
|
||||
cpp<#)
|
||||
(:serialize :capnp))
|
||||
|
||||
(lcp:define-class test-stream (logical-operator)
|
||||
((stream-name "std::string" :scope :public)
|
||||
(limit-batches "Expression *"
|
||||
:scope :public
|
||||
:capnp-type "Ast.Tree"
|
||||
:capnp-init nil
|
||||
:capnp-save #'save-ast-pointer
|
||||
:capnp-load (load-ast-pointer "Expression *"))
|
||||
(query-symbol "Symbol" :scope :public)
|
||||
(params-symbol "Symbol" :scope :public))
|
||||
(:documentation
|
||||
"Test a stream. This will start consuming messages but wont insert anything
|
||||
in the db.")
|
||||
(:public
|
||||
#>cpp
|
||||
TestStream() {}
|
||||
TestStream(std::string stream_name, Expression *limit_batches,
|
||||
Symbol query_symbol, Symbol params_symbol);
|
||||
DEFVISITABLE(HierarchicalLogicalOperatorVisitor);
|
||||
std::unique_ptr<Cursor> MakeCursor(
|
||||
database::GraphDbAccessor &db) const override;
|
||||
std::vector<Symbol> OutputSymbols(const SymbolTable &) const override;
|
||||
std::vector<Symbol> ModifiedSymbols(const SymbolTable &) const override {
|
||||
return {};
|
||||
}
|
||||
|
||||
bool HasSingleInput() const override;
|
||||
std::shared_ptr<LogicalOperator> input() const override;
|
||||
void set_input(std::shared_ptr<LogicalOperator>) override;
|
||||
cpp<#)
|
||||
(:serialize :capnp))
|
||||
|
||||
;;; END KAFKA STREAM OPERATORS
|
||||
|
||||
(lcp:define-class explain (logical-operator)
|
||||
((input "std::shared_ptr<LogicalOperator>" :scope :public
|
||||
:capnp-save #'save-operator-pointer
|
||||
|
@ -54,13 +54,6 @@ class UsedSymbolsCollector : public HierarchicalTreeVisitor {
  bool Visit(ParameterLookup &) override { return true; }
  bool Visit(query::CreateIndex &) override { return true; }
  bool Visit(query::CreateUniqueIndex &) override { return true; }
  bool Visit(query::AuthQuery &) override { return true; }
  bool Visit(query::CreateStream &) override { return true; }
  bool Visit(query::DropStream &) override { return true; }
  bool Visit(query::ShowStreams &) override { return true; }
  bool Visit(query::StartStopStream &) override { return true; }
  bool Visit(query::StartStopAllStreams &) override { return true; }
  bool Visit(query::TestStream &) override { return true; }

  std::unordered_set<Symbol> symbols_;
  const SymbolTable &symbol_table_;
@ -148,41 +148,6 @@ bool PlanPrinter::Visit(query::plan::CreateIndex &op) {
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PlanPrinter::Visit(query::plan::AuthHandler &op) {
|
||||
WithPrintLn([](auto &out) { out << "* AuthHandler"; });
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PlanPrinter::Visit(query::plan::CreateStream &op) {
|
||||
WithPrintLn([](auto &out) { out << "* CreateStream"; });
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PlanPrinter::Visit(query::plan::DropStream &op) {
|
||||
WithPrintLn([](auto &out) { out << "* DropStream"; });
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PlanPrinter::Visit(query::plan::ShowStreams &op) {
|
||||
WithPrintLn([](auto &out) { out << "* ShowStreams"; });
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PlanPrinter::Visit(query::plan::StartStopStream &op) {
|
||||
WithPrintLn([](auto &out) { out << "* StartStopStream"; });
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PlanPrinter::Visit(query::plan::StartStopAllStreams &op) {
|
||||
WithPrintLn([](auto &out) { out << "* StartStopAllStreams"; });
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PlanPrinter::Visit(query::plan::TestStream &op) {
|
||||
WithPrintLn([](auto &out) { out << "* TestStream"; });
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PlanPrinter::PreVisit(query::plan::Explain &explain) {
|
||||
WithPrintLn([&explain](auto &out) {
|
||||
out << "* Explain {" << explain.output_symbol_.name() << "}";
|
||||
|
@ -79,15 +79,6 @@ class PlanPrinter : public virtual HierarchicalLogicalOperatorVisitor {
bool Visit(Once &) override;
bool Visit(CreateIndex &) override;

bool Visit(AuthHandler &) override;

bool Visit(CreateStream &) override;
bool Visit(DropStream &) override;
bool Visit(ShowStreams &) override;
bool Visit(StartStopStream &) override;
bool Visit(StartStopAllStreams &) override;
bool Visit(TestStream &) override;

/// Call fun with the output stream. The stream is prefixed with the amount
/// of spaces corresponding to the current depth_.
template <class TFun>
|
||||
|
@ -407,41 +407,6 @@ class ReturnBodyContext : public HierarchicalTreeVisitor {
|
||||
return true;
|
||||
}
|
||||
|
||||
bool Visit(query::AuthQuery &) override {
|
||||
has_aggregation_.emplace_back(false);
|
||||
return true;
|
||||
}
|
||||
|
||||
bool Visit(query::CreateStream &) override {
|
||||
has_aggregation_.emplace_back(false);
|
||||
return true;
|
||||
}
|
||||
|
||||
bool Visit(query::DropStream &) override {
|
||||
has_aggregation_.emplace_back(false);
|
||||
return true;
|
||||
}
|
||||
|
||||
bool Visit(query::ShowStreams &) override {
|
||||
has_aggregation_.emplace_back(false);
|
||||
return true;
|
||||
}
|
||||
|
||||
bool Visit(query::StartStopStream &) override {
|
||||
has_aggregation_.emplace_back(false);
|
||||
return true;
|
||||
}
|
||||
|
||||
bool Visit(query::StartStopAllStreams &) override {
|
||||
has_aggregation_.emplace_back(false);
|
||||
return true;
|
||||
}
|
||||
|
||||
bool Visit(query::TestStream &) override {
|
||||
has_aggregation_.emplace_back(false);
|
||||
return true;
|
||||
}
|
||||
|
||||
// Creates NamedExpression with an Identifier for each user declared symbol.
|
||||
// This should be used when body.all_identifiers is true, to generate
|
||||
// expressions for Produce operator.
|
||||
|
@ -189,59 +189,6 @@ class RuleBasedPlanner {
|
||||
DCHECK(!input_op) << "Unexpected operator before CreateIndex";
|
||||
input_op = std::make_unique<plan::CreateIndex>(
|
||||
create_index->label_, create_index->properties_, true);
|
||||
} else if (auto *auth_query =
|
||||
dynamic_cast<query::AuthQuery *>(clause)) {
|
||||
DCHECK(!input_op) << "Unexpected operator before AuthQuery";
|
||||
auto &symbol_table = context.symbol_table;
|
||||
input_op = std::make_unique<plan::AuthHandler>(
|
||||
auth_query->action_, auth_query->user_, auth_query->role_,
|
||||
auth_query->user_or_role_, auth_query->password_,
|
||||
auth_query->privileges_, symbol_table.CreateSymbol("user", false),
|
||||
symbol_table.CreateSymbol("role", false),
|
||||
symbol_table.CreateSymbol("privilege", false),
|
||||
symbol_table.CreateSymbol("effective", false),
|
||||
symbol_table.CreateSymbol("details", false));
|
||||
} else if (auto *create_stream =
|
||||
dynamic_cast<query::CreateStream *>(clause)) {
|
||||
DCHECK(!input_op) << "Unexpected operator before CreateStream";
|
||||
input_op = std::make_unique<plan::CreateStream>(
|
||||
create_stream->stream_name_, create_stream->stream_uri_,
|
||||
create_stream->stream_topic_, create_stream->transform_uri_,
|
||||
create_stream->batch_interval_in_ms_, create_stream->batch_size_);
|
||||
} else if (auto *drop_stream =
|
||||
dynamic_cast<query::DropStream *>(clause)) {
|
||||
DCHECK(!input_op) << "Unexpected operator before DropStream";
|
||||
input_op =
|
||||
std::make_unique<plan::DropStream>(drop_stream->stream_name_);
|
||||
} else if (dynamic_cast<query::ShowStreams *>(clause)) {
|
||||
DCHECK(!input_op) << "Unexpected operator before ShowStreams";
|
||||
// Create symbols for ShowStreams results
|
||||
auto &symbol_table = context.symbol_table;
|
||||
input_op = std::make_unique<plan::ShowStreams>(
|
||||
symbol_table.CreateSymbol("name", false),
|
||||
symbol_table.CreateSymbol("uri", false),
|
||||
symbol_table.CreateSymbol("topic", false),
|
||||
symbol_table.CreateSymbol("transform", false),
|
||||
symbol_table.CreateSymbol("status", false));
|
||||
} else if (auto *start_stop_stream =
|
||||
dynamic_cast<query::StartStopStream *>(clause)) {
|
||||
DCHECK(!input_op) << "Unexpected operator before StartStopStream";
|
||||
input_op = std::make_unique<plan::StartStopStream>(
|
||||
start_stop_stream->stream_name_, start_stop_stream->is_start_,
|
||||
start_stop_stream->limit_batches_);
|
||||
} else if (auto *start_stop_all_streams =
|
||||
dynamic_cast<query::StartStopAllStreams *>(clause)) {
|
||||
DCHECK(!input_op) << "Unexpected operator before StartStopAllStreams";
|
||||
input_op = std::make_unique<plan::StartStopAllStreams>(
|
||||
start_stop_all_streams->is_start_);
|
||||
} else if (auto *test_stream =
|
||||
dynamic_cast<query::TestStream *>(clause)) {
|
||||
DCHECK(!input_op) << "Unexpected operator before TestStream";
|
||||
auto &symbol_table = context.symbol_table;
|
||||
input_op = std::make_unique<plan::TestStream>(
|
||||
test_stream->stream_name_, test_stream->limit_batches_,
|
||||
symbol_table.CreateSymbol("query", false),
|
||||
symbol_table.CreateSymbol("params", false));
|
||||
} else {
|
||||
throw utils::NotYetImplemented("clause conversion to operator(s)");
|
||||
}
|
||||
|
@ -16,9 +16,9 @@ class TransactionEngine final {
|
||||
|
||||
~TransactionEngine() { Abort(); }
|
||||
|
||||
std::pair<std::vector<std::string>, std::vector<query::AuthQuery::Privilege>>
|
||||
Interpret(const std::string &query,
|
||||
const std::map<std::string, PropertyValue> ¶ms) {
|
||||
std::vector<std::string> Interpret(
|
||||
const std::string &query,
|
||||
const std::map<std::string, PropertyValue> ¶ms) {
|
||||
// Clear pending results.
|
||||
results_ = std::experimental::nullopt;
|
||||
|
||||
@ -64,7 +64,7 @@ class TransactionEngine final {
|
||||
try {
|
||||
results_.emplace((*interpreter_)(query, *db_accessor_, params,
|
||||
in_explicit_transaction_));
|
||||
return {results_->header(), results_->privileges()};
|
||||
return results_->header();
|
||||
} catch (const utils::BasicException &) {
|
||||
AbortCommand();
|
||||
throw;
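Note (not part of the diff): a hedged sketch of what the Interpret() signature change above means for callers; `TEngine`, `TParams`, and the helper are placeholders so the snippet stays self-contained.

#include <string>
#include <vector>

// Before this change Interpret() returned {header, privileges}; now it
// returns only the result header, so callers that unpacked the pair simply
// drop the privileges part.
template <class TEngine, class TParams>
std::vector<std::string> RunQuery(TEngine &engine, const std::string &query,
                                  const TParams &params) {
  return engine.Interpret(query, params);
}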
|
||||
|
@ -5,7 +5,6 @@
|
||||
|
||||
#include "data_structures/concurrent/concurrent_map.hpp"
|
||||
#include "mvcc/single_node/version_list.hpp"
|
||||
#include "stats/metrics.hpp"
|
||||
#include "storage/single_node/deferred_deleter.hpp"
|
||||
#include "storage/single_node/edge.hpp"
|
||||
#include "storage/single_node/garbage_collector.hpp"
|
||||
|
@ -7,13 +7,5 @@ set(utils_src_files
|
||||
uuid.cpp
|
||||
watchdog.cpp)
|
||||
|
||||
define_add_capnp(utils_src_files utils_capnp_files)
|
||||
|
||||
add_capnp(serialization.capnp)
|
||||
|
||||
add_custom_target(generate_utils_capnp DEPENDS ${utils_capnp_files})
|
||||
|
||||
add_library(mg-utils STATIC ${utils_src_files})
|
||||
target_link_libraries(mg-utils stdc++fs Threads::Threads fmt glog gflags uuid)
|
||||
target_link_libraries(mg-utils capnp kj)
|
||||
add_dependencies(mg-utils generate_utils_capnp)
|
||||
|
@ -27,6 +27,3 @@ add_subdirectory(property_based)
|
||||
|
||||
# integration test binaries
|
||||
add_subdirectory(integration)
|
||||
|
||||
# feature benchmark test binaries
|
||||
add_subdirectory(feature_benchmark)
|
||||
|
@ -51,9 +51,6 @@ target_link_libraries(${test_prefix}edge_storage mg-single-node kvstore_dummy_li
|
||||
add_benchmark(mvcc.cpp)
|
||||
target_link_libraries(${test_prefix}mvcc mg-single-node kvstore_dummy_lib)
|
||||
|
||||
add_benchmark(serialization.cpp)
|
||||
target_link_libraries(${test_prefix}serialization mg-distributed kvstore_dummy_lib)
|
||||
|
||||
add_benchmark(tx_engine.cpp)
|
||||
target_link_libraries(${test_prefix}tx_engine mg-single-node kvstore_dummy_lib)
|
||||
|
||||
|
@ -1,47 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
import json
|
||||
import os
|
||||
import re
|
||||
import subprocess
|
||||
|
||||
from card_fraud import NUM_MACHINES, BINARIES
|
||||
|
||||
# paths
|
||||
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
|
||||
WORKSPACE_DIR = os.path.normpath(os.path.join(SCRIPT_DIR, "..", "..", "..", ".."))
|
||||
OUTPUT_DIR_REL = os.path.join(os.path.relpath(SCRIPT_DIR, WORKSPACE_DIR), "output")
|
||||
|
||||
# generate runs
|
||||
runs = []
|
||||
|
||||
binaries = list(map(lambda x: os.path.join("..", "..", "build_release", x), BINARIES))
|
||||
|
||||
for i in range(NUM_MACHINES):
|
||||
name = "master" if i == 0 else "worker" + str(i)
|
||||
additional = ["master.py"] if i == 0 else []
|
||||
outfile_paths = ["\\./" + OUTPUT_DIR_REL + "/.+"] if i == 0 else []
|
||||
if i == 0:
|
||||
cmd = "master.py"
|
||||
args = "--machines-num {0} --test-suite card_fraud " \
|
||||
"--test card_fraud".format(NUM_MACHINES)
|
||||
else:
|
||||
cmd = "jail_service.py"
|
||||
args = ""
|
||||
runs.append({
|
||||
"name": "distributed__card_fraud__" + name,
|
||||
"cd": "..",
|
||||
"supervisor": cmd,
|
||||
"arguments": args,
|
||||
"infiles": binaries + [
|
||||
"common.py",
|
||||
"jail_service.py",
|
||||
"card_fraud/card_fraud.py",
|
||||
"card_fraud/snapshots/worker_" + str(i),
|
||||
] + additional,
|
||||
"outfile_paths": outfile_paths,
|
||||
"parallel_run": "distributed__card_fraud",
|
||||
"slave_group": "remote_4c32g",
|
||||
"enable_network": True,
|
||||
})
|
||||
|
||||
print(json.dumps(runs, indent=4, sort_keys=True))
|
@ -1,10 +0,0 @@
|
||||
- name: feature_benchmark__kafka
|
||||
cd: kafka
|
||||
commands: ./runner.sh
|
||||
infiles:
|
||||
- runner.sh # runner script
|
||||
- transform.py # transform script
|
||||
- generate.py # dataset generator script
|
||||
- ../../../build_release/tests/feature_benchmark/kafka/kafka.py # kafka script
|
||||
- ../../../build_release/tests/feature_benchmark/kafka/benchmark # benchmark binary
|
||||
enable_network: true
|
@ -6,12 +6,3 @@ add_subdirectory(ssl)
|
||||
|
||||
# transactions test binaries
|
||||
add_subdirectory(transactions)
|
||||
|
||||
# kafka test binaries
|
||||
add_subdirectory(kafka)
|
||||
|
||||
# auth test binaries
|
||||
add_subdirectory(auth)
|
||||
|
||||
# distributed test binaries
|
||||
add_subdirectory(distributed)
|
||||
|
@ -22,31 +22,3 @@
|
||||
- runner.sh # runner script
|
||||
- ../../../build_debug/memgraph # memgraph binary
|
||||
- ../../../build_debug/tests/integration/transactions/tester # tester binary
|
||||
|
||||
- name: integration__kafka
|
||||
cd: kafka
|
||||
commands: ./runner.sh
|
||||
infiles:
|
||||
- runner.sh # runner script
|
||||
- transform.py # transform script
|
||||
- ../../../build_debug/memgraph # memgraph binary
|
||||
- ../../../build_debug/kafka.py # kafka script
|
||||
- ../../../build_debug/tests/integration/kafka/tester # tester binary
|
||||
enable_network: true
|
||||
|
||||
- name: integration__auth
|
||||
cd: auth
|
||||
commands: TIMEOUT=820 ./runner.py
|
||||
infiles:
|
||||
- runner.py # runner script
|
||||
- ../../../build_debug/memgraph # memgraph binary
|
||||
- ../../../build_debug/tests/integration/auth/checker # checker binary
|
||||
- ../../../build_debug/tests/integration/auth/tester # tester binary
|
||||
|
||||
- name: integration__distributed
|
||||
cd: distributed
|
||||
commands: TIMEOUT=480 ./runner.py
|
||||
infiles:
|
||||
- runner.py # runner script
|
||||
- ../../../build_debug/memgraph_distributed # memgraph distributed binary
|
||||
- ../../../build_debug/tests/integration/distributed/tester # tester binary
|
||||
|
@ -15,16 +15,16 @@ function(add_macro_benchmark test_cpp)
|
||||
endfunction(add_macro_benchmark)
|
||||
|
||||
add_macro_benchmark(clients/pokec_client.cpp)
|
||||
target_link_libraries(${test_prefix}pokec_client mg-communication mg-io mg-utils mg-stats json)
|
||||
target_link_libraries(${test_prefix}pokec_client mg-communication mg-io mg-utils json)
|
||||
|
||||
add_macro_benchmark(clients/graph_500_bfs.cpp)
|
||||
target_link_libraries(${test_prefix}graph_500_bfs mg-communication mg-io mg-utils mg-stats json)
|
||||
target_link_libraries(${test_prefix}graph_500_bfs mg-communication mg-io mg-utils json)
|
||||
|
||||
add_macro_benchmark(clients/bfs_pokec_client.cpp)
|
||||
target_link_libraries(${test_prefix}bfs_pokec_client mg-communication mg-io mg-utils mg-stats json)
|
||||
target_link_libraries(${test_prefix}bfs_pokec_client mg-communication mg-io mg-utils json)
|
||||
|
||||
add_macro_benchmark(clients/query_client.cpp)
|
||||
target_link_libraries(${test_prefix}query_client mg-communication mg-io mg-utils)
|
||||
|
||||
add_macro_benchmark(clients/card_fraud_client.cpp)
|
||||
target_link_libraries(${test_prefix}card_fraud_client mg-communication mg-io mg-utils mg-stats json)
|
||||
target_link_libraries(${test_prefix}card_fraud_client mg-communication mg-io mg-utils json)
|
||||
|
@ -5,9 +5,6 @@
|
||||
|
||||
#include "gflags/gflags.h"
|
||||
|
||||
#include "communication/rpc/client.hpp"
|
||||
#include "stats/stats.hpp"
|
||||
#include "stats/stats_rpc_messages.hpp"
|
||||
#include "utils/thread/sync.hpp"
|
||||
|
||||
#include "long_running_common.hpp"
|
||||
@ -23,14 +20,6 @@ DEFINE_string(config, "", "test config");
|
||||
|
||||
enum class Role { WORKER, ANALYTIC, CLEANUP };
|
||||
|
||||
stats::Gauge &num_vertices = stats::GetGauge("vertices");
|
||||
stats::Gauge &num_edges = stats::GetGauge("edges");
|
||||
|
||||
void UpdateStats() {
|
||||
num_vertices.Set(num_pos + num_cards + num_transactions);
|
||||
num_edges.Set(2 * num_transactions);
|
||||
}
|
||||
|
||||
int64_t NumNodesWithLabel(Client &client, std::string label) {
|
||||
std::string query = fmt::format("MATCH (u :{}) RETURN count(u)", label);
|
||||
auto result = ExecuteNTimesTillSuccess(client, query, {}, MAX_RETRIES);
|
||||
@ -176,7 +165,6 @@ class CardFraudClient : public TestClient {
|
||||
card_id, tx_id, pos_id);
|
||||
|
||||
num_transactions++;
|
||||
UpdateStats();
|
||||
}
|
||||
|
||||
int64_t UniformInt(int64_t a, int64_t b) {
|
||||
@ -261,7 +249,6 @@ class CardFraudClient : public TestClient {
|
||||
num_transactions, num_transactions_db, deleted,
|
||||
num_transactions - num_transactions_db);
|
||||
num_transactions = num_transactions_db;
|
||||
UpdateStats();
|
||||
}
|
||||
|
||||
std::this_thread::sleep_for(
|
||||
@ -334,9 +321,6 @@ int main(int argc, char **argv) {
|
||||
|
||||
communication::Init();
|
||||
|
||||
stats::InitStatsLogging(
|
||||
fmt::format("client.long_running.{}.{}", FLAGS_group, FLAGS_scenario));
|
||||
|
||||
Endpoint endpoint(FLAGS_address, FLAGS_port);
|
||||
ClientContext context(FLAGS_use_ssl);
|
||||
Client client(&context);
|
||||
@ -384,7 +368,5 @@ int main(int argc, char **argv) {
|
||||
|
||||
RunMultithreadedTest(clients);
|
||||
|
||||
stats::StopStatsLogging();
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -6,8 +6,6 @@
|
||||
#include "gflags/gflags.h"
|
||||
|
||||
#include "long_running_common.hpp"
|
||||
#include "stats/stats.hpp"
|
||||
#include "stats/stats_rpc_messages.hpp"
|
||||
|
||||
class Graph500BfsClient : public TestClient {
|
||||
public:
|
||||
@ -55,7 +53,5 @@ int main(int argc, char **argv) {
|
||||
|
||||
RunMultithreadedTest(clients);
|
||||
|
||||
stats::StopStatsLogging();
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -14,8 +14,6 @@
|
||||
|
||||
#include "json/json.hpp"
|
||||
|
||||
#include "stats/metrics.hpp"
|
||||
#include "stats/stats.hpp"
|
||||
#include "utils/timer.hpp"
|
||||
|
||||
#include "common.hpp"
|
||||
@ -35,9 +33,9 @@ DEFINE_int32(duration, 30, "Number of seconds to execute benchmark");
|
||||
DEFINE_string(group, "unknown", "Test group name");
|
||||
DEFINE_string(scenario, "unknown", "Test scenario name");
|
||||
|
||||
auto &executed_queries = stats::GetCounter("executed_queries");
|
||||
auto &executed_steps = stats::GetCounter("executed_steps");
|
||||
auto &serialization_errors = stats::GetCounter("serialization_errors");
|
||||
std::atomic<uint64_t> executed_queries;
|
||||
std::atomic<uint64_t> executed_steps;
|
||||
std::atomic<uint64_t> serialization_errors;
|
||||
|
||||
class TestClient {
|
||||
public:
|
||||
@ -61,7 +59,7 @@ class TestClient {
|
||||
runner_thread_ = std::thread([&] {
|
||||
while (keep_running_) {
|
||||
Step();
|
||||
executed_steps.Bump();
|
||||
++executed_steps;
|
||||
}
|
||||
});
|
||||
}
|
||||
@ -84,7 +82,7 @@ class TestClient {
|
||||
std::tie(result, retries) =
|
||||
ExecuteNTimesTillSuccess(client_, query, params, MAX_RETRIES);
|
||||
} catch (const utils::BasicException &e) {
|
||||
serialization_errors.Bump(MAX_RETRIES);
|
||||
serialization_errors += MAX_RETRIES;
|
||||
return std::experimental::nullopt;
|
||||
}
|
||||
auto wall_time = timer.Elapsed();
|
||||
@ -98,8 +96,8 @@ class TestClient {
|
||||
stats_[query].push_back(std::move(metadata));
|
||||
}
|
||||
}
|
||||
executed_queries.Bump();
|
||||
serialization_errors.Bump(retries);
|
||||
++executed_queries;
|
||||
serialization_errors += retries;
|
||||
return result;
|
||||
}
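Note (not part of the diff): a standalone illustration of the counter replacement above. Plain std::atomic counters take the ++/+= updates shown in this hunk and stream directly into the JSON summary, with no stats::Counter Bump()/Value() round-trip; kMaxRetries stands in for MAX_RETRIES.

#include <atomic>
#include <cstdint>
#include <iostream>

std::atomic<std::uint64_t> executed_queries{0};
std::atomic<std::uint64_t> serialization_errors{0};

int main() {
  const int kMaxRetries = 10;           // stand-in for MAX_RETRIES
  ++executed_queries;                   // was executed_queries.Bump();
  serialization_errors += kMaxRetries;  // was serialization_errors.Bump(MAX_RETRIES);
  std::cout << "{\"num_executed_queries\": " << executed_queries << "}\n";
  return 0;
}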
|
||||
|
||||
@ -179,16 +177,11 @@ void RunMultithreadedTest(std::vector<std::unique_ptr<TestClient>> &clients) {
|
||||
auto it = aggregated_query_stats.insert({stat.first, Value(0.0)}).first;
|
||||
it->second = (it->second.ValueDouble() * old_count + stat.second) /
|
||||
(old_count + new_count);
|
||||
stats::LogStat(
|
||||
fmt::format("queries.{}.{}", query_stats.first, stat.first),
|
||||
(stat.second / new_count));
|
||||
}
|
||||
stats::LogStat(fmt::format("queries.{}.count", query_stats.first),
|
||||
new_count);
|
||||
}
|
||||
|
||||
out << "{\"num_executed_queries\": " << executed_queries.Value() << ", "
|
||||
<< "\"num_executed_steps\": " << executed_steps.Value() << ", "
|
||||
out << "{\"num_executed_queries\": " << executed_queries << ", "
|
||||
<< "\"num_executed_steps\": " << executed_steps << ", "
|
||||
<< "\"elapsed_time\": " << timer.Elapsed().count()
|
||||
<< ", \"queries\": [";
|
||||
utils::PrintIterable(
|
||||
|
@ -30,30 +30,8 @@ target_link_libraries(${test_prefix}binomial mg-utils)
|
||||
add_manual_test(bolt_client.cpp)
|
||||
target_link_libraries(${test_prefix}bolt_client mg-communication)
|
||||
|
||||
add_manual_test(card_fraud_generate_snapshot.cpp)
|
||||
target_link_libraries(${test_prefix}card_fraud_generate_snapshot mg-distributed kvstore_dummy_lib)
|
||||
|
||||
add_manual_test(card_fraud_local.cpp)
|
||||
target_link_libraries(${test_prefix}card_fraud_local mg-distributed kvstore_dummy_lib gtest)
|
||||
|
||||
add_manual_test(distributed_query_planner.cpp interactive_planning.cpp)
|
||||
target_link_libraries(${test_prefix}distributed_query_planner mg-distributed
|
||||
kvstore_dummy_lib)
|
||||
if (READLINE_FOUND)
|
||||
target_link_libraries(${test_prefix}distributed_query_planner readline)
|
||||
endif()
|
||||
|
||||
add_manual_test(distributed_repl.cpp)
|
||||
target_link_libraries(${test_prefix}distributed_repl mg-distributed kvstore_dummy_lib gtest readline)
|
||||
|
||||
add_manual_test(endinan.cpp)
|
||||
|
||||
add_manual_test(generate_snapshot.cpp)
|
||||
target_link_libraries(${test_prefix}generate_snapshot mg-distributed kvstore_dummy_lib)
|
||||
|
||||
add_manual_test(graph_500_generate_snapshot.cpp)
|
||||
target_link_libraries(${test_prefix}graph_500_generate_snapshot mg-distributed kvstore_dummy_lib)
|
||||
|
||||
add_manual_test(kvstore_console.cpp)
|
||||
target_link_libraries(${test_prefix}kvstore_console kvstore_lib gflags glog)
|
||||
|
||||
|
@ -6,11 +6,3 @@
|
||||
- ../../config # directory with config files
|
||||
outfile_paths: &OUTFILE_PATHS
|
||||
- \./memgraph/tests/qa/\.quality_assurance_status
|
||||
|
||||
- name: quality_assurance_distributed
|
||||
commands: TIMEOUT=300 ./continuous_integration --distributed
|
||||
infiles:
|
||||
- . # current directory
|
||||
- ../../build_debug/memgraph_distributed # memgraph distributed debug binary
|
||||
- ../../config # directory with config files
|
||||
outfile_paths: *OUTFILE_PATHS
|
||||
|
@ -22,9 +22,6 @@ endfunction(add_unit_test)
|
||||
add_unit_test(bolt_encoder.cpp)
|
||||
target_link_libraries(${test_prefix}bolt_encoder mg-single-node kvstore_dummy_lib)
|
||||
|
||||
add_unit_test(concurrent_id_mapper_distributed.cpp)
|
||||
target_link_libraries(${test_prefix}concurrent_id_mapper_distributed mg-distributed kvstore_dummy_lib)
|
||||
|
||||
add_unit_test(concurrent_id_mapper_single_node.cpp)
|
||||
target_link_libraries(${test_prefix}concurrent_id_mapper_single_node mg-single-node kvstore_dummy_lib)
|
||||
|
||||
@ -34,9 +31,6 @@ target_link_libraries(${test_prefix}concurrent_map_access mg-single-node kvstore
|
||||
add_unit_test(concurrent_map.cpp)
|
||||
target_link_libraries(${test_prefix}concurrent_map mg-single-node kvstore_dummy_lib)
|
||||
|
||||
add_unit_test(counters.cpp)
|
||||
target_link_libraries(${test_prefix}counters mg-distributed kvstore_dummy_lib)
|
||||
|
||||
add_unit_test(cypher_main_visitor.cpp)
|
||||
target_link_libraries(${test_prefix}cypher_main_visitor mg-single-node kvstore_dummy_lib)
|
||||
|
||||
@ -46,9 +40,6 @@ target_link_libraries(${test_prefix}database_key_index mg-single-node kvstore_du
|
||||
add_unit_test(database_label_property_index.cpp)
|
||||
target_link_libraries(${test_prefix}database_label_property_index mg-single-node kvstore_dummy_lib)
|
||||
|
||||
add_unit_test(database_master.cpp)
|
||||
target_link_libraries(${test_prefix}database_master mg-distributed kvstore_dummy_lib)
|
||||
|
||||
add_unit_test(database_transaction_timeout.cpp)
|
||||
target_link_libraries(${test_prefix}database_transaction_timeout mg-single-node kvstore_dummy_lib)
|
||||
|
||||
@ -58,64 +49,19 @@ target_link_libraries(${test_prefix}datastructure_union_find mg-single-node kvst
|
||||
add_unit_test(deferred_deleter.cpp)
|
||||
target_link_libraries(${test_prefix}deferred_deleter mg-single-node kvstore_dummy_lib)
|
||||
|
||||
add_unit_test(distributed_coordination.cpp)
|
||||
target_link_libraries(${test_prefix}distributed_coordination mg-distributed kvstore_dummy_lib)
|
||||
|
||||
add_unit_test(distributed_data_exchange.cpp)
|
||||
target_link_libraries(${test_prefix}distributed_data_exchange mg-distributed kvstore_dummy_lib)
|
||||
|
||||
add_unit_test(distributed_dgp_vertex_migrator.cpp)
|
||||
target_link_libraries(${test_prefix}distributed_dgp_vertex_migrator mg-distributed kvstore_dummy_lib)
|
||||
|
||||
add_unit_test(distributed_durability.cpp)
|
||||
target_link_libraries(${test_prefix}distributed_durability mg-distributed kvstore_dummy_lib)
|
||||
|
||||
add_unit_test(distributed_dynamic_worker.cpp)
|
||||
target_link_libraries(${test_prefix}distributed_dynamic_worker mg-distributed kvstore_dummy_lib)
|
||||
|
||||
add_unit_test(distributed_gc.cpp)
|
||||
target_link_libraries(${test_prefix}distributed_gc mg-distributed kvstore_dummy_lib)
|
||||
|
||||
add_unit_test(distributed_graph_db.cpp)
|
||||
target_link_libraries(${test_prefix}distributed_graph_db mg-distributed kvstore_dummy_lib)
|
||||
|
||||
add_unit_test(distributed_interpretation.cpp)
|
||||
target_link_libraries(${test_prefix}distributed_interpretation mg-distributed kvstore_dummy_lib)
|
||||
|
||||
add_unit_test(distributed_query_plan.cpp)
|
||||
target_link_libraries(${test_prefix}distributed_query_plan mg-distributed kvstore_dummy_lib)
|
||||
|
||||
add_unit_test(distributed_reset.cpp)
|
||||
target_link_libraries(${test_prefix}distributed_reset mg-distributed kvstore_dummy_lib)
|
||||
|
||||
add_unit_test(distributed_serialization.cpp)
|
||||
target_link_libraries(${test_prefix}distributed_serialization mg-distributed kvstore_dummy_lib)
|
||||
|
||||
add_unit_test(distributed_updates.cpp)
|
||||
target_link_libraries(${test_prefix}distributed_updates mg-distributed kvstore_dummy_lib)
|
||||
|
||||
# TODO (buda): Replace token sharing with a centralized solution and write an appropriate test.
|
||||
# add_unit_test(distributed_token_sharing.cpp)
|
||||
# target_link_libraries(${test_prefix}distributed_token_sharing memgraph_lib kvstore_dummy_lib)
|
||||
|
||||
add_unit_test(bfs_distributed.cpp)
|
||||
target_link_libraries(${test_prefix}bfs_distributed mg-distributed kvstore_dummy_lib)
|
||||
|
||||
add_unit_test(bfs_single_node.cpp)
|
||||
target_link_libraries(${test_prefix}bfs_single_node mg-single-node kvstore_dummy_lib)
|
||||
|
||||
add_unit_test(distributed_dgp_partitioner.cpp)
|
||||
target_link_libraries(${test_prefix}distributed_dgp_partitioner mg-distributed kvstore_dummy_lib)
|
||||
|
||||
add_unit_test(durability.cpp)
|
||||
target_link_libraries(${test_prefix}durability mg-single-node kvstore_dummy_lib)
|
||||
|
||||
add_unit_test(dynamic_bitset.cpp)
|
||||
target_link_libraries(${test_prefix}dynamic_bitset mg-single-node kvstore_dummy_lib)
|
||||
|
||||
add_unit_test(gid.cpp)
|
||||
target_link_libraries(${test_prefix}gid mg-distributed kvstore_dummy_lib)
|
||||
|
||||
add_unit_test(graph_db_accessor.cpp)
|
||||
target_link_libraries(${test_prefix}graph_db_accessor mg-single-node kvstore_dummy_lib)
|
||||
|
||||
@ -131,9 +77,6 @@ target_link_libraries(${test_prefix}interpreter mg-single-node kvstore_dummy_lib
|
||||
add_unit_test(kvstore.cpp)
|
||||
target_link_libraries(${test_prefix}kvstore kvstore_lib glog)
|
||||
|
||||
add_unit_test(metrics.cpp)
|
||||
target_link_libraries(${test_prefix}metrics mg-single-node kvstore_dummy_lib)
|
||||
|
||||
add_unit_test(mvcc.cpp)
|
||||
target_link_libraries(${test_prefix}mvcc mg-single-node kvstore_dummy_lib)
|
||||
|
||||
@ -179,9 +122,6 @@ target_link_libraries(${test_prefix}query_plan_match_filter_return mg-single-nod
|
||||
add_unit_test(query_plan.cpp)
|
||||
target_link_libraries(${test_prefix}query_plan mg-single-node kvstore_dummy_lib)
|
||||
|
||||
add_unit_test(query_required_privileges.cpp)
|
||||
target_link_libraries(${test_prefix}query_required_privileges mg-single-node kvstore_dummy_lib)
|
||||
|
||||
add_unit_test(query_semantic.cpp)
|
||||
target_link_libraries(${test_prefix}query_semantic mg-single-node kvstore_dummy_lib)
|
||||
|
||||
@ -194,9 +134,6 @@ target_link_libraries(${test_prefix}queue mg-single-node kvstore_dummy_lib)
|
||||
add_unit_test(record_edge_vertex_accessor.cpp)
|
||||
target_link_libraries(${test_prefix}record_edge_vertex_accessor mg-single-node kvstore_dummy_lib)
|
||||
|
||||
add_unit_test(serialization.cpp)
|
||||
target_link_libraries(${test_prefix}serialization mg-distributed kvstore_dummy_lib)
|
||||
|
||||
add_unit_test(skiplist_access.cpp)
|
||||
target_link_libraries(${test_prefix}skiplist_access mg-single-node kvstore_dummy_lib)
|
||||
|
||||
@ -218,15 +155,9 @@ target_link_libraries(${test_prefix}state_delta mg-single-node kvstore_dummy_lib
|
||||
add_unit_test(static_bitset.cpp)
|
||||
target_link_libraries(${test_prefix}static_bitset mg-single-node kvstore_dummy_lib)
|
||||
|
||||
add_unit_test(storage_address.cpp)
|
||||
target_link_libraries(${test_prefix}storage_address mg-distributed kvstore_dummy_lib)
|
||||
|
||||
add_unit_test(stripped.cpp)
|
||||
target_link_libraries(${test_prefix}stripped mg-single-node kvstore_dummy_lib)
|
||||
|
||||
add_unit_test(transaction_engine_distributed.cpp)
|
||||
target_link_libraries(${test_prefix}transaction_engine_distributed mg-distributed kvstore_dummy_lib)
|
||||
|
||||
add_unit_test(transaction_engine_single_node.cpp)
|
||||
target_link_libraries(${test_prefix}transaction_engine_single_node mg-single-node kvstore_dummy_lib)
|
||||
|
||||
@ -253,9 +184,6 @@ target_link_libraries(${test_prefix}communication_buffer mg-communication)
|
||||
add_unit_test(network_timeouts.cpp)
|
||||
target_link_libraries(${test_prefix}network_timeouts mg-communication)
|
||||
|
||||
add_unit_test(rpc.cpp)
|
||||
target_link_libraries(${test_prefix}rpc mg-communication)
|
||||
|
||||
# Test data structures
|
||||
|
||||
add_unit_test(ring_buffer.cpp)
|
||||
@ -310,10 +238,6 @@ target_link_libraries(${test_prefix}utils_timestamp mg-utils)
|
||||
add_unit_test(utils_watchdog.cpp)
|
||||
target_link_libraries(${test_prefix}utils_watchdog mg-utils)
|
||||
|
||||
# Test mg-auth
|
||||
|
||||
add_unit_test(auth.cpp)
|
||||
target_link_libraries(${test_prefix}auth mg-auth kvstore_lib)
|
||||
|
||||
# Test LCP
|
||||
|
||||
|
@ -50,11 +50,6 @@ class TestSession : public Session<TestInputStream, TestOutputStream> {
|
||||
|
||||
void Abort() override {}
|
||||
|
||||
bool Authenticate(const std::string &username,
|
||||
const std::string &password) override {
|
||||
return true;
|
||||
}
|
||||
|
||||
private:
|
||||
std::string query_;
|
||||
};
|
||||
|
@ -1894,474 +1894,6 @@ TYPED_TEST(CypherMainVisitorTest, UnionAll) {
|
||||
ASSERT_FALSE(return_clause->body_.distinct);
|
||||
}
|
||||
|
||||
template <typename AstGeneratorT>
|
||||
void check_auth_query(std::string input, AuthQuery::Action action,
|
||||
std::string user, std::string role,
|
||||
std::string user_or_role,
|
||||
std::experimental::optional<TypedValue> password,
|
||||
std::vector<AuthQuery::Privilege> privileges) {
|
||||
AstGeneratorT ast_generator(input);
|
||||
auto *query = ast_generator.query_;
|
||||
ASSERT_TRUE(query->single_query_ &&
|
||||
query->single_query_->clauses_.size() == 1U);
|
||||
auto auth_query =
|
||||
dynamic_cast<AuthQuery *>(query->single_query_->clauses_[0]);
|
||||
EXPECT_EQ(auth_query->action_, action);
|
||||
EXPECT_EQ(auth_query->user_, user);
|
||||
EXPECT_EQ(auth_query->role_, role);
|
||||
EXPECT_EQ(auth_query->user_or_role_, user_or_role);
|
||||
ASSERT_EQ(static_cast<bool>(auth_query->password_),
|
||||
static_cast<bool>(password));
|
||||
if (password) {
|
||||
ast_generator.CheckLiteral(auth_query->password_, *password);
|
||||
}
|
||||
EXPECT_EQ(auth_query->privileges_, privileges);
|
||||
}
|
||||
|
||||
TYPED_TEST(CypherMainVisitorTest, UserOrRoleName) {
|
||||
ASSERT_THROW(TypeParam("CREATE ROLE `us|er`"), SyntaxException);
|
||||
ASSERT_THROW(TypeParam("CREATE ROLE `us er`"), SyntaxException);
|
||||
check_auth_query<TypeParam>("CREATE ROLE `user`",
|
||||
AuthQuery::Action::CREATE_ROLE, "", "user", "",
|
||||
{}, {});
|
||||
check_auth_query<TypeParam>("CREATE ROLE us___er",
|
||||
AuthQuery::Action::CREATE_ROLE, "", "us___er", "",
|
||||
{}, {});
|
||||
check_auth_query<TypeParam>("CREATE ROLE `us+er`",
|
||||
AuthQuery::Action::CREATE_ROLE, "", "us+er", "",
|
||||
{}, {});
|
||||
}
|
||||
|
||||
TYPED_TEST(CypherMainVisitorTest, CreateRole) {
|
||||
ASSERT_THROW(TypeParam("CREATE ROLE"), SyntaxException);
|
||||
check_auth_query<TypeParam>("CREATE ROLE rola",
|
||||
AuthQuery::Action::CREATE_ROLE, "", "rola", "",
|
||||
{}, {});
|
||||
ASSERT_THROW(TypeParam("CREATE ROLE lagano rolamo"), SyntaxException);
|
||||
}
|
||||
|
||||
TYPED_TEST(CypherMainVisitorTest, DropRole) {
|
||||
ASSERT_THROW(TypeParam("DROP ROLE"), SyntaxException);
|
||||
check_auth_query<TypeParam>("DROP ROLE rola", AuthQuery::Action::DROP_ROLE,
|
||||
"", "rola", "", {}, {});
|
||||
ASSERT_THROW(TypeParam("DROP ROLE lagano rolamo"), SyntaxException);
|
||||
}
|
||||
|
||||
TYPED_TEST(CypherMainVisitorTest, ShowRoles) {
|
||||
ASSERT_THROW(TypeParam("SHOW ROLES ROLES"), SyntaxException);
|
||||
check_auth_query<TypeParam>("SHOW ROLES", AuthQuery::Action::SHOW_ROLES, "",
|
||||
"", "", {}, {});
|
||||
}
|
||||
|
||||
TYPED_TEST(CypherMainVisitorTest, CreateUser) {
|
||||
ASSERT_THROW(TypeParam("CREATE USER"), SyntaxException);
|
||||
ASSERT_THROW(TypeParam("CREATE USER 123"), SyntaxException);
|
||||
check_auth_query<TypeParam>("CREATE USER user",
|
||||
AuthQuery::Action::CREATE_USER, "user", "", "",
|
||||
{}, {});
|
||||
check_auth_query<TypeParam>("CREATE USER user IDENTIFIED BY 'password'",
|
||||
AuthQuery::Action::CREATE_USER, "user", "", "",
|
||||
"password", {});
|
||||
check_auth_query<TypeParam>("CREATE USER user IDENTIFIED BY ''",
|
||||
AuthQuery::Action::CREATE_USER, "user", "", "",
|
||||
"", {});
|
||||
check_auth_query<TypeParam>("CREATE USER user IDENTIFIED BY null",
|
||||
AuthQuery::Action::CREATE_USER, "user", "", "",
|
||||
TypedValue::Null, {});
|
||||
ASSERT_THROW(TypeParam("CRATE USER user IDENTIFIED BY password"),
|
||||
SyntaxException);
|
||||
ASSERT_THROW(TypeParam("CREATE USER user IDENTIFIED BY 5"), SyntaxException);
|
||||
ASSERT_THROW(TypeParam("CREATE USER user IDENTIFIED BY "), SyntaxException);
|
||||
}
|
||||
|
||||
TYPED_TEST(CypherMainVisitorTest, SetPassword) {
|
||||
ASSERT_THROW(TypeParam("SET PASSWORD FOR"), SyntaxException);
|
||||
ASSERT_THROW(TypeParam("SET PASSWORD FOR user "), SyntaxException);
|
||||
check_auth_query<TypeParam>("SET PASSWORD FOR user TO null",
|
||||
AuthQuery::Action::SET_PASSWORD, "user", "", "",
|
||||
TypedValue::Null, {});
|
||||
check_auth_query<TypeParam>("SET PASSWORD FOR user TO 'password'",
|
||||
AuthQuery::Action::SET_PASSWORD, "user", "", "",
|
||||
"password", {});
|
||||
ASSERT_THROW(TypeParam("SET PASSWORD FOR user To 5"), SyntaxException);
|
||||
}
|
||||
|
||||
TYPED_TEST(CypherMainVisitorTest, DropUser) {
|
||||
ASSERT_THROW(TypeParam("DROP USER"), SyntaxException);
|
||||
check_auth_query<TypeParam>("DROP USER user", AuthQuery::Action::DROP_USER,
|
||||
"user", "", "", {}, {});
|
||||
ASSERT_THROW(TypeParam("DROP USER lagano rolamo"), SyntaxException);
|
||||
}
|
||||
|
||||
TYPED_TEST(CypherMainVisitorTest, ShowUsers) {
|
||||
ASSERT_THROW(TypeParam("SHOW USERS ROLES"), SyntaxException);
|
||||
check_auth_query<TypeParam>("SHOW USERS", AuthQuery::Action::SHOW_USERS, "",
|
||||
"", "", {}, {});
|
||||
}
|
||||
|
||||
TYPED_TEST(CypherMainVisitorTest, SetRole) {
|
||||
ASSERT_THROW(TypeParam("SET ROLE"), SyntaxException);
|
||||
ASSERT_THROW(TypeParam("SET ROLE user"), SyntaxException);
|
||||
ASSERT_THROW(TypeParam("SET ROLE FOR user"), SyntaxException);
|
||||
ASSERT_THROW(TypeParam("SET ROLE FOR user TO"), SyntaxException);
|
||||
check_auth_query<TypeParam>("SET ROLE FOR user TO role",
|
||||
AuthQuery::Action::SET_ROLE, "user", "role", "",
|
||||
{}, {});
|
||||
check_auth_query<TypeParam>("SET ROLE FOR user TO null",
|
||||
AuthQuery::Action::SET_ROLE, "user", "null", "",
|
||||
{}, {});
|
||||
}
|
||||
|
||||
TYPED_TEST(CypherMainVisitorTest, ClearRole) {
|
||||
ASSERT_THROW(TypeParam("CLEAR ROLE"), SyntaxException);
|
||||
ASSERT_THROW(TypeParam("CLEAR ROLE user"), SyntaxException);
|
||||
ASSERT_THROW(TypeParam("CLEAR ROLE FOR user TO"), SyntaxException);
|
||||
check_auth_query<TypeParam>("CLEAR ROLE FOR user",
|
||||
AuthQuery::Action::CLEAR_ROLE, "user", "", "", {},
|
||||
{});
|
||||
}
|
||||
|
||||
TYPED_TEST(CypherMainVisitorTest, GrantPrivilege) {
|
||||
ASSERT_THROW(TypeParam("GRANT"), SyntaxException);
|
||||
ASSERT_THROW(TypeParam("GRANT TO user"), SyntaxException);
|
||||
ASSERT_THROW(TypeParam("GRANT BLABLA TO user"), SyntaxException);
|
||||
ASSERT_THROW(TypeParam("GRANT MATCH, TO user"), SyntaxException);
|
||||
ASSERT_THROW(TypeParam("GRANT MATCH, BLABLA TO user"), SyntaxException);
|
||||
check_auth_query<TypeParam>("GRANT MATCH TO user",
|
||||
AuthQuery::Action::GRANT_PRIVILEGE, "", "",
|
||||
"user", {}, {AuthQuery::Privilege::MATCH});
|
||||
check_auth_query<TypeParam>(
|
||||
"GRANT MATCH, AUTH TO user", AuthQuery::Action::GRANT_PRIVILEGE, "", "",
|
||||
"user", {}, {AuthQuery::Privilege::MATCH, AuthQuery::Privilege::AUTH});
|
||||
}
|
||||
|
||||
TYPED_TEST(CypherMainVisitorTest, DenyPrivilege) {
|
||||
ASSERT_THROW(TypeParam("DENY"), SyntaxException);
|
||||
ASSERT_THROW(TypeParam("DENY TO user"), SyntaxException);
|
||||
ASSERT_THROW(TypeParam("DENY BLABLA TO user"), SyntaxException);
|
||||
ASSERT_THROW(TypeParam("DENY MATCH, TO user"), SyntaxException);
|
||||
ASSERT_THROW(TypeParam("DENY MATCH, BLABLA TO user"), SyntaxException);
|
||||
check_auth_query<TypeParam>("DENY MATCH TO user",
|
||||
AuthQuery::Action::DENY_PRIVILEGE, "", "", "user",
|
||||
{}, {AuthQuery::Privilege::MATCH});
|
||||
check_auth_query<TypeParam>(
|
||||
"DENY MATCH, AUTH TO user", AuthQuery::Action::DENY_PRIVILEGE, "", "",
|
||||
"user", {}, {AuthQuery::Privilege::MATCH, AuthQuery::Privilege::AUTH});
|
||||
}
|
||||
|
||||
TYPED_TEST(CypherMainVisitorTest, RevokePrivilege) {
|
||||
ASSERT_THROW(TypeParam("REVOKE"), SyntaxException);
|
||||
ASSERT_THROW(TypeParam("REVOKE FROM user"), SyntaxException);
|
||||
ASSERT_THROW(TypeParam("REVOKE BLABLA FROM user"), SyntaxException);
|
||||
ASSERT_THROW(TypeParam("REVOKE MATCH, FROM user"), SyntaxException);
|
||||
ASSERT_THROW(TypeParam("REVOKE MATCH, BLABLA FROM user"), SyntaxException);
|
||||
check_auth_query<TypeParam>("REVOKE MATCH FROM user",
|
||||
AuthQuery::Action::REVOKE_PRIVILEGE, "", "",
|
||||
"user", {}, {AuthQuery::Privilege::MATCH});
|
||||
check_auth_query<TypeParam>(
|
||||
"REVOKE MATCH, AUTH FROM user", AuthQuery::Action::REVOKE_PRIVILEGE, "",
|
||||
"", "user", {},
|
||||
{AuthQuery::Privilege::MATCH, AuthQuery::Privilege::AUTH});
|
||||
check_auth_query<TypeParam>(
|
||||
"REVOKE ALL PRIVILEGES FROM user", AuthQuery::Action::REVOKE_PRIVILEGE,
|
||||
"", "", "user", {},
|
||||
{AuthQuery::Privilege::CREATE, AuthQuery::Privilege::DELETE,
|
||||
AuthQuery::Privilege::MATCH, AuthQuery::Privilege::MERGE,
|
||||
AuthQuery::Privilege::SET, AuthQuery::Privilege::REMOVE,
|
||||
AuthQuery::Privilege::INDEX, AuthQuery::Privilege::AUTH,
|
||||
AuthQuery::Privilege::STREAM});
|
||||
}
|
||||
|
||||
TYPED_TEST(CypherMainVisitorTest, ShowPrivileges) {
|
||||
ASSERT_THROW(TypeParam("SHOW PRIVILEGES FOR"), SyntaxException);
|
||||
check_auth_query<TypeParam>("SHOW PRIVILEGES FOR user",
|
||||
AuthQuery::Action::SHOW_PRIVILEGES, "", "",
|
||||
"user", {}, {});
|
||||
ASSERT_THROW(TypeParam("SHOW PRIVILEGES FOR user1, user2"), SyntaxException);
|
||||
}
|
||||
|
||||
TYPED_TEST(CypherMainVisitorTest, ShowRoleForUser) {
|
||||
ASSERT_THROW(TypeParam("SHOW ROLE FOR "), SyntaxException);
|
||||
check_auth_query<TypeParam>("SHOW ROLE FOR user",
|
||||
AuthQuery::Action::SHOW_ROLE_FOR_USER, "user", "",
|
||||
"", {}, {});
|
||||
ASSERT_THROW(TypeParam("SHOW ROLE FOR user1, user2"), SyntaxException);
|
||||
}
|
||||
|
||||
TYPED_TEST(CypherMainVisitorTest, ShowUsersForRole) {
|
||||
ASSERT_THROW(TypeParam("SHOW USERS FOR "), SyntaxException);
|
||||
check_auth_query<TypeParam>("SHOW USERS FOR role",
|
||||
AuthQuery::Action::SHOW_USERS_FOR_ROLE, "",
|
||||
"role", "", {}, {});
|
||||
ASSERT_THROW(TypeParam("SHOW USERS FOR role1, role2"), SyntaxException);
|
||||
}
|
||||
|
||||
TYPED_TEST(CypherMainVisitorTest, CreateStream) {
|
||||
auto check_create_stream =
|
||||
[](std::string input, const std::string &stream_name,
|
||||
const std::string &stream_uri, const std::string &stream_topic,
|
||||
const std::string &transform_uri,
|
||||
std::experimental::optional<int64_t> batch_interval_in_ms,
|
||||
std::experimental::optional<int64_t> batch_size) {
|
||||
TypeParam ast_generator(input);
|
||||
auto *query = ast_generator.query_;
|
||||
ASSERT_TRUE(query->single_query_);
|
||||
auto *single_query = query->single_query_;
|
||||
ASSERT_EQ(single_query->clauses_.size(), 1U);
|
||||
auto *create_stream =
|
||||
dynamic_cast<CreateStream *>(single_query->clauses_[0]);
|
||||
ASSERT_TRUE(create_stream);
|
||||
EXPECT_EQ(create_stream->stream_name_, stream_name);
|
||||
ASSERT_TRUE(create_stream->stream_uri_);
|
||||
ast_generator.CheckLiteral(create_stream->stream_uri_,
|
||||
TypedValue(stream_uri));
|
||||
ASSERT_TRUE(create_stream->stream_topic_);
|
||||
ast_generator.CheckLiteral(create_stream->stream_topic_,
|
||||
TypedValue(stream_topic));
|
||||
ASSERT_TRUE(create_stream->transform_uri_);
|
||||
ast_generator.CheckLiteral(create_stream->transform_uri_,
|
||||
TypedValue(transform_uri));
|
||||
if (batch_interval_in_ms) {
|
||||
ASSERT_TRUE(create_stream->batch_interval_in_ms_);
|
||||
ast_generator.CheckLiteral(create_stream->batch_interval_in_ms_,
|
||||
TypedValue(*batch_interval_in_ms));
|
||||
} else {
|
||||
EXPECT_EQ(create_stream->batch_interval_in_ms_, nullptr);
|
||||
}
|
||||
if (batch_size) {
|
||||
ASSERT_TRUE(create_stream->batch_size_);
|
||||
ast_generator.CheckLiteral(create_stream->batch_size_,
|
||||
TypedValue(*batch_size));
|
||||
} else {
|
||||
EXPECT_EQ(create_stream->batch_size_, nullptr);
|
||||
}
|
||||
};
|
||||
|
||||
check_create_stream(
|
||||
"CREATE STREAM stream AS LOAD DATA KAFKA 'localhost' "
|
||||
"WITH TOPIC 'tropika' "
|
||||
"WITH TRANSFORM 'localhost/test.py'",
|
||||
"stream", "localhost", "tropika", "localhost/test.py",
|
||||
std::experimental::nullopt, std::experimental::nullopt);
|
||||
|
||||
check_create_stream(
|
||||
"CreaTE StreaM stream AS LOad daTA KAFKA 'localhost' "
|
||||
"WitH TopIC 'tropika' "
|
||||
"WITH TRAnsFORM 'localhost/test.py' bAtCH inTErvAL 168",
|
||||
"stream", "localhost", "tropika", "localhost/test.py", 168,
|
||||
std::experimental::nullopt);
|
||||
|
||||
check_create_stream(
|
||||
"CreaTE StreaM stream AS LOad daTA KAFKA 'localhost' "
|
||||
"WITH TopIC 'tropika' "
|
||||
"WITH TRAnsFORM 'localhost/test.py' bAtCH SizE 17",
|
||||
"stream", "localhost", "tropika", "localhost/test.py",
|
||||
std::experimental::nullopt, 17);
|
||||
|
||||
check_create_stream(
|
||||
"CreaTE StreaM stream AS LOad daTA KAFKA 'localhost' "
|
||||
"WitH TOPic 'tropika' "
|
||||
"WITH TRAnsFORM 'localhost/test.py' bAtCH inTErvAL 168 Batch SIze 17",
|
||||
"stream", "localhost", "tropika", "localhost/test.py", 168, 17);
|
||||
|
||||
EXPECT_THROW(check_create_stream(
|
||||
"CREATE STREAM stream AS LOAD DATA KAFKA 'localhost' "
|
||||
"WITH TRANSFORM 'localhost/test.py' BATCH INTERVAL 'jedan' ",
|
||||
"stream", "localhost", "tropika", "localhost/test.py", 168,
|
||||
std::experimental::nullopt),
|
||||
SyntaxException);
|
||||
EXPECT_THROW(check_create_stream(
|
||||
"CREATE STREAM stream AS LOAD DATA KAFKA 'localhost' "
|
||||
"WITH TOPIC 'tropika' "
|
||||
"WITH TRANSFORM 'localhost/test.py' BATCH SIZE 'jedan' ",
|
||||
"stream", "localhost", "tropika", "localhost/test.py",
|
||||
std::experimental::nullopt, 17),
|
||||
SyntaxException);
|
||||
EXPECT_THROW(check_create_stream(
|
||||
"CREATE STREAM 123 AS LOAD DATA KAFKA 'localhost' "
|
||||
"WITH TOPIC 'tropika' "
|
||||
"WITH TRANSFORM 'localhost/test.py' BATCH INTERVAL 168 ",
|
||||
"stream", "localhost", "tropika", "localhost/test.py", 168,
|
||||
std::experimental::nullopt),
|
||||
SyntaxException);
|
||||
EXPECT_THROW(check_create_stream(
|
||||
"CREATE STREAM stream AS LOAD DATA KAFKA localhost "
|
||||
"WITH TOPIC 'tropika' "
|
||||
"WITH TRANSFORM 'localhost/test.py'",
|
||||
"stream", "localhost", "tropika", "localhost/test.py",
|
||||
std::experimental::nullopt, std::experimental::nullopt),
|
||||
SyntaxException);
|
||||
EXPECT_THROW(check_create_stream(
|
||||
"CREATE STREAM stream AS LOAD DATA KAFKA 'localhost' "
|
||||
"WITH TOPIC 2"
|
||||
"WITH TRANSFORM localhost/test.py BATCH INTERVAL 168 ",
|
||||
"stream", "localhost", "tropika", "localhost/test.py", 168,
|
||||
std::experimental::nullopt),
|
||||
SyntaxException);
|
||||
EXPECT_THROW(check_create_stream(
|
||||
"CREATE STREAM stream AS LOAD DATA KAFKA 'localhost' "
|
||||
"WITH TOPIC 'tropika'"
|
||||
"WITH TRANSFORM localhost/test.py BATCH INTERVAL 168 ",
|
||||
"stream", "localhost", "tropika", "localhost/test.py", 168,
|
||||
std::experimental::nullopt),
|
||||
SyntaxException);
|
||||
}
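Note (not part of the diff): for reference, the full CREATE STREAM form exercised by the removed test above, written out once in canonical casing; the values are the same ones the mixed-case test strings use.

// Illustration only.
const char *kCreateStreamExample =
    "CREATE STREAM stream AS LOAD DATA KAFKA 'localhost' "
    "WITH TOPIC 'tropika' "
    "WITH TRANSFORM 'localhost/test.py' "
    "BATCH INTERVAL 168 BATCH SIZE 17";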
|
||||
|
||||
TYPED_TEST(CypherMainVisitorTest, DropStream) {
|
||||
auto check_drop_stream = [](std::string input,
|
||||
const std::string &stream_name) {
|
||||
TypeParam ast_generator(input);
|
||||
auto *query = ast_generator.query_;
|
||||
ASSERT_TRUE(query->single_query_);
|
||||
auto *single_query = query->single_query_;
|
||||
ASSERT_EQ(single_query->clauses_.size(), 1U);
|
||||
auto *drop_stream = dynamic_cast<DropStream *>(single_query->clauses_[0]);
|
||||
ASSERT_TRUE(drop_stream);
|
||||
EXPECT_EQ(drop_stream->stream_name_, stream_name);
|
||||
};
|
||||
|
||||
check_drop_stream("DRop stREAm stream", "stream");
|
||||
check_drop_stream("DRop stREAm strim", "strim");
|
||||
|
||||
EXPECT_THROW(check_drop_stream("DROp sTREAM", ""), SyntaxException);
|
||||
|
||||
EXPECT_THROW(check_drop_stream("DROP STreAM 123", "123"), SyntaxException);
|
||||
|
||||
EXPECT_THROW(check_drop_stream("DroP STREAM '123'", "123"), SyntaxException);
|
||||
}
|
||||
|
||||
TYPED_TEST(CypherMainVisitorTest, ShowStreams) {
|
||||
auto check_show_streams = [](std::string input) {
|
||||
TypeParam ast_generator(input);
|
||||
auto *query = ast_generator.query_;
|
||||
ASSERT_TRUE(query->single_query_);
|
||||
auto *single_query = query->single_query_;
|
||||
ASSERT_EQ(single_query->clauses_.size(), 1U);
|
||||
auto *show_streams = dynamic_cast<ShowStreams *>(single_query->clauses_[0]);
|
||||
EXPECT_TRUE(show_streams);
|
||||
};
|
||||
|
||||
check_show_streams("SHOW STREAMS");
|
||||
|
||||
EXPECT_THROW(check_show_streams("SHOW STREAMS lololo"), SyntaxException);
|
||||
}
|
||||
|
||||
TYPED_TEST(CypherMainVisitorTest, StartStopStream) {
|
||||
auto check_start_stop_stream =
|
||||
[](std::string input, const std::string &stream_name, bool is_start,
|
||||
std::experimental::optional<int64_t> limit_batches) {
|
||||
TypeParam ast_generator(input);
|
||||
auto *query = ast_generator.query_;
|
||||
ASSERT_TRUE(query->single_query_);
|
||||
auto *single_query = query->single_query_;
|
||||
ASSERT_EQ(single_query->clauses_.size(), 1U);
|
||||
auto *start_stop_stream =
|
||||
dynamic_cast<StartStopStream *>(single_query->clauses_[0]);
|
||||
EXPECT_TRUE(start_stop_stream);
|
||||
|
||||
EXPECT_EQ(start_stop_stream->stream_name_, stream_name);
|
||||
EXPECT_EQ(start_stop_stream->is_start_, is_start);
|
||||
|
||||
if (limit_batches) {
|
||||
ASSERT_TRUE(is_start);
|
||||
ASSERT_TRUE(start_stop_stream->limit_batches_);
|
||||
ast_generator.CheckLiteral(start_stop_stream->limit_batches_,
|
||||
TypedValue(*limit_batches));
|
||||
} else {
|
||||
EXPECT_EQ(start_stop_stream->limit_batches_, nullptr);
|
||||
}
|
||||
};
|
||||
|
||||
check_start_stop_stream("stARt STreaM STREAM", "STREAM", true,
|
||||
std::experimental::nullopt);
|
||||
check_start_stop_stream("stARt STreaM strim", "strim", true,
|
||||
std::experimental::nullopt);
|
||||
check_start_stop_stream("StARt STreAM strim LimIT 10 BATchES", "strim", true,
|
||||
10);
|
||||
|
||||
check_start_stop_stream("StoP StrEAM strim", "strim", false,
|
||||
std::experimental::nullopt);
|
||||
|
||||
EXPECT_THROW(check_start_stop_stream("staRT STReaM 'strim'", "strim", true,
|
||||
std::experimental::nullopt),
|
||||
SyntaxException);
|
||||
EXPECT_THROW(check_start_stop_stream("sTART STReaM strim LImiT 'dva' BATCheS",
|
||||
"strim", true, 2),
|
||||
SyntaxException);
|
||||
EXPECT_THROW(check_start_stop_stream("StoP STreAM 'strim'", "strim", false,
|
||||
std::experimental::nullopt),
|
||||
SyntaxException);
|
||||
EXPECT_THROW(check_start_stop_stream("STOp sTREAM strim LIMit 2 baTCHES",
|
||||
"strim", false, 2),
|
||||
SyntaxException);
|
||||
}
|
||||
|
||||
TYPED_TEST(CypherMainVisitorTest, StartStopAllStreams) {
|
||||
auto check_start_stop_all_streams = [](std::string input, bool is_start) {
|
||||
TypeParam ast_generator(input);
|
||||
auto *query = ast_generator.query_;
|
||||
ASSERT_TRUE(query->single_query_);
|
||||
auto *single_query = query->single_query_;
|
||||
ASSERT_EQ(single_query->clauses_.size(), 1U);
|
||||
auto *start_stop_all_streams =
|
||||
dynamic_cast<StartStopAllStreams *>(single_query->clauses_[0]);
|
||||
EXPECT_TRUE(start_stop_all_streams);
|
||||
|
||||
EXPECT_EQ(start_stop_all_streams->is_start_, is_start);
|
||||
};
|
||||
|
||||
check_start_stop_all_streams("STarT AlL StreAMs", true);
|
||||
|
||||
check_start_stop_all_streams("StoP aLL STrEAMs", false);
|
||||
|
||||
EXPECT_THROW(check_start_stop_all_streams("StaRT aLL STreAM", true),
|
||||
SyntaxException);
|
||||
|
||||
EXPECT_THROW(check_start_stop_all_streams("SToP AlL STREaM", false),
|
||||
SyntaxException);
|
||||
}
|
||||
|
||||
TYPED_TEST(CypherMainVisitorTest, TestStream) {
|
||||
auto check_test_stream =
|
||||
[](std::string input, const std::string &stream_name,
|
||||
std::experimental::optional<int64_t> limit_batches) {
|
||||
TypeParam ast_generator(input);
|
||||
auto *query = ast_generator.query_;
|
||||
ASSERT_TRUE(query->single_query_);
|
||||
auto *single_query = query->single_query_;
|
||||
ASSERT_EQ(single_query->clauses_.size(), 1U);
|
||||
auto *test_stream =
|
||||
dynamic_cast<TestStream *>(single_query->clauses_[0]);
|
||||
EXPECT_TRUE(test_stream);
|
||||
|
||||
EXPECT_EQ(test_stream->stream_name_, stream_name);
|
||||
|
||||
if (limit_batches) {
|
||||
ASSERT_TRUE(test_stream->limit_batches_);
|
||||
ast_generator.CheckLiteral(test_stream->limit_batches_,
|
||||
TypedValue(*limit_batches));
|
||||
} else {
|
||||
EXPECT_EQ(test_stream->limit_batches_, nullptr);
|
||||
}
|
||||
};
|
||||
|
||||
check_test_stream("TesT STreaM strim", "strim", std::experimental::nullopt);
|
||||
check_test_stream("TesT STreaM STREAM", "STREAM", std::experimental::nullopt);
|
||||
check_test_stream("tESt STreAM STREAM LimIT 10 BATchES", "STREAM", 10);
|
||||
|
||||
check_test_stream("Test StrEAM STREAM", "STREAM", std::experimental::nullopt);
|
||||
|
||||
EXPECT_THROW(check_test_stream("tEST STReaM 'strim'", "strim",
|
||||
std::experimental::nullopt),
|
||||
SyntaxException);
|
||||
EXPECT_THROW(
|
||||
check_test_stream("test STReaM strim LImiT 'dva' BATCheS", "strim", 2),
|
||||
SyntaxException);
|
||||
EXPECT_THROW(check_test_stream("test STreAM 'strim'", "strim",
|
||||
std::experimental::nullopt),
|
||||
SyntaxException);
|
||||
}
|
||||
|
||||
TYPED_TEST(CypherMainVisitorTest, TestExplainRegularQuery) {
|
||||
{
|
||||
TypeParam ast_generator("RETURN n");
|
||||
@ -2378,17 +1910,4 @@ TYPED_TEST(CypherMainVisitorTest, TestExplainExplainQuery) {
|
||||
SyntaxException);
|
||||
}
|
||||
|
||||
TYPED_TEST(CypherMainVisitorTest, TestExplainAuthQuery) {
|
||||
TypeParam ast_generator("SHOW ROLES");
|
||||
EXPECT_FALSE(ast_generator.query_->explain_);
|
||||
EXPECT_THROW(TypeParam ast_generator("EXPLAIN SHOW ROLES"), SyntaxException);
|
||||
}
|
||||
|
||||
TYPED_TEST(CypherMainVisitorTest, TestExplainStreamQuery) {
|
||||
TypeParam ast_generator("SHOW STREAMS");
|
||||
EXPECT_FALSE(ast_generator.query_->explain_);
|
||||
EXPECT_THROW(TypeParam ast_generator("EXPLAIN SHOW STREAMS"),
|
||||
SyntaxException);
|
||||
}
|
||||
|
||||
} // namespace
|
||||
|
@ -16,8 +16,6 @@
|
||||
#include "query/plan/operator.hpp"
|
||||
#include "query/plan/planner.hpp"
|
||||
|
||||
#include <capnp/message.h>
|
||||
|
||||
#include "query_common.hpp"
|
||||
|
||||
namespace query {
|
||||
@ -1319,153 +1317,4 @@ TYPED_TEST(TestPlanner, ReturnAsteriskOmitsLambdaSymbols) {
|
||||
}
|
||||
}
|
||||
|
||||
TYPED_TEST(TestPlanner, AuthQuery) {
|
||||
// Check that everything is properly forwarded from the AST node to the operator.
|
||||
FakeDbAccessor dba;
|
||||
AstStorage storage;
|
||||
auto *query =
|
||||
QUERY(SINGLE_QUERY(AUTH_QUERY(query::AuthQuery::Action::DROP_ROLE, "user",
|
||||
"role", "user_or_role", LITERAL("password"),
|
||||
std::vector<query::AuthQuery::Privilege>(
|
||||
{query::AuthQuery::Privilege::MATCH,
|
||||
query::AuthQuery::Privilege::AUTH}))));
|
||||
CheckPlan<TypeParam>(
|
||||
query, storage,
|
||||
ExpectAuthHandler(query::AuthQuery::Action::DROP_ROLE, "user", "role",
|
||||
"user_or_role", LITERAL("password"),
|
||||
{query::AuthQuery::Privilege::MATCH,
|
||||
query::AuthQuery::Privilege::AUTH}));
|
||||
}
|
||||
|
||||
TYPED_TEST(TestPlanner, CreateStream) {
|
||||
std::string stream_name("kafka"), stream_uri("localhost:1234"),
|
||||
stream_topic("tropik"), transform_uri("localhost:1234/file.py");
|
||||
int64_t batch_interval_in_ms = 100;
|
||||
int64_t batch_size = 10;
|
||||
{
|
||||
FakeDbAccessor dba;
|
||||
AstStorage storage;
|
||||
auto *query =
|
||||
QUERY(SINGLE_QUERY(CREATE_STREAM(stream_name, stream_uri, stream_topic,
|
||||
transform_uri, nullptr, nullptr)));
|
||||
auto expected = ExpectCreateStream(
|
||||
stream_name, LITERAL(stream_uri), LITERAL(stream_topic),
|
||||
LITERAL(transform_uri), nullptr, nullptr);
|
||||
CheckPlan<TypeParam>(query, storage, expected);
|
||||
}
|
||||
{
|
||||
FakeDbAccessor dba;
|
||||
AstStorage storage;
|
||||
auto *query = QUERY(SINGLE_QUERY(
|
||||
CREATE_STREAM(stream_name, stream_uri, stream_topic, transform_uri,
|
||||
LITERAL(batch_interval_in_ms), nullptr)));
|
||||
auto expected = ExpectCreateStream(
|
||||
stream_name, LITERAL(stream_uri), LITERAL(stream_topic),
|
||||
LITERAL(transform_uri), LITERAL(batch_interval_in_ms), nullptr);
|
||||
CheckPlan<TypeParam>(query, storage, expected);
|
||||
}
|
||||
{
|
||||
FakeDbAccessor dba;
|
||||
AstStorage storage;
|
||||
auto *query = QUERY(SINGLE_QUERY(
|
||||
CREATE_STREAM(stream_name, stream_uri, stream_topic, transform_uri,
|
||||
nullptr, LITERAL(batch_size))));
|
||||
auto expected = ExpectCreateStream(
|
||||
stream_name, LITERAL(stream_uri), LITERAL(stream_topic),
|
||||
LITERAL(transform_uri), nullptr, LITERAL(batch_size));
|
||||
CheckPlan<TypeParam>(query, storage, expected);
|
||||
}
|
||||
{
|
||||
FakeDbAccessor dba;
|
||||
AstStorage storage;
|
||||
auto *query = QUERY(SINGLE_QUERY(
|
||||
CREATE_STREAM(stream_name, stream_uri, stream_topic, transform_uri,
|
||||
LITERAL(batch_interval_in_ms), LITERAL(batch_size))));
|
||||
auto expected =
|
||||
ExpectCreateStream(stream_name, LITERAL(stream_uri),
|
||||
LITERAL(stream_topic), LITERAL(transform_uri),
|
||||
LITERAL(batch_interval_in_ms), LITERAL(batch_size));
|
||||
CheckPlan<TypeParam>(query, storage, expected);
|
||||
}
|
||||
}
|
||||
|
||||
TYPED_TEST(TestPlanner, DropStream) {
  std::string stream_name("kafka");
  FakeDbAccessor dba;
  AstStorage storage;
  auto *query = QUERY(SINGLE_QUERY(DROP_STREAM(stream_name)));
  auto expected = ExpectDropStream(stream_name);
  CheckPlan<TypeParam>(query, storage, expected);
}

TYPED_TEST(TestPlanner, ShowStreams) {
  FakeDbAccessor dba;
  AstStorage storage;
  auto *query = QUERY(SINGLE_QUERY(SHOW_STREAMS));
  auto expected = ExpectShowStreams();
  CheckPlan<TypeParam>(query, storage, expected);
}

TYPED_TEST(TestPlanner, StartStopStream) {
  std::string stream_name("kafka");
  {
    FakeDbAccessor dba;
    AstStorage storage;
    auto *query = QUERY(SINGLE_QUERY(START_STREAM(stream_name, nullptr)));
    auto expected = ExpectStartStopStream(stream_name, true, nullptr);
    CheckPlan<TypeParam>(query, storage, expected);
  }
  {
    FakeDbAccessor dba;
    AstStorage storage;
    auto limit_batches = LITERAL(10);
    auto *query = QUERY(SINGLE_QUERY(START_STREAM(stream_name, limit_batches)));
    auto expected = ExpectStartStopStream(stream_name, true, limit_batches);
    CheckPlan<TypeParam>(query, storage, expected);
  }
  {
    FakeDbAccessor dba;
    AstStorage storage;
    auto *query = QUERY(SINGLE_QUERY(STOP_STREAM(stream_name)));
    auto expected = ExpectStartStopStream(stream_name, false, nullptr);
    CheckPlan<TypeParam>(query, storage, expected);
  }
}

TYPED_TEST(TestPlanner, StartStopAllStreams) {
  {
    FakeDbAccessor dba;
    AstStorage storage;
    auto *query = QUERY(SINGLE_QUERY(START_ALL_STREAMS));
    auto expected = ExpectStartStopAllStreams(true);
    CheckPlan<TypeParam>(query, storage, expected);
  }
  {
    FakeDbAccessor dba;
    AstStorage storage;
    auto *query = QUERY(SINGLE_QUERY(STOP_ALL_STREAMS));
    auto expected = ExpectStartStopAllStreams(false);
    CheckPlan<TypeParam>(query, storage, expected);
  }
}

TYPED_TEST(TestPlanner, TestStream) {
  std::string stream_name("kafka");
  {
    FakeDbAccessor dba;
    AstStorage storage;
    auto *query = QUERY(SINGLE_QUERY(TEST_STREAM(stream_name, nullptr)));
    auto expected = ExpectTestStream(stream_name, nullptr);
    CheckPlan<TypeParam>(query, storage, expected);
  }
  {
    FakeDbAccessor dba;
    AstStorage storage;
    auto limit_batches = LITERAL(10);
    auto *query = QUERY(SINGLE_QUERY(TEST_STREAM(stream_name, limit_batches)));
    auto expected = ExpectTestStream(stream_name, limit_batches);
    CheckPlan<TypeParam>(query, storage, expected);
  }
}

} // namespace

@ -93,15 +93,6 @@ class PlanChecker : public virtual HierarchicalLogicalOperatorVisitor {
    return false;
  }

  VISIT(AuthHandler);

  VISIT(CreateStream);
  VISIT(DropStream);
  VISIT(ShowStreams);
  VISIT(StartStopStream);
  VISIT(StartStopAllStreams);
  VISIT(TestStream);

  PRE_VISIT(Explain);

#undef PRE_VISIT
@ -151,7 +142,6 @@ using ExpectLimit = OpChecker<Limit>;
using ExpectOrderBy = OpChecker<OrderBy>;
using ExpectUnwind = OpChecker<Unwind>;
using ExpectDistinct = OpChecker<Distinct>;
using ExpectShowStreams = OpChecker<ShowStreams>;

class ExpectExpandVariable : public OpChecker<ExpandVariable> {
 public:
@ -323,39 +313,6 @@ class ExpectScanAllByLabelPropertyRange
  std::experimental::optional<ScanAllByLabelPropertyRange::Bound> upper_bound_;
};

class ExpectAuthHandler : public OpChecker<AuthHandler> {
 public:
  ExpectAuthHandler(query::AuthQuery::Action action, std::string user,
                    std::string role, std::string user_or_role,
                    query::Expression *password,
                    std::vector<query::AuthQuery::Privilege> privileges)
      : action_(action),
        user_(user),
        role_(role),
        user_or_role_(user_or_role),
        password_(password),
        privileges_(privileges) {}

  void ExpectOp(AuthHandler &auth_handler, const SymbolTable &) override {
    EXPECT_EQ(auth_handler.action_, action_);
    EXPECT_EQ(auth_handler.user_, user_);
    EXPECT_EQ(auth_handler.role_, role_);
    EXPECT_EQ(auth_handler.user_or_role_, user_or_role_);
    // TODO(mtomic): We need to somehow test the password expression.
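    // For now, only check that a password expression is present on both the
    // expected and the actual operator.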
    EXPECT_TRUE(password_);
    EXPECT_TRUE(auth_handler.password_);
    EXPECT_EQ(auth_handler.privileges_, privileges_);
  }

 private:
  query::AuthQuery::Action action_;
  std::string user_;
  std::string role_;
  std::string user_or_role_;
  query::Expression *password_{nullptr};
  std::vector<query::AuthQuery::Privilege> privileges_;
};

class ExpectCreateIndex : public OpChecker<CreateIndex> {
 public:
  ExpectCreateIndex(storage::Label label, storage::Property property)
@ -393,130 +350,6 @@ class ExpectCartesian : public OpChecker<Cartesian> {
  const std::list<std::unique_ptr<BaseOpChecker>> &right_;
};

class ExpectCreateStream : public OpChecker<CreateStream> {
 public:
  ExpectCreateStream(std::string stream_name, query::Expression *stream_uri,
                     query::Expression *stream_topic,
                     query::Expression *transform_uri,
                     query::Expression *batch_interval_in_ms,
                     query::Expression *batch_size)
      : stream_name_(stream_name),
        stream_uri_(stream_uri),
        stream_topic_(stream_topic),
        transform_uri_(transform_uri),
        batch_interval_in_ms_(batch_interval_in_ms),
        batch_size_(batch_size) {}

  void ExpectOp(CreateStream &create_stream, const SymbolTable &) override {
    EXPECT_EQ(create_stream.stream_name_, stream_name_);
    // TODO: Proper expression equality
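    // The typeid checks below see only the static Expression* type, so they
    // act as placeholders rather than a structural comparison of the
    // expressions.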
    EXPECT_EQ(typeid(create_stream.stream_uri_).hash_code(),
              typeid(stream_uri_).hash_code());
    EXPECT_EQ(typeid(create_stream.stream_topic_).hash_code(),
              typeid(stream_topic_).hash_code());
    EXPECT_EQ(typeid(create_stream.transform_uri_).hash_code(),
              typeid(transform_uri_).hash_code());
    if (batch_interval_in_ms_ && create_stream.batch_interval_in_ms_) {
      EXPECT_EQ(typeid(create_stream.batch_interval_in_ms_).hash_code(),
                typeid(batch_interval_in_ms_).hash_code());
    } else {
      EXPECT_TRUE(batch_interval_in_ms_ == nullptr &&
                  create_stream.batch_interval_in_ms_ == nullptr);
    }
    if (batch_size_ && create_stream.batch_size_) {
      EXPECT_EQ(typeid(create_stream.batch_size_).hash_code(),
                typeid(batch_size_).hash_code());
    } else {
      EXPECT_TRUE(batch_size_ == nullptr &&
                  create_stream.batch_size_ == nullptr);
    }
  }

 private:
  std::string stream_name_;
  query::Expression *stream_uri_;
  query::Expression *stream_topic_;
  query::Expression *transform_uri_;
  query::Expression *batch_interval_in_ms_;
  query::Expression *batch_size_;
};

class ExpectDropStream : public OpChecker<DropStream> {
 public:
  explicit ExpectDropStream(std::string stream_name)
      : stream_name_(stream_name) {}

  void ExpectOp(DropStream &drop_stream, const SymbolTable &) override {
    EXPECT_EQ(drop_stream.stream_name_, stream_name_);
  }

 private:
  std::string stream_name_;
};

class ExpectStartStopStream : public OpChecker<StartStopStream> {
 public:
  ExpectStartStopStream(std::string stream_name, bool is_start,
                        query::Expression *limit_batches)
      : stream_name_(stream_name),
        is_start_(is_start),
        limit_batches_(limit_batches) {}

  void ExpectOp(StartStopStream &start_stop_stream,
                const SymbolTable &) override {
    EXPECT_EQ(start_stop_stream.stream_name_, stream_name_);
    EXPECT_EQ(start_stop_stream.is_start_, is_start_);
    // TODO: Proper expression equality
    if (limit_batches_ && start_stop_stream.limit_batches_) {
      EXPECT_EQ(typeid(start_stop_stream.limit_batches_).hash_code(),
                typeid(limit_batches_).hash_code());
    } else {
      EXPECT_TRUE(limit_batches_ == nullptr &&
                  start_stop_stream.limit_batches_ == nullptr);
    }
  }

 private:
  std::string stream_name_;
  bool is_start_;
  query::Expression *limit_batches_;
};

class ExpectStartStopAllStreams : public OpChecker<StartStopAllStreams> {
 public:
  explicit ExpectStartStopAllStreams(bool is_start) : is_start_(is_start) {}

  void ExpectOp(StartStopAllStreams &start_stop_all_streams,
                const SymbolTable &) override {
    EXPECT_EQ(start_stop_all_streams.is_start_, is_start_);
  }

 private:
  bool is_start_;
};

class ExpectTestStream : public OpChecker<TestStream> {
 public:
  ExpectTestStream(std::string stream_name, query::Expression *limit_batches)
      : stream_name_(stream_name), limit_batches_(limit_batches) {}

  void ExpectOp(TestStream &test_stream, const SymbolTable &) override {
    EXPECT_EQ(test_stream.stream_name_, stream_name_);
    // TODO: Proper expression equality
    if (limit_batches_ && test_stream.limit_batches_) {
      EXPECT_EQ(typeid(test_stream.limit_batches_).hash_code(),
                typeid(limit_batches_).hash_code());
    } else {
      EXPECT_TRUE(limit_batches_ == nullptr &&
                  test_stream.limit_batches_ == nullptr);
    }
  }

 private:
  std::string stream_name_;
  query::Expression *limit_batches_;
};

template <class T>
std::list<std::unique_ptr<BaseOpChecker>> MakeCheckers(T arg) {
  std::list<std::unique_ptr<BaseOpChecker>> l;
@ -2,10 +2,6 @@
add_executable(mg_import_csv mg_import_csv/main.cpp)
target_link_libraries(mg_import_csv mg-single-node kvstore_dummy_lib)

# StatsD Target
add_executable(mg_statsd mg_statsd/main.cpp)
target_link_libraries(mg_statsd mg-communication mg-io mg-utils mg-stats)

# Generate a version.hpp file
set(VERSION_STRING ${memgraph_VERSION})
configure_file(../../src/version.hpp.in version.hpp @ONLY)
@ -3,9 +3,6 @@ include_directories(SYSTEM ${GTEST_INCLUDE_DIR})
add_executable(mg_recovery_check mg_recovery_check.cpp)
target_link_libraries(mg_recovery_check mg-single-node gtest gtest_main kvstore_dummy_lib)

add_executable(mg_statsd_client statsd/mg_statsd_client.cpp)
target_link_libraries(mg_statsd_client mg-communication mg-io mg-utils mg-stats)

# Copy CSV data to CMake build dir
configure_file(csv/comment_nodes.csv csv/comment_nodes.csv COPYONLY)
configure_file(csv/comment_nodes_2.csv csv/comment_nodes_2.csv COPYONLY)