NeoRunner + Harness bugfix

Reviewers: mferencevic, buda

Reviewed By: mferencevic

Subscribers: mferencevic, pullbot

Differential Revision: https://phabricator.memgraph.io/D588
This commit is contained in:
Mislav Bradac 2017-07-29 17:03:34 +02:00
parent 04c2ab3ba9
commit 68e78c417b
36 changed files with 579 additions and 90 deletions

3
.gitignore vendored
View File

@ -1,4 +1,5 @@
*.breakpoint
*.data
*.dSYM/
*.o
*.out
@ -12,6 +13,7 @@
.gdb_history
.idea
.ycm_extra_conf.pyc
.temp/
Testing/
build
build/
@ -31,3 +33,4 @@ src/query/frontend/opencypher/generated/
tags
ve/
ve3/
perf.data*

37
config/benchmarking.conf Normal file
View File

@ -0,0 +1,37 @@
# MEMGRAPH DEFAULT TESTING CONFIG
# NOTE: all paths are relative to the run folder
# (where the executable is run)
# directory to the codes which will be compiled
--compile-directory=compiled
# path to the template (cpp) for codes generation
--template-cpp-path=template/plan_template_cpp
# directory to the folder with snapshots
--snapshot-directory=snapshots
# cleaning cycle interval
# if set to -1 the GC will not run
--gc-cycle-sec=-1
# snapshot cycle interval
# if set to -1 the snapshotter will not run
--snapshot-cycle-sec=-1
# create snapshot disabled on db exit
--snapshot-on-db-exit=false
# max number of snapshots which will be kept on the disk at some point
# if set to -1 the max number of snapshots is unlimited
--max-retained-snapshots=-1
# by default query engine runs in interpret mode
--interpret=true
# database recovering is disabled by default
--recover-on-startup=false
# use ast caching
--ast-cache=true

8
install_neo Executable file
View File

@ -0,0 +1,8 @@
#!/usr/bin/env bash
# Installs the Neo4j package from the official Neo Technology Debian
# repository. You should run this script as root.
# Import the repository signing key and register the apt source.
wget -O - http://debian.neo4j.org/neotechnology.gpg.key | apt-key add -
echo 'deb http://debian.neo4j.org/repo stable/' > /etc/apt/sources.list.d/neo4j.list
apt-get update
# -y: assume "yes" at the install prompt so the script also works
# non-interactively (e.g. from CI); without it apt-get stops to ask.
apt-get install -y neo4j

View File

@ -1,5 +1,5 @@
BATCH_SIZE = 100
VERTEX_COUNT = 50000
VERTEX_COUNT = 100000
for i in range(VERTEX_COUNT):
print("CREATE (n%d {x: %d})" % (i, i))

View File

@ -1,5 +1,5 @@
BATCH_SIZE = 100
VERTEX_COUNT = 10000
VERTEX_COUNT = 1000000
for i in range(VERTEX_COUNT):
print("CREATE (n%d {x: %d})" % (i, i))

View File

@ -1,3 +1,3 @@
{
"iterations": 20
"iterations": 3
}

View File

@ -0,0 +1 @@
MATCH (n)-[x]->(m) DELETE x

View File

@ -1 +0,0 @@
MATCH (a), (b) CREATE (a)-[:Type]->(b)

View File

@ -0,0 +1,12 @@
# Emits one stream of Cypher queries that create EDGE_COUNT edges between
# arbitrary node pairs, batched into BATCH_SIZE CREATE clauses per
# ';'-terminated query so each query stays a manageable size.
EDGE_COUNT = 100000
BATCH_SIZE = 50
query = []
while EDGE_COUNT:
    batch = EDGE_COUNT if EDGE_COUNT < BATCH_SIZE else BATCH_SIZE
    query += ["MATCH (a), (b)", 'CREATE (a)-[:Type]->(b) ' * batch, ";"]
    EDGE_COUNT -= batch
print(" ".join(query))

View File

@ -0,0 +1 @@
MATCH (n) DETACH DELETE n

View File

@ -0,0 +1,11 @@
# Emits one stream of Cypher queries that create PATTERN_COUNT disconnected
# ()-[:Type]->() patterns, batched into BATCH_SIZE CREATE clauses per
# ';'-terminated query.
PATTERN_COUNT = 100000
BATCH_SIZE = 50
query = []
while PATTERN_COUNT:
    batch = PATTERN_COUNT if PATTERN_COUNT < BATCH_SIZE else BATCH_SIZE
    query += ['CREATE ()-[:Type]->() ' * batch, ";"]
    PATTERN_COUNT -= batch
print(" ".join(query))

View File

@ -1 +0,0 @@
CREATE ()-[:Type]->()-[:Type]->()-[:Type]->()-[:Type]->()-[:Type]->()-[:Type]->()-[:Type]->()

View File

@ -1 +0,0 @@
CREATE ()-[:Type]->()

View File

@ -0,0 +1 @@
MATCH (n) DETACH DELETE n

View File

@ -1 +0,0 @@
CREATE ()

View File

@ -0,0 +1,11 @@
# Emits one stream of Cypher queries that create VERTEX_COUNT vertices,
# batched into BATCH_SIZE CREATE clauses per ';'-terminated query.
VERTEX_COUNT = 100000
BATCH_SIZE = 50
query = []
while VERTEX_COUNT:
    batch = VERTEX_COUNT if VERTEX_COUNT < BATCH_SIZE else BATCH_SIZE
    # NOTE(review): unlike the edge script there is no space between repeated
    # clauses ("()CREATE") -- ')' terminates the token so this still lexes,
    # but confirm it was intentional.
    query += ['CREATE ()' * batch, ";"]
    VERTEX_COUNT -= batch
print(" ".join(query))

View File

@ -0,0 +1 @@
MATCH (n) DETACH DELETE n

View File

@ -1 +0,0 @@
CREATE (:L1:L2:L3:L4:L5:L6:L7 {p1: true, p2: 42, p3: "Here is some text that is not extremely short", p4:"Short text", p5: 234.434, p6: 11.11, p7: false})

View File

@ -0,0 +1,15 @@
# Emits one stream of Cypher queries creating VERTEX_COUNT vertices that each
# carry seven labels and seven properties of mixed types, batched into
# BATCH_SIZE CREATE clauses per ';'-terminated query.
VERTEX_COUNT = 100000
BATCH_SIZE = 50
# Single CREATE clause repeated to form one batch.
clause = ('CREATE (:L1:L2:L3:L4:L5:L6:L7 '
          '{p1: true, p2: 42, '
          'p3: "Here is some text that is not extremely short", '
          'p4:"Short text", p5: 234.434, p6: 11.11, p7: false})')
query = []
while VERTEX_COUNT:
    batch = VERTEX_COUNT if VERTEX_COUNT < BATCH_SIZE else BATCH_SIZE
    query += [clause * batch, ";"]
    VERTEX_COUNT -= batch
print(" ".join(query))

View File

@ -1,3 +1,3 @@
from setup import create_edges
create_edges(50000, 1000)
create_edges(10000, 500)

View File

@ -1,3 +1,3 @@
from setup import create_vertices
create_vertices(1000)
create_vertices(500)

View File

@ -1,4 +1,4 @@
from setup import create_vertices, create_edges
create_vertices(1000)
create_edges(50000, 1000)
create_vertices(500)
create_edges(10000, 500)

View File

@ -1,4 +0,0 @@
RETURN
1 + 3, 2 - 1, 2 * 5, 5 / 2, 5 % 5, -5,
1.4 + 3.3, 6.2 - 5.4, 6.5 * 1.2, 6.6 / 1.2, 8.7 % 3.2, -6.6,
"Flo" + "Lasta"

View File

@ -0,0 +1,13 @@
def generate(expressions, repetitions):
    """Build one RETURN query evaluating every expression `repetitions` times.

    Each occurrence gets a unique sequential alias (a1, a2, ...) so the
    projections don't collide.

    Args:
        expressions: iterable of Cypher expression strings.
        repetitions: how many times to repeat the whole expression list.

    Returns:
        A "RETURN ..." query string with comma-separated aliased expressions.
    """
    alias_no = 0
    parts = []
    for _ in range(repetitions):
        for expression in expressions:
            alias_no += 1
            parts.append("%s as a%d" % (expression, alias_no))
    return "RETURN " + ", ".join(parts)

View File

@ -1 +0,0 @@
RETURN 1 < 2, 2 = 3, 6.66 < 10.2, 3.14 = 3.2, "Ana" < "Ivana", "Ana" = "Mmmmm", Null < Null, Null = Null

View File

@ -1,3 +1,3 @@
{
"iterations": 200
"iterations": 20
}

View File

@ -0,0 +1,10 @@
# Benchmark query exercising arithmetic, string, logical and comparison
# operators, each repeated 30 times via common.generate.
import common

# BUG FIX: the original list had '-5' + '1.4 + 3.3' (a missing comma), which
# silently concatenated the two entries into the single bogus expression
# '-51.4 + 3.3', dropping '-5' and '1.4 + 3.3' from the benchmark.
expressions = ['1 + 3', '2 - 1', '2 * 5', '5 / 2', '5 % 5', '-5', '1.4 + 3.3',
               '6.2 - 5.4', '6.5 * 1.2', '6.6 / 1.2', '8.7 % 3.2', '-6.6',
               '"Flo" + "Lasta"', 'true AND false', 'true OR false',
               'true XOR false', 'NOT true', '1 < 2', '2 = 3', '6.66 < 10.2',
               '3.14 = 3.2', '"Ana" < "Ivana"', '"Ana" = "Mmmmm"',
               'Null < Null', 'Null = Null']
print(common.generate(expressions, 30))

View File

@ -1 +0,0 @@
RETURN true AND false, true OR false, true XOR false, NOT true

View File

@ -1,3 +1,3 @@
{
"iterations": 10
"iterations": 3
}

View File

@ -8,7 +8,7 @@ from random import randint
def rint(upper_bound_exclusive):
return randint(0, upper_bound_exclusive - 1)
VERTEX_COUNT = 100
VERTEX_COUNT = 10000
EDGE_COUNT = VERTEX_COUNT * 3
# numbers of *different* labels, edge types and properties

View File

@ -25,26 +25,26 @@ Note that 'metadata' are only valid if the return_code is 0
import sys
import os
import time
import json
import io
from contextlib import redirect_stderr
from multiprocessing import Pool
from functools import partial
# tests/stress dir, that's the place of common.py.
sys.path.append(os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(
os.path.realpath(__file__)))), "stress"))
import time
import json
from argparse import ArgumentParser
from contextlib import redirect_stderr
import io
from multiprocessing import Pool
from common import connection_argument_parser, execute_till_success, \
argument_driver
from functools import partial
from neo4j.v1 import GraphDatabase, basic_auth
# string constants
RETURN_CODE = "return_code"
ERROR_MSG = "error_msg"
WALL_TIME = "wall_time"
def _prepare_for_json(obj):
if isinstance(obj, dict):
return {k: _prepare_for_json(v) for k, v in obj.items()}
@ -61,11 +61,13 @@ def _print_dict(d):
def _run_query(args, query, self):
if not hasattr(self, "driver"):
# TODO: this driver and session is never closed.
self.driver = argument_driver(args)
self.session = self.driver.session()
return execute_till_success(self.session, query)[2]
_run_query.__defaults__ = (_run_query,)
def main():
argp = connection_argument_parser()
argp.add_argument("--num-workers", type=int, default=1)
@ -79,7 +81,7 @@ def main():
_print_dict({RETURN_CODE: 1, ERROR_MSG: "Invalid cmd-line arguments"})
sys.exit(1)
queries = sys.stdin.read().split("\n")
queries = filter(lambda x: x.strip() != '', sys.stdin.read().split("\n"))
# Execute the queries.
metadatas = []

View File

@ -4,7 +4,6 @@
import logging
import os
from os import path
import requests
import time
import itertools
import json
@ -13,6 +12,7 @@ from argparse import ArgumentParser
from collections import OrderedDict
from collections import defaultdict
import tempfile
import shutil
import jail_faker as jail
from bolt_client import WALL_TIME
@ -30,7 +30,7 @@ class _QuerySuite:
"""
# what the QuerySuite can work with
KNOWN_KEYS = {"config", "setup", "itersetup", "run", "iterteardown",
"teardown"}
"teardown", "common"}
summary = "Summary:\n{:>30}{:>30}{:>30}{:>30}{:>30}\n".format(
"scenario_name", "query_parsing_time", "query_planning_time",
"query_plan_execution_time", WALL_TIME)
@ -94,6 +94,7 @@ class _QuerySuite:
groups_root/
groupname1/
config.json
common.py
setup.FILE_TYPE
teardown.FILE_TYPE
itersetup.FILE_TYPE
@ -187,6 +188,7 @@ class _QuerySuite:
measurements = []
measurement_sums = defaultdict(float)
def add_measurement(dictionary, iteration, key):
if key in dictionary:
measurement = {"target": key,
@ -256,7 +258,6 @@ class _QuerySuite:
self.summary += "{:>30}".format(time)
self.summary += "\n"
def runners(self):
""" Which runners can execute a QuerySuite scenario """
assert False, "This is a base class, use one of derived suites"
@ -273,7 +274,7 @@ class QuerySuite(_QuerySuite):
_QuerySuite.__init__(self, args)
def runners(self):
return ["MemgraphRunner"]
return ["MemgraphRunner", "NeoRunner"]
def groups(self):
return ["create", "match", "expression", "aggregation", "return",
@ -285,56 +286,42 @@ class QueryParallelSuite(_QuerySuite):
_QuerySuite.__init__(self, args)
def runners(self):
return ["MemgraphRunner"]
return ["MemgraphRunner", "NeoRunner"]
def groups(self):
return ["aggregation_parallel", "create_parallel"]
class MemgraphRunner:
class _BaseRunner:
"""
Knows how to start and stop Memgraph (backend) some client frontent
(bolt), and execute a cypher query.
Knows how to start and stop database (backend) some client frontend (bolt),
and execute a cypher query.
Execution returns benchmarking data (execution times, memory
usage etc).
Inherited class should implement start method and initialise database_bin
and bolt_client members of type Process.
"""
def __init__(self, args):
"""
Creates and configures MemgraphRunner.
Args:
args: args to pass to ArgumentParser
"""
log.info("Initializing MemgraphRunner with arguments %r", args)
self.log = logging.getLogger("_BaseRunner")
# parse arguments
argp = ArgumentParser("MemgraphRunnerArgumentParser")
argp.add_argument("--MemgraphRunnerBin",
default=os.path.join(os.path.dirname(__file__),
"../../../build/memgraph"))
argp.add_argument("--MemgraphRunnerConfig", required=False)
argp.add_argument("--MemgraphRunnerURI", default="localhost:7687")
argp.add_argument("--MemgraphRunnerEncryptBolt", action="store_true")
self.args, _ = argp.parse_known_args(args)
self.memgraph_bin = jail.get_process()
self.bolt_client = jail.get_process()
def _get_argparser(self):
argp = ArgumentParser("RunnerArgumentParser")
# TODO: These two options should be passed two database and client, not
# only client as we are doing at the moment.
argp.add_argument("--RunnerUri", default="localhost:7687")
argp.add_argument("--RunnerEncryptBolt", action="store_true")
return argp
def start(self):
log.info("MemgraphRunner.start")
environment = os.environ.copy()
if self.args.MemgraphRunnerConfig:
environment["MEMGRAPH_CONFIG"] = self.args.MemgraphRunnerConfig
self.memgraph_bin.run(self.args.MemgraphRunnerBin, env=environment)
# TODO change to a check via SIGUSR
time.sleep(1.0)
return self.memgraph_bin.get_pid()
raise NotImplementedError(
"This method should be implemented in derivded class")
def execute(self, queries, num_client_workers):
log.debug("MemgraphRunner.execute('%s')", str(queries))
self.log.debug("execute('%s')", str(queries))
client_args = [path.join(path.dirname(__file__), "bolt_client.py")]
client_args += ["--endpoint", self.args.MemgraphRunnerURI]
client_args += ["--endpoint", self.args.RunnerUri]
client_args += ["--num-workers", str(num_client_workers)]
if self.args.MemgraphRunnerEncryptBolt:
if self.args.RunnerEncryptBolt:
client_args.append("--ssl-enabled")
queries_fd, queries_path = tempfile.mkstemp()
try:
@ -348,12 +335,12 @@ class MemgraphRunner:
# TODO make the timeout configurable per query or something
return_code = self.bolt_client.run_and_wait(
"python3", client_args, timeout=300, stdin=queries_path)
"python3", client_args, timeout=10000, stdin=queries_path)
os.remove(queries_path)
if return_code != 0:
with open(self.bolt_client.get_stderr()) as f:
stderr = f.read()
log.error("MemgraphRunner - error while executing queries '%s'. "
self.log.error("Error while executing queries '%s'. "
"Failed with return_code %d and stderr:\n%s",
str(queries), return_code, stderr)
raise Exception("BoltClient execution failed")
@ -361,11 +348,80 @@ class MemgraphRunner:
return json.loads(f.read())
def stop(self):
log.info("MemgraphRunner.stop")
self.log.info("stop")
self.bolt_client.send_signal(jail.SIGKILL)
self.bolt_client.wait()
self.memgraph_bin.send_signal(jail.SIGKILL)
self.memgraph_bin.wait()
self.database_bin.send_signal(jail.SIGKILL)
self.database_bin.wait()
class MemgraphRunner(_BaseRunner):
"""
Knows how to start and stop Memgraph (backend) some client frontent
(bolt), and execute a cypher query.
Execution returns benchmarking data (execution times, memory
usage etc).
"""
def __init__(self, args):
super(MemgraphRunner, self).__init__(args)
self.log = logging.getLogger("MemgraphRunner")
argp = self._get_argparser()
argp.add_argument("--RunnerBin",
default=os.path.join(os.path.dirname(__file__),
"../../../build/memgraph"))
argp.add_argument("--RunnerConfig",
default=os.path.join(
os.path.dirname(__file__),
"../../../config/benchmarking.conf"))
# parse args
self.log.info("Initializing Runner with arguments %r", args)
self.args, _ = argp.parse_known_args(args)
self.database_bin = jail.get_process()
self.bolt_client = jail.get_process()
def start(self):
self.log.info("start")
environment = os.environ.copy()
environment["MEMGRAPH_CONFIG"] = self.args.RunnerConfig
self.database_bin.run(self.args.RunnerBin, env=environment,
timeout=10000)
# TODO change to a check via SIGUSR
time.sleep(1.0)
return self.database_bin.get_pid()
class NeoRunner(_BaseRunner):
def __init__(self, args):
super(NeoRunner, self).__init__(args)
self.log = logging.getLogger("NeoRunner")
argp = self._get_argparser()
argp.add_argument(
"--RunnerConfigDir",
default=path.join(path.dirname(path.realpath(__file__)),
"neo4j_config"))
argp.add_argument(
"--RunnerHomeDir",
default=path.join(path.dirname(path.realpath(__file__)),
"neo4j_home"))
# parse args
self.log.info("Initializing Runner with arguments %r", args)
self.args, _ = argp.parse_known_args(args)
self.database_bin = jail.get_process()
self.bolt_client = jail.get_process()
def start(self):
self.log.info("start")
environment = os.environ.copy()
environment["NEO4J_CONF"] = self.args.RunnerConfigDir
environment["NEO4J_HOME"] = self.args.RunnerHomeDir
neo4j_data_path = path.join(environment["NEO4J_HOME"], "data")
if path.exists(neo4j_data_path):
shutil.rmtree(neo4j_data_path)
self.database_bin.run("/usr/share/neo4j/bin/neo4j", args=["console"],
env=environment, timeout=10000)
# TODO change to a check via SIGUSR
time.sleep(5.0)
return self.database_bin.get_pid()
def parse_known_args():
@ -396,7 +452,8 @@ def main():
log.info("Executing for suite '%s', runner '%s'", args.suite, args.runner)
# Create suite
suites = {"QuerySuite": QuerySuite, "QueryParallelSuite": QueryParallelSuite}
suites = {"QuerySuite": QuerySuite,
"QueryParallelSuite": QueryParallelSuite}
if args.suite not in suites:
raise Exception(
"Suite '{}' isn't registered. Registered suites are: {}".format(
@ -410,7 +467,7 @@ def main():
sum([len(x) for x in group_scenarios.values()]))
# Create runner
runners = {"MemgraphRunner": MemgraphRunner}
runners = {"MemgraphRunner": MemgraphRunner, "NeoRunner": NeoRunner}
# TODO if make runner argument optional, then execute all runners
if args.runner not in suite.runners():
raise Exception("Runner '{}' not registered for suite '{}'".format(

View File

@ -1,7 +1,6 @@
#!/usr/bin/python3
import atexit
import os
import resource
import shutil
import subprocess
import sys
@ -18,12 +17,10 @@ TEMP_DIR = os.path.join(SCRIPT_DIR, ".temp")
STORAGE_DIR = os.path.join(SCRIPT_DIR, ".storage")
class ProcessException(Exception):
pass
class Process:
def __init__(self, tid):
self._tid = tid
@ -33,9 +30,9 @@ class Process:
self._usage = {}
self._files = []
def run(self, binary, args = [], env = {}, timeout = 120, stdin = "/dev/null"):
def run(self, binary, args=[], env={}, timeout=10000, stdin="/dev/null"):
# don't start a new process if one is already running
if self._proc != None and self._proc.returncode == None:
if self._proc is not None and self._proc.returncode is None:
raise ProcessException
# clear previous usage

View File

@ -0,0 +1,318 @@
#*****************************************************************
# Neo4j configuration
#
# For more details and a complete list of settings, please see
# https://neo4j.com/docs/operations-manual/current/reference/configuration-settings/
#*****************************************************************
# The name of the database to mount
#dbms.active_database=graph.db
# Paths of directories in the installation.
#dbms.directories.data=/var/lib/neo4j/data
#dbms.directories.plugins=/var/lib/neo4j/plugins
#dbms.directories.certificates=/var/lib/neo4j/certificates
#dbms.directories.logs=/var/log/neo4j
dbms.directories.lib=/usr/share/neo4j/lib
#dbms.directories.run=/var/run/neo4j
# This setting constrains all `LOAD CSV` import files to be under the `import` directory. Remove or comment it out to
# allow files to be loaded from anywhere in the filesystem; this introduces possible security problems. See the
# `LOAD CSV` section of the manual for details.
dbms.directories.import=/var/lib/neo4j/import
# Whether requests to Neo4j are authenticated.
# To disable authentication, uncomment this line
dbms.security.auth_enabled=false
# Enable this to be able to upgrade a store from an older version.
#dbms.allow_format_migration=true
# Java Heap Size: by default the Java heap size is dynamically
# calculated based on available system resources.
# Uncomment these lines to set specific initial and maximum
# heap size.
#dbms.memory.heap.initial_size=512m
#dbms.memory.heap.max_size=512m
# The amount of memory to use for mapping the store files, in bytes (or
# kilobytes with the 'k' suffix, megabytes with 'm' and gigabytes with 'g').
# If Neo4j is running on a dedicated server, then it is generally recommended
# to leave about 2-4 gigabytes for the operating system, give the JVM enough
# heap to hold all your transaction state and query context, and then leave the
# rest for the page cache.
# The default page cache memory assumes the machine is dedicated to running
# Neo4j, and is heuristically set to 50% of RAM minus the max Java heap size.
#dbms.memory.pagecache.size=10g
#*****************************************************************
# Network connector configuration
#*****************************************************************
# With default configuration Neo4j only accepts local connections.
# To accept non-local connections, uncomment this line:
#dbms.connectors.default_listen_address=0.0.0.0
# You can also choose a specific network interface, and configure a non-default
# port for each connector, by setting their individual listen_address.
# The address at which this server can be reached by its clients. This may be the server's IP address or DNS name, or
# it may be the address of a reverse proxy which sits in front of the server. This setting may be overridden for
# individual connectors below.
#dbms.connectors.default_advertised_address=localhost
# You can also choose a specific advertised hostname or IP address, and
# configure an advertised port for each connector, by setting their
# individual advertised_address.
# Bolt connector
dbms.connector.bolt.enabled=true
#dbms.connector.bolt.tls_level=OPTIONAL
#dbms.connector.bolt.listen_address=:7687
# HTTP Connector. There must be exactly one HTTP connector.
dbms.connector.http.enabled=true
#dbms.connector.http.listen_address=:7474
# HTTPS Connector. There can be zero or one HTTPS connectors.
dbms.connector.https.enabled=true
#dbms.connector.https.listen_address=:7473
# Number of Neo4j worker threads.
#dbms.threads.worker_count=
#*****************************************************************
# SSL system configuration
#*****************************************************************
# Names of the SSL policies to be used for the respective components.
# The legacy policy is a special policy which is not defined in
# the policy configuration section, but rather derives from
# dbms.directories.certificates and associated files
# (by default: neo4j.key and neo4j.cert). Its use will be deprecated.
# The policies to be used for connectors.
#
# N.B: Note that a connector must be configured to support/require
# SSL/TLS for the policy to actually be utilized.
#
# see: dbms.connector.*.tls_level
#bolt.ssl_policy=legacy
#https.ssl_policy=legacy
#*****************************************************************
# SSL policy configuration
#*****************************************************************
# Each policy is configured under a separate namespace, e.g.
# dbms.ssl.policy.<policyname>.*
#
# The example settings below are for a new policy named 'default'.
# The base directory for cryptographic objects. Each policy will by
# default look for its associated objects (keys, certificates, ...)
# under the base directory.
#
# Every such setting can be overridden using a full path to
# the respective object, but every policy will by default look
# for cryptographic objects in its base location.
#
# Mandatory setting
#dbms.ssl.policy.default.base_directory=certificates/default
# Allows the generation of a fresh private key and a self-signed
# certificate if none are found in the expected locations. It is
# recommended to turn this off again after keys have been generated.
#
# Keys should in general be generated and distributed offline
# by a trusted certificate authority (CA) and not by utilizing
# this mode.
#dbms.ssl.policy.default.allow_key_generation=false
# Enabling this makes it so that this policy ignores the contents
# of the trusted_dir and simply resorts to trusting everything.
#
# Use of this mode is discouraged. It would offer encryption but no security.
#dbms.ssl.policy.default.trust_all=false
# The private key for the default SSL policy. By default a file
# named private.key is expected under the base directory of the policy.
# It is mandatory that a key can be found or generated.
#dbms.ssl.policy.default.private_key=
# The private key for the default SSL policy. By default a file
# named public.crt is expected under the base directory of the policy.
# It is mandatory that a certificate can be found or generated.
#dbms.ssl.policy.default.public_certificate=
# The certificates of trusted parties. By default a directory named
# 'trusted' is expected under the base directory of the policy. It is
# mandatory to create the directory so that it exists, because it cannot
# be auto-created (for security purposes).
#
# To enforce client authentication client_auth must be set to 'require'!
#dbms.ssl.policy.default.trusted_dir=
# Client authentication setting. Values: none, optional, require
# The default is to require client authentication.
#
# Servers are always authenticated unless explicitly overridden
# using the trust_all setting. In a mutual authentication setup this
# should be kept at the default of require and trusted certificates
# must be installed in the trusted_dir.
#dbms.ssl.policy.default.client_auth=require
# A comma-separated list of allowed TLS versions.
# By default TLSv1, TLSv1.1 and TLSv1.2 are allowed.
#dbms.ssl.policy.default.tls_versions=
# A comma-separated list of allowed ciphers.
# The default ciphers are the defaults of the JVM platform.
#dbms.ssl.policy.default.ciphers=
#*****************************************************************
# Logging configuration
#*****************************************************************
# To enable HTTP logging, uncomment this line
#dbms.logs.http.enabled=true
# Number of HTTP logs to keep.
#dbms.logs.http.rotation.keep_number=5
# Size of each HTTP log that is kept.
#dbms.logs.http.rotation.size=20m
# To enable GC Logging, uncomment this line
#dbms.logs.gc.enabled=true
# GC Logging Options
# see http://docs.oracle.com/cd/E19957-01/819-0084-10/pt_tuningjava.html#wp57013 for more information.
#dbms.logs.gc.options=-XX:+PrintGCDetails -XX:+PrintGCDateStamps -XX:+PrintGCApplicationStoppedTime -XX:+PrintPromotionFailure -XX:+PrintTenuringDistribution
# Number of GC logs to keep.
#dbms.logs.gc.rotation.keep_number=5
# Size of each GC log that is kept.
#dbms.logs.gc.rotation.size=20m
# Size threshold for rotation of the debug log. If set to zero then no rotation will occur. Accepts a binary suffix "k",
# "m" or "g".
#dbms.logs.debug.rotation.size=20m
# Maximum number of history files for the internal log.
#dbms.logs.debug.rotation.keep_number=7
#*****************************************************************
# Miscellaneous configuration
#*****************************************************************
# Enable this to specify a parser other than the default one.
#cypher.default_language_version=3.0
# Determines if Cypher will allow using file URLs when loading data using
# `LOAD CSV`. Setting this value to `false` will cause Neo4j to fail `LOAD CSV`
# clauses that load data from the file system.
#dbms.security.allow_csv_import_from_file_urls=true
# Retention policy for transaction logs needed to perform recovery and backups.
dbms.tx_log.rotation.retention_policy=1 days
# Enable a remote shell server which Neo4j Shell clients can log in to.
#dbms.shell.enabled=true
# The network interface IP the shell will listen on (use 0.0.0.0 for all interfaces).
#dbms.shell.host=127.0.0.1
# The port the shell will listen on, default is 1337.
#dbms.shell.port=1337
# Only allow read operations from this Neo4j instance. This mode still requires
# write access to the directory for lock purposes.
#dbms.read_only=false
# Comma separated list of JAX-RS packages containing JAX-RS resources, one
# package name for each mountpoint. The listed package names will be loaded
# under the mountpoints specified. Uncomment this line to mount the
# org.neo4j.examples.server.unmanaged.HelloWorldResource.java from
# neo4j-server-examples under /examples/unmanaged, resulting in a final URL of
# http://localhost:7474/examples/unmanaged/helloworld/{nodeId}
#dbms.unmanaged_extension_classes=org.neo4j.examples.server.unmanaged=/examples/unmanaged
#********************************************************************
# JVM Parameters
#********************************************************************
# G1GC generally strikes a good balance between throughput and tail
# latency, without too much tuning.
dbms.jvm.additional=-XX:+UseG1GC
# Have common exceptions keep producing stack traces, so they can be
# debugged regardless of how often logs are rotated.
dbms.jvm.additional=-XX:-OmitStackTraceInFastThrow
# Make sure that `initmemory` is not only allocated, but committed to
# the process, before starting the database. This reduces memory
# fragmentation, increasing the effectiveness of transparent huge
# pages. It also reduces the possibility of seeing performance drop
# due to heap-growing GC events, where a decrease in available page
# cache leads to an increase in mean IO response time.
# Try reducing the heap memory, if this flag degrades performance.
dbms.jvm.additional=-XX:+AlwaysPreTouch
# Trust that non-static final fields are really final.
# This allows more optimizations and improves overall performance.
# NOTE: Disable this if you use embedded mode, or have extensions or dependencies that may use reflection or
# serialization to change the value of final fields!
dbms.jvm.additional=-XX:+UnlockExperimentalVMOptions
dbms.jvm.additional=-XX:+TrustFinalNonStaticFields
# Disable explicit garbage collection, which is occasionally invoked by the JDK itself.
dbms.jvm.additional=-XX:+DisableExplicitGC
# Remote JMX monitoring, uncomment and adjust the following lines as needed. Absolute paths to jmx.access and
# jmx.password files are required.
# Also make sure to update the jmx.access and jmx.password files with appropriate permission roles and passwords,
# the shipped configuration contains only a read only role called 'monitor' with password 'Neo4j'.
# For more details, see: http://download.oracle.com/javase/8/docs/technotes/guides/management/agent.html
# On Unix based systems the jmx.password file needs to be owned by the user that will run the server,
# and have permissions set to 0600.
# For details on setting these file permissions on Windows see:
# http://docs.oracle.com/javase/8/docs/technotes/guides/management/security-windows.html
#dbms.jvm.additional=-Dcom.sun.management.jmxremote.port=3637
#dbms.jvm.additional=-Dcom.sun.management.jmxremote.authenticate=true
#dbms.jvm.additional=-Dcom.sun.management.jmxremote.ssl=false
#dbms.jvm.additional=-Dcom.sun.management.jmxremote.password.file=/absolute/path/to/conf/jmx.password
#dbms.jvm.additional=-Dcom.sun.management.jmxremote.access.file=/absolute/path/to/conf/jmx.access
# Some systems cannot discover host name automatically, and need this line configured:
#dbms.jvm.additional=-Djava.rmi.server.hostname=$THE_NEO4J_SERVER_HOSTNAME
# Expand Diffie Hellman (DH) key size from default 1024 to 2048 for DH-RSA cipher suites used in server TLS handshakes.
# This is to protect the server from any potential passive eavesdropping.
dbms.jvm.additional=-Djdk.tls.ephemeralDHKeySize=2048
#********************************************************************
# Wrapper Windows NT/2000/XP Service Properties
#********************************************************************
# WARNING - Do not modify any of these properties when an application
# using this configuration file has been installed as a service.
# Please uninstall the service before modifying this section. The
# service can then be reinstalled.
# Name of the service
dbms.windows_service_name=neo4j
#********************************************************************
# Other Neo4j system properties
#********************************************************************
dbms.jvm.additional=-Dunsupported.dbms.udc.source=debian

View File

@ -0,0 +1,2 @@
*
!.gitignore

View File

@ -12,7 +12,7 @@ from threading import Thread
from time import sleep
from argparse import ArgumentParser
from neo4j.v1 import GraphDatabase, basic_auth
from neo4j.v1 import GraphDatabase
class OutputData:
@ -180,13 +180,13 @@ def argument_session(args):
:return: Bolt session context manager based on program arguments
'''
return bolt_session('bolt://' + args.endpoint,
basic_auth(args.username, args.password))
(args.username, str(args.password)))
def argument_driver(args, ssl=False):
return GraphDatabase.driver(
'bolt://' + args.endpoint,
basic_auth=(args.username, args.password),
auth=(args.username, str(args.password)),
encrypted=ssl)