Fix Kafka stream writer

Summary:
Run each query from a batch in a separate transaction. This allows us to
abort only the failing transaction if the interpreter throws on a query,
without losing the rest of the batch.
Note that per-query transactions add overhead; we should revisit batching
queries into a single transaction in the future for possible speedups.

Reviewers: teon.banek

Reviewed By: teon.banek

Subscribers: pullbot

Differential Revision: https://phabricator.memgraph.io/D1501
This commit is contained in:
Matija Santl 2018-07-19 15:51:42 +02:00
parent 4c27596fdd
commit 53c1dab873

View File

@ -266,16 +266,17 @@ void SingleNodeMain() {
auto stream_writer =
[&session_data](const std::vector<std::string> &queries) {
database::GraphDbAccessor dba(session_data.db);
for (auto &query : queries) {
database::GraphDbAccessor dba(session_data.db);
KafkaResultStream stream;
try {
session_data.interpreter(query, dba, {}, false).PullAll(stream);
dba.Commit();
} catch (const query::QueryException &e) {
LOG(ERROR) << e.what();
dba.Abort();
}
}
dba.Commit();
};
integrations::kafka::Streams kafka_streams{
@ -355,16 +356,17 @@ void MasterMain() {
auto stream_writer =
[&session_data](const std::vector<std::string> &queries) {
database::GraphDbAccessor dba(session_data.db);
for (auto &query : queries) {
database::GraphDbAccessor dba(session_data.db);
KafkaResultStream stream;
try {
session_data.interpreter(query, dba, {}, false).PullAll(stream);
dba.Commit();
} catch (const query::QueryException &e) {
LOG(ERROR) << e.what();
dba.Abort();
}
}
dba.Commit();
};
integrations::kafka::Streams kafka_streams{