Use asynchronous committing

This commit is contained in:
János Benjamin Antal 2022-02-02 17:43:56 +01:00
parent 02336ada39
commit e755b3f2f5
2 changed files with 33 additions and 3 deletions

View File

@ -136,7 +136,10 @@ int64_t Message::Offset() const {
}
Consumer::Consumer(ConsumerInfo info, ConsumerFunction consumer_function)
: info_{std::move(info)}, consumer_function_(std::move(consumer_function)), cb_(info_.consumer_name) {
: info_{std::move(info)},
consumer_function_(std::move(consumer_function)),
cb_(info_.consumer_name),
offset_cb_(info_.consumer_name) {
MG_ASSERT(consumer_function_, "Empty consumer function for Kafka consumer");
// NOLINTNEXTLINE (modernize-use-nullptr)
if (info_.batch_interval < kMinimumInterval) {
@ -173,6 +176,10 @@ Consumer::Consumer(ConsumerInfo info, ConsumerFunction consumer_function)
throw ConsumerFailedToInitializeException(info_.consumer_name, error);
}
if (conf->set("offset_commit_cb", &offset_cb_, error) != RdKafka::Conf::CONF_OK) {
throw ConsumerFailedToInitializeException(info_.consumer_name, error);
}
if (conf->set("enable.partition.eof", "false", error) != RdKafka::Conf::CONF_OK) {
throw ConsumerFailedToInitializeException(info_.consumer_name, error);
}
@ -412,11 +419,11 @@ void Consumer::StartConsuming() {
info_.consumer_name, fmt::format("Couldn't get offsets from librdkafka {}", RdKafka::err2str(err)));
}
spdlog::trace("Got offset positions for {}.", info_.consumer_name);
if (const auto err = consumer_->commitSync(partitions); err != RdKafka::ERR_NO_ERROR) {
if (const auto err = consumer_->commitAsync(partitions); err != RdKafka::ERR_NO_ERROR) {
spdlog::warn("Committing offset of consumer {} failed: {}", info_.consumer_name, RdKafka::err2str(err));
break;
}
spdlog::trace("Commited offsets for {}.", info_.consumer_name);
spdlog::trace("Requested committing offsets asynchronously for {}.", info_.consumer_name);
} catch (const std::exception &e) {
spdlog::warn("Error happened in consumer {} while processing a batch: {}!", info_.consumer_name, e.what());
break;
@ -479,4 +486,16 @@ void Consumer::ConsumerRebalanceCb::rebalance_cb(RdKafka::KafkaConsumer *consume
}
}
// Stores the offset that the rebalance callback will use for partitions it
// handles. NOTE(review): presumably read by rebalance_cb on assignment — confirm.
void Consumer::ConsumerRebalanceCb::set_offset(int64_t offset) {
  offset_ = offset;
}
// Remembers the owning consumer's name so commit results can be attributed in
// log messages. The name is taken by value (sink parameter) and moved into the
// member to avoid an extra copy; the stray ';' after the definition is dropped.
Consumer::OffsetCommitCb::OffsetCommitCb(std::string consumer_name) : consumer_name_{std::move(consumer_name)} {}
// Invoked by librdkafka with the result of an offset commit request; since
// commits are issued asynchronously, failures surface only through this hook.
// The committed offsets themselves are not inspected, only the error code.
void Consumer::OffsetCommitCb::offset_commit_cb(RdKafka::ErrorCode err,
                                                std::vector<RdKafka::TopicPartition *> & /*offsets*/) {
  // Fast path: the commit went through, just trace it.
  if (err == RdKafka::ErrorCode::ERR_NO_ERROR) {
    spdlog::trace("Committing offset succeeded for {}", consumer_name_);
    return;
  }
  spdlog::error("Committing offset failed for {} with error \"{}\"", consumer_name_, RdKafka::err2str(err));
}
} // namespace integrations::kafka

View File

@ -174,6 +174,16 @@ class Consumer final : public RdKafka::EventCb {
std::string consumer_name_;
};
// Callback handed to librdkafka that receives the outcome of offset commit
// requests. Needed because commits are requested asynchronously, so success
// or failure is only reported through this hook.
class OffsetCommitCb : public RdKafka::OffsetCommitCb {
public:
// consumer_name identifies the owning consumer in log output.
explicit OffsetCommitCb(std::string consumer_name);
// Called by librdkafka with the commit result; the offsets argument is unused
// (the implementation names it /*offsets*/), only err is examined.
void offset_commit_cb(RdKafka::ErrorCode err, std::vector<RdKafka::TopicPartition *> &offsets) final;
private:
// Name of the owning consumer, used to attribute log messages.
std::string consumer_name_;
};
ConsumerInfo info_;
ConsumerFunction consumer_function_;
mutable std::atomic<bool> is_running_{false};
@ -182,5 +192,6 @@ class Consumer final : public RdKafka::EventCb {
std::unique_ptr<RdKafka::KafkaConsumer, std::function<void(RdKafka::KafkaConsumer *)>> consumer_;
std::thread thread_;
ConsumerRebalanceCb cb_;
OffsetCommitCb offset_cb_;
};
} // namespace integrations::kafka