minor refactoring

iliax 2023-08-07 20:56:25 +04:00
parent a11dda8a5f
commit ad952bf39d
2 changed files with 54 additions and 53 deletions
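
Summary of the change: the filterApplyErrors counter moves out of MessagesProcessing and into ConsumingStats, which now tracks it internally and exposes incFilterApplyError(); sendConsumingEvt, sendFinishEvent and createConsumingStats therefore no longer take the counter as a parameter. The send/sentConsumingInfo/sendFinishEvent/sendPhase helpers in MessagesProcessing are also moved above the sorting code, unchanged apart from routing filter errors through the new method.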

@@ -9,30 +9,32 @@ class ConsumingStats {
   private long bytes = 0;
   private int records = 0;
   private long elapsed = 0;
+  private int filterApplyErrors = 0;
 
-  void sendConsumingEvt(FluxSink<TopicMessageEventDTO> sink,
-                        PolledRecords polledRecords,
-                        int filterApplyErrors) {
+  void sendConsumingEvt(FluxSink<TopicMessageEventDTO> sink, PolledRecords polledRecords) {
     bytes += polledRecords.bytes();
     this.records += polledRecords.count();
     this.elapsed += polledRecords.elapsed().toMillis();
     sink.next(
         new TopicMessageEventDTO()
             .type(TopicMessageEventDTO.TypeEnum.CONSUMING)
-            .consuming(createConsumingStats(sink, filterApplyErrors))
+            .consuming(createConsumingStats(sink))
     );
   }
 
-  void sendFinishEvent(FluxSink<TopicMessageEventDTO> sink, int filterApplyErrors) {
+  void incFilterApplyError() {
+    filterApplyErrors++;
+  }
+
+  void sendFinishEvent(FluxSink<TopicMessageEventDTO> sink) {
     sink.next(
         new TopicMessageEventDTO()
             .type(TopicMessageEventDTO.TypeEnum.DONE)
-            .consuming(createConsumingStats(sink, filterApplyErrors))
+            .consuming(createConsumingStats(sink))
     );
   }
 
-  private TopicMessageConsumingDTO createConsumingStats(FluxSink<TopicMessageEventDTO> sink,
-                                                        int filterApplyErrors) {
+  private TopicMessageConsumingDTO createConsumingStats(FluxSink<TopicMessageEventDTO> sink) {
     return new TopicMessageConsumingDTO()
         .bytesConsumed(this.bytes)
         .elapsedMs(this.elapsed)

@@ -29,7 +29,6 @@ class MessagesProcessing {
   private final ConsumingStats consumingStats = new ConsumingStats();
   private long sentMessages = 0;
-  private int filterApplyErrors = 0;
 
   private final ConsumerRecordDeserializer deserializer;
   private final Predicate<TopicMessageDTO> filter;
@@ -40,6 +39,50 @@ class MessagesProcessing {
     return limit != null && sentMessages >= limit;
   }
 
+  void send(FluxSink<TopicMessageEventDTO> sink, Iterable<ConsumerRecord<Bytes, Bytes>> polled) {
+    sortForSending(polled, ascendingSortBeforeSend)
+        .forEach(rec -> {
+          if (!limitReached() && !sink.isCancelled()) {
+            TopicMessageDTO topicMessage = deserializer.deserialize(rec);
+            try {
+              if (filter.test(topicMessage)) {
+                sink.next(
+                    new TopicMessageEventDTO()
+                        .type(TopicMessageEventDTO.TypeEnum.MESSAGE)
+                        .message(topicMessage)
+                );
+                sentMessages++;
+              }
+            } catch (Exception e) {
+              consumingStats.incFilterApplyError();
+              log.trace("Error applying filter for message {}", topicMessage);
+            }
+          }
+        });
+  }
+
+  void sentConsumingInfo(FluxSink<TopicMessageEventDTO> sink, PolledRecords polledRecords) {
+    if (!sink.isCancelled()) {
+      consumingStats.sendConsumingEvt(sink, polledRecords);
+    }
+  }
+
+  void sendFinishEvent(FluxSink<TopicMessageEventDTO> sink) {
+    if (!sink.isCancelled()) {
+      consumingStats.sendFinishEvent(sink);
+    }
+  }
+
+  void sendPhase(FluxSink<TopicMessageEventDTO> sink, String name) {
+    if (!sink.isCancelled()) {
+      sink.next(
+          new TopicMessageEventDTO()
+              .type(TopicMessageEventDTO.TypeEnum.PHASE)
+              .phase(new TopicMessagePhaseDTO().name(name))
+      );
+    }
+  }
+
   /*
    * Sorting by timestamps, BUT requesting that records within same partitions should be ordered by offsets.
    */
@@ -66,48 +109,4 @@ class MessagesProcessing {
     return Iterables.mergeSorted(perPartition.values(), tsComparator);
   }
 
-  void send(FluxSink<TopicMessageEventDTO> sink, Iterable<ConsumerRecord<Bytes, Bytes>> polled) {
-    sortForSending(polled, ascendingSortBeforeSend)
-        .forEach(rec -> {
-          if (!limitReached() && !sink.isCancelled()) {
-            TopicMessageDTO topicMessage = deserializer.deserialize(rec);
-            try {
-              if (filter.test(topicMessage)) {
-                sink.next(
-                    new TopicMessageEventDTO()
-                        .type(TopicMessageEventDTO.TypeEnum.MESSAGE)
-                        .message(topicMessage)
-                );
-                sentMessages++;
-              }
-            } catch (Exception e) {
-              filterApplyErrors++;
-              log.trace("Error applying filter for message {}", topicMessage);
-            }
-          }
-        });
-  }
-
-  void sentConsumingInfo(FluxSink<TopicMessageEventDTO> sink, PolledRecords polledRecords) {
-    if (!sink.isCancelled()) {
-      consumingStats.sendConsumingEvt(sink, polledRecords, filterApplyErrors);
-    }
-  }
-
-  void sendFinishEvent(FluxSink<TopicMessageEventDTO> sink) {
-    if (!sink.isCancelled()) {
-      consumingStats.sendFinishEvent(sink, filterApplyErrors);
-    }
-  }
-
-  void sendPhase(FluxSink<TopicMessageEventDTO> sink, String name) {
-    if (!sink.isCancelled()) {
-      sink.next(
-          new TopicMessageEventDTO()
-              .type(TopicMessageEventDTO.TypeEnum.PHASE)
-              .phase(new TopicMessagePhaseDTO().name(name))
-      );
-    }
-  }
-
 }
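
For context, a minimal sketch of how an emitter in the same package might drive the refactored MessagesProcessing after this change. It is an illustration only: the emitOnePoll method and its parameters are hypothetical, not part of this commit, and it assumes the same imports as the classes above (Reactor's FluxSink, Kafka's ConsumerRecord and Bytes, and the generated event DTOs).

// Hypothetical sketch - not part of this commit.
void emitOnePoll(FluxSink<TopicMessageEventDTO> sink,
                 MessagesProcessing processing,
                 Iterable<ConsumerRecord<Bytes, Bytes>> records,  // raw records from one consumer poll
                 PolledRecords pollStats) {                       // bytes/count/elapsed stats for that poll
  processing.sendPhase(sink, "Consuming messages");  // PHASE event
  processing.send(sink, records);                    // deserialize, filter, emit MESSAGE events;
                                                     // filter failures are now counted inside
                                                     // ConsumingStats via incFilterApplyError()
  processing.sentConsumingInfo(sink, pollStats);     // CONSUMING event with accumulated stats
  processing.sendFinishEvent(sink);                  // DONE event with the same accumulated stats
  sink.complete();                                   // complete the reactive stream
}

The point of the refactoring is visible here: the caller no longer threads a filterApplyErrors counter through every call; the counter lives entirely inside ConsumingStats.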