@@ -10,14 +10,18 @@ import com.provectus.kafka.ui.model.SeekType;
 import com.provectus.kafka.ui.model.TopicMessage;
 import com.provectus.kafka.ui.util.ClusterUtil;
 import java.time.Duration;
+import java.util.Collection;
+import java.util.HashMap;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
 import java.util.Optional;
+import java.util.function.Supplier;
 import java.util.stream.Collectors;
 import lombok.RequiredArgsConstructor;
 import lombok.extern.log4j.Log4j2;
 import org.apache.commons.lang3.StringUtils;
+import org.apache.kafka.clients.consumer.Consumer;
 import org.apache.kafka.clients.consumer.ConsumerRecord;
 import org.apache.kafka.clients.consumer.ConsumerRecords;
 import org.apache.kafka.clients.consumer.KafkaConsumer;
@@ -47,30 +51,23 @@ public class ConsumingService {
     int recordsLimit = Optional.ofNullable(limit)
         .map(s -> Math.min(s, MAX_RECORD_LIMIT))
        .orElse(DEFAULT_RECORD_LIMIT);
-    RecordEmitter emitter = new RecordEmitter(kafkaService, cluster, topic, consumerPosition);
+    RecordEmitter emitter = new RecordEmitter(
+        () -> kafkaService.createConsumer(cluster),
+        new OffsetsSeek(topic, consumerPosition));
     RecordDeserializer recordDeserializer =
         deserializationService.getRecordDeserializerForCluster(cluster);
-    return Flux.create(emitter::emit)
+    return Flux.create(emitter)
         .subscribeOn(Schedulers.boundedElastic())
         .map(r -> ClusterUtil.mapToTopicMessage(r, recordDeserializer))
         .filter(m -> filterTopicMessage(m, query))
         .limitRequest(recordsLimit);
   }

-  public Mono<Map<TopicPartition, Long>> loadOffsets(KafkaCluster cluster, String topicName,
-                                                     List<Integer> partitionsToInclude) {
+  public Mono<Map<TopicPartition, Long>> offsetsForDeletion(KafkaCluster cluster, String topicName,
+                                                            List<Integer> partitionsToInclude) {
     return Mono.fromSupplier(() -> {
       try (KafkaConsumer<Bytes, Bytes> consumer = kafkaService.createConsumer(cluster)) {
-        var partitions = consumer.partitionsFor(topicName).stream()
-            .filter(
-                p -> partitionsToInclude.isEmpty() || partitionsToInclude.contains(p.partition()))
-            .map(p -> new TopicPartition(topicName, p.partition()))
-            .collect(Collectors.toList());
-        var beginningOffsets = consumer.beginningOffsets(partitions);
-        var endOffsets = consumer.endOffsets(partitions);
-        return endOffsets.entrySet().stream()
-            .filter(entry -> !beginningOffsets.get(entry.getKey()).equals(entry.getValue()))
-            .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
+        return significantOffsets(consumer, topicName, partitionsToInclude);
       } catch (Exception e) {
         log.error("Error occurred while consuming records", e);
         throw new RuntimeException(e);
@@ -78,6 +75,25 @@ public class ConsumingService {
     });
   }

+  /**
+   * Returns end offsets for partitions where the start offset != the end offset.
+   * This is useful when we need to verify that a partition is not empty.
+   */
+  private static Map<TopicPartition, Long> significantOffsets(Consumer<?, ?> consumer,
+                                                              String topicName,
+                                                              Collection<Integer>
+                                                                  partitionsToInclude) {
+    var partitions = consumer.partitionsFor(topicName).stream()
+        .filter(p -> partitionsToInclude.isEmpty() || partitionsToInclude.contains(p.partition()))
+        .map(p -> new TopicPartition(topicName, p.partition()))
+        .collect(Collectors.toList());
+    var beginningOffsets = consumer.beginningOffsets(partitions);
+    var endOffsets = consumer.endOffsets(partitions);
+    return endOffsets.entrySet().stream()
+        .filter(entry -> !beginningOffsets.get(entry.getKey()).equals(entry.getValue()))
+        .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
+  }
+
   private boolean filterTopicMessage(TopicMessage message, String query) {
     if (StringUtils.isEmpty(query)) {
       return true;
@@ -110,52 +126,48 @@ public class ConsumingService {
   }

   @RequiredArgsConstructor
-  private static class RecordEmitter {
-    private static final int MAX_EMPTY_POLLS_COUNT = 3;
+  static class RecordEmitter
+      implements java.util.function.Consumer<FluxSink<ConsumerRecord<Bytes, Bytes>>> {
+
     private static final Duration POLL_TIMEOUT_MS = Duration.ofMillis(1000L);

-    private final KafkaService kafkaService;
-    private final KafkaCluster cluster;
-    private final String topic;
-    private final ConsumerPosition consumerPosition;
+    private final Supplier<KafkaConsumer<Bytes, Bytes>> consumerSupplier;
+    private final OffsetsSeek offsetsSeek;

-    public void emit(FluxSink<ConsumerRecord<Bytes, Bytes>> sink) {
-      try (KafkaConsumer<Bytes, Bytes> consumer = kafkaService.createConsumer(cluster)) {
-        assignAndSeek(consumer);
-        int emptyPollsCount = 0;
-        log.info("assignment: {}", consumer.assignment());
-        while (!sink.isCancelled()) {
+    @Override
+    public void accept(FluxSink<ConsumerRecord<Bytes, Bytes>> sink) {
+      try (KafkaConsumer<Bytes, Bytes> consumer = consumerSupplier.get()) {
+        var waitingOffsets = offsetsSeek.assignAndSeek(consumer);
+        while (!sink.isCancelled() && !waitingOffsets.endReached()) {
           ConsumerRecords<Bytes, Bytes> records = consumer.poll(POLL_TIMEOUT_MS);
           log.info("{} records polled", records.count());
-          if (records.count() == 0 && emptyPollsCount > MAX_EMPTY_POLLS_COUNT) {
-            break;
-          } else {
-            emptyPollsCount++;
+          for (ConsumerRecord<Bytes, Bytes> record : records) {
+            if (!sink.isCancelled() && !waitingOffsets.endReached()) {
+              sink.next(record);
+              waitingOffsets.markPolled(record);
+            } else {
+              break;
+            }
           }
-          records.iterator()
-              .forEachRemaining(sink::next);
         }
         sink.complete();
+        log.info("Polling finished");
       } catch (Exception e) {
         log.error("Error occurred while consuming records", e);
         throw new RuntimeException(e);
       }
     }
+  }

-    private List<TopicPartition> getRequestedPartitions() {
-      Map<Integer, Long> partitionPositions = consumerPosition.getSeekTo();
+  @RequiredArgsConstructor
+  static class OffsetsSeek {

-      return Optional.ofNullable(cluster.getTopics().get(topic))
-          .orElseThrow(() -> new IllegalArgumentException("Unknown topic: " + topic))
-          .getPartitions().values().stream()
-          .filter(internalPartition -> partitionPositions.isEmpty()
-              || partitionPositions.containsKey(internalPartition.getPartition()))
-          .map(partitionInfo -> new TopicPartition(topic, partitionInfo.getPartition()))
-          .collect(Collectors.toList());
-    }
+    private final String topic;
+    private final ConsumerPosition consumerPosition;

-    private void assignAndSeek(KafkaConsumer<Bytes, Bytes> consumer) {
+    public WaitingOffsets assignAndSeek(Consumer<Bytes, Bytes> consumer) {
       SeekType seekType = consumerPosition.getSeekType();
+      log.info("Positioning consumer for topic {} with {}", topic, consumerPosition);
       switch (seekType) {
         case OFFSET:
           assignAndSeekForOffset(consumer);
@@ -169,10 +181,21 @@ public class ConsumingService {
         default:
           throw new IllegalArgumentException("Unknown seekType: " + seekType);
       }
+      log.info("Assignment: {}", consumer.assignment());
+      return new WaitingOffsets(topic, consumer);
     }

-    private void assignAndSeekForOffset(KafkaConsumer<Bytes, Bytes> consumer) {
-      List<TopicPartition> partitions = getRequestedPartitions();
+    private List<TopicPartition> getRequestedPartitions(Consumer<Bytes, Bytes> consumer) {
+      Map<Integer, Long> partitionPositions = consumerPosition.getSeekTo();
+      return consumer.partitionsFor(topic).stream()
+          .filter(
+              p -> partitionPositions.isEmpty() || partitionPositions.containsKey(p.partition()))
+          .map(p -> new TopicPartition(p.topic(), p.partition()))
+          .collect(Collectors.toList());
+    }
+
+    private void assignAndSeekForOffset(Consumer<Bytes, Bytes> consumer) {
+      List<TopicPartition> partitions = getRequestedPartitions(consumer);
       consumer.assign(partitions);
       consumerPosition.getSeekTo().forEach((partition, offset) -> {
         TopicPartition topicPartition = new TopicPartition(topic, partition);
@@ -180,7 +203,7 @@ public class ConsumingService {
       });
     }

-    private void assignAndSeekForTimestamp(KafkaConsumer<Bytes, Bytes> consumer) {
+    private void assignAndSeekForTimestamp(Consumer<Bytes, Bytes> consumer) {
       Map<TopicPartition, Long> timestampsToSearch =
           consumerPosition.getSeekTo().entrySet().stream()
               .collect(Collectors.toMap(
@@ -200,10 +223,34 @@ public class ConsumingService {
       offsetsForTimestamps.forEach(consumer::seek);
     }

-    private void assignAndSeekFromBeginning(KafkaConsumer<Bytes, Bytes> consumer) {
-      List<TopicPartition> partitions = getRequestedPartitions();
+    private void assignAndSeekFromBeginning(Consumer<Bytes, Bytes> consumer) {
+      List<TopicPartition> partitions = getRequestedPartitions(consumer);
       consumer.assign(partitions);
       consumer.seekToBeginning(partitions);
     }
+
+    static class WaitingOffsets {
+      final Map<Integer, Long> offsets = new HashMap<>(); // partition number -> offset
+
+      WaitingOffsets(String topic, Consumer<?, ?> consumer) {
+        var partitions = consumer.assignment().stream()
+            .map(TopicPartition::partition)
+            .collect(Collectors.toList());
+        significantOffsets(consumer, topic, partitions)
+            .forEach((tp, offset) -> offsets.put(tp.partition(), offset - 1));
+      }
+
+      void markPolled(ConsumerRecord<?, ?> rec) {
+        Long waiting = offsets.get(rec.partition());
+        if (waiting != null && waiting <= rec.offset()) {
+          offsets.remove(rec.partition());
+        }
+      }
+
+      boolean endReached() {
+        return offsets.isEmpty();
+      }
+    }
+
   }
 }