iliax · 1 year ago
commit 8d30d14458

+ 1 - 1
kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/AbstractEmitter.java

@@ -6,7 +6,7 @@ import org.apache.kafka.clients.consumer.ConsumerRecord;
 import org.apache.kafka.common.utils.Bytes;
 import reactor.core.publisher.FluxSink;
 
-public abstract class AbstractEmitter {
+public abstract class AbstractEmitter implements java.util.function.Consumer<FluxSink<TopicMessageEventDTO>> {
 
   private final MessagesProcessing messagesProcessing;
   protected final PollingSettings pollingSettings;
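Note: the Consumer<FluxSink<TopicMessageEventDTO>> contract moves up into AbstractEmitter, so each concrete emitter below can drop its own implements clause while Flux.create(emitter) keeps compiling, because Flux.create accepts a Consumer<? super FluxSink<T>>. A minimal sketch of the pattern, with hypothetical class names not taken from this codebase:

import java.util.function.Consumer;
import reactor.core.publisher.Flux;
import reactor.core.publisher.FluxSink;

// The base class owns the Consumer<FluxSink<T>> contract; subclasses only
// implement accept() and push events into the sink.
abstract class EmitterBase implements Consumer<FluxSink<String>> {
}

class OneShotEmitter extends EmitterBase {
  @Override
  public void accept(FluxSink<String> sink) {
    sink.next("hello"); // emit one event
    sink.complete();    // then complete the stream
  }
}

class EmitterDemo {
  public static void main(String[] args) {
    // the emitter fits Flux.create as-is, no extra implements clause needed
    Flux.create(new OneShotEmitter()).subscribe(System.out::println);
  }
}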

+ 6 - 9
kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/BackwardRecordEmitter.java

@@ -16,21 +16,22 @@ import org.apache.kafka.common.utils.Bytes;
 import reactor.core.publisher.FluxSink;
 
 @Slf4j
-public class BackwardRecordEmitter
-    extends AbstractEmitter
-    implements java.util.function.Consumer<FluxSink<TopicMessageEventDTO>> {
+public class BackwardRecordEmitter extends AbstractEmitter {
 
   private final Supplier<EnhancedConsumer> consumerSupplier;
   private final ConsumerPosition consumerPosition;
   private final int messagesPerPage;
 
+  private final TimestampsSortedMessageProcessing messagesProcessing;
+
   public BackwardRecordEmitter(
       Supplier<EnhancedConsumer> consumerSupplier,
       ConsumerPosition consumerPosition,
       int messagesPerPage,
-      MessagesProcessing messagesProcessing,
+      TimestampsSortedMessageProcessing messagesProcessing,
       PollingSettings pollingSettings) {
     super(messagesProcessing, pollingSettings);
+    this.messagesProcessing = messagesProcessing;
     this.consumerPosition = consumerPosition;
     this.messagesPerPage = messagesPerPage;
     this.consumerSupplier = consumerSupplier;
@@ -73,6 +74,7 @@ public class BackwardRecordEmitter
         } else if (sink.isCancelled()) {
           log.debug("sink is cancelled after partitions poll iteration");
         }
+        messagesProcessing.flush(sink);
       }
       sendFinishStatsAndCompleteSink(sink);
       log.debug("Polling finished");
@@ -108,14 +110,9 @@ public class BackwardRecordEmitter
           .filter(r -> r.offset() < toOffset)
           .toList();
 
-      if (polledRecords.count() > 0 && filteredRecords.isEmpty()) {
-        // we already read all messages in target offsets interval
-        break;
-      }
       recordsToSend.addAll(filteredRecords);
     }
     log.debug("{} records to send", recordsToSend.size());
-    Collections.reverse(recordsToSend);
     return recordsToSend;
   }
 }
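Two behavioral changes here are easy to miss: the early break on an empty filtered batch is removed (on a transactional topic a poll can return only aborted records, so an empty batch no longer proves the target offset interval is exhausted), and the per-batch Collections.reverse(recordsToSend) is replaced by messagesProcessing.flush(sink), which re-orders a whole poll iteration at once. A self-contained sketch of that buffer-and-flush idea, with simplified stand-in types rather than the project's classes:

import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;

class BufferingSketch {
  record Msg(int partition, long offset, long timestamp) {}

  private final List<Msg> buffer = new ArrayList<>();

  void add(Msg m) {
    buffer.add(m); // sendMsg() analogue: accumulate instead of emitting directly
  }

  List<Msg> flush() {
    List<Msg> out = buffer.stream()
        .sorted(Comparator.comparingLong(Msg::timestamp).reversed()) // newest first
        .toList();
    buffer.clear(); // ready for the next poll iteration
    return out;
  }

  public static void main(String[] args) {
    var s = new BufferingSketch();
    s.add(new Msg(0, 10, 1_000));
    s.add(new Msg(1, 5, 3_000));
    s.add(new Msg(0, 11, 2_000));
    System.out.println(s.flush()); // timestamps come out 3000, 2000, 1000
  }
}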

+ 28 - 8
kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/ForwardRecordEmitter.java

@@ -6,7 +6,9 @@ import com.provectus.kafka.ui.model.TopicMessageEventDTO;
 import com.provectus.kafka.ui.util.SslPropertiesUtil;
 import java.util.Properties;
 import java.util.Random;
+import java.util.UUID;
 import java.util.function.Supplier;
+import lombok.SneakyThrows;
 import lombok.extern.slf4j.Slf4j;
 import org.apache.commons.lang3.RandomStringUtils;
 import org.apache.kafka.clients.consumer.ConsumerRecord;
@@ -14,15 +16,12 @@ import org.apache.kafka.clients.producer.KafkaProducer;
 import org.apache.kafka.clients.producer.ProducerConfig;
 import org.apache.kafka.clients.producer.ProducerRecord;
 import org.apache.kafka.common.errors.InterruptException;
-import org.apache.kafka.common.serialization.ByteArraySerializer;
 import org.apache.kafka.common.serialization.StringSerializer;
 import org.apache.kafka.common.utils.Bytes;
 import reactor.core.publisher.FluxSink;
 
 @Slf4j
-public class ForwardRecordEmitter
-    extends AbstractEmitter
-    implements java.util.function.Consumer<FluxSink<TopicMessageEventDTO>> {
+public class ForwardRecordEmitter extends AbstractEmitter {
 
   private final Supplier<EnhancedConsumer> consumerSupplier;
   private final ConsumerPosition position;
@@ -68,18 +67,39 @@ public class ForwardRecordEmitter
     }
   }
 
+  //  @SneakyThrows
   //  public static void main(String[] args) {
-  //    String topic = "test";
+  //    String topic = "test2tx";
   //
   //    Properties properties = new Properties();
   //    properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
   //    properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
   //    properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
+  //    properties.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, UUID.randomUUID().toString());
+  //
   //    try (var producer = new KafkaProducer<>(properties)) {
-  //      for (int i = 0; i < 10; i++) {
-  //        for (int j = 0; j < 30; j++) {
-  //          producer.send(new ProducerRecord<>(topic, (i + 1) + "", "j=" + j + "-" + RandomStringUtils.random(5)));
+  //      producer.initTransactions();
+  //
+  //      for (int i = 0; i < 5; i++) {
+  //        producer.beginTransaction();
+  //        for (int j = 0; j < 300; j++) {
+  //          producer.send(new ProducerRecord<>(topic, (i + 1) + "", "j=" + j + "-" + RandomStringUtils.random(5)))
+  //              .get();
+  //        }
+  //        producer.abortTransaction();
+  //
+  //        producer.beginTransaction();
+  //        producer.send(new ProducerRecord<>(topic, (i + 1) + "", "VISIBLE" + "-" + RandomStringUtils.random(5)))
+  //            .get();
+  //        producer.commitTransaction();
+  //
+  //        producer.beginTransaction();
+  //        for (int j = 0; j < 300; j++) {
+  //          producer.send(
+  //          new ProducerRecord<>(topic, ((i * 10) + 1) + "", "j=" + j + "-" + RandomStringUtils.random(5)))
+  //              .get();
   //        }
+  //        producer.abortTransaction();
   //      }
   //    }
   //  }
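The commented-out main above is a local harness: it interleaves large aborted transactions with single committed "VISIBLE" records, which is plausibly what motivated removing the early break in BackwardRecordEmitter, since a read_committed consumer sees nothing from the aborted batches. A hedged sketch of the consuming side (broker address, group id, and isolation setup are assumptions, not project code):

import java.time.Duration;
import java.util.List;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;

class ReadCommittedDemo {
  public static void main(String[] args) {
    Properties props = new Properties();
    props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
    props.put(ConsumerConfig.GROUP_ID_CONFIG, "demo-group");
    props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
    props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
    props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed");
    props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");

    try (var consumer = new KafkaConsumer<String, String>(props)) {
      consumer.subscribe(List.of("test2tx"));
      // Aborted batches are invisible at this isolation level, so only the
      // committed "VISIBLE" records come back even though the log is larger.
      consumer.poll(Duration.ofSeconds(3)).forEach(r ->
          System.out.printf("p=%d off=%d %s%n", r.partition(), r.offset(), r.value()));
    }
  }
}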

+ 6 - 6
kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/MessagesProcessing.java

@@ -14,13 +14,13 @@ import reactor.core.publisher.FluxSink;
 @Slf4j
 public class MessagesProcessing {
 
-  private final ConsumingStats consumingStats = new ConsumingStats();
-  private long sentMessages = 0;
-  private int filterApplyErrors = 0;
+  protected final ConsumingStats consumingStats = new ConsumingStats();
+  protected long sentMessages = 0;
+  protected int filterApplyErrors = 0;
 
-  private final ConsumerRecordDeserializer deserializer;
-  private final Predicate<TopicMessageDTO> filter;
-  private final @Nullable Integer limit;
+  protected final ConsumerRecordDeserializer deserializer;
+  protected final Predicate<TopicMessageDTO> filter;
+  protected final @Nullable Integer limit;
 
   public MessagesProcessing(ConsumerRecordDeserializer deserializer,
                             Predicate<TopicMessageDTO> filter,

+ 7 - 7
kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/SeekOperations.java

@@ -14,13 +14,13 @@ import org.apache.kafka.clients.consumer.Consumer;
 import org.apache.kafka.common.TopicPartition;
 
 @RequiredArgsConstructor(access = AccessLevel.PACKAGE)
-class SeekOperations {
+public class SeekOperations {
 
   private final Consumer<?, ?> consumer;
   private final OffsetsInfo offsetsInfo;
   private final Map<TopicPartition, Long> offsetsForSeek; //only contains non-empty partitions!
 
-  static SeekOperations create(Consumer<?, ?> consumer, ConsumerPosition consumerPosition) {
+  public static SeekOperations create(Consumer<?, ?> consumer, ConsumerPosition consumerPosition) {
     OffsetsInfo offsetsInfo;
     if (consumerPosition.getSeekTo() == null) {
       offsetsInfo = new OffsetsInfo(consumer, consumerPosition.getTopic());
@@ -34,25 +34,25 @@ class SeekOperations {
     );
   }
 
-  void assignAndSeekNonEmptyPartitions() {
+  public void assignAndSeekNonEmptyPartitions() {
     consumer.assign(offsetsForSeek.keySet());
     offsetsForSeek.forEach(consumer::seek);
   }
 
-  Map<TopicPartition, Long> getBeginOffsets() {
+  public Map<TopicPartition, Long> getBeginOffsets() {
     return offsetsInfo.getBeginOffsets();
   }
 
-  Map<TopicPartition, Long> getEndOffsets() {
+  public Map<TopicPartition, Long> getEndOffsets() {
     return offsetsInfo.getEndOffsets();
   }
 
-  boolean assignedPartitionsFullyPolled() {
+  public boolean assignedPartitionsFullyPolled() {
     return offsetsInfo.assignedPartitionsFullyPolled();
   }
 
   // Get offsets to seek to. NOTE: offsets do not contain empty partitions offsets
-  Map<TopicPartition, Long> getOffsetsForSeek() {
+  public Map<TopicPartition, Long> getOffsetsForSeek() {
     return offsetsForSeek;
   }
 

+ 1 - 2
kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/TailingEmitter.java

@@ -9,8 +9,7 @@ import org.apache.kafka.common.errors.InterruptException;
 import reactor.core.publisher.FluxSink;
 
 @Slf4j
-public class TailingEmitter extends AbstractEmitter
-    implements java.util.function.Consumer<FluxSink<TopicMessageEventDTO>> {
+public class TailingEmitter extends AbstractEmitter {
 
   private final Supplier<EnhancedConsumer> consumerSupplier;
   private final ConsumerPosition consumerPosition;

+ 71 - 0
kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/TimestampsSortedMessageProcessing.java

@@ -0,0 +1,71 @@
+package com.provectus.kafka.ui.emitter;
+
+import com.provectus.kafka.ui.model.TopicMessageDTO;
+import com.provectus.kafka.ui.model.TopicMessageEventDTO;
+import com.provectus.kafka.ui.serdes.ConsumerRecordDeserializer;
+import java.util.ArrayList;
+import java.util.Comparator;
+import java.util.List;
+import java.util.function.Predicate;
+import java.util.stream.Stream;
+import lombok.extern.slf4j.Slf4j;
+import org.apache.kafka.clients.consumer.ConsumerRecord;
+import org.apache.kafka.common.utils.Bytes;
+import org.jetbrains.annotations.Nullable;
+import reactor.core.publisher.FluxSink;
+
+@Slf4j
+public class TimestampsSortedMessageProcessing extends MessagesProcessing {
+
+  private final List<TopicMessageDTO> buffer = new ArrayList<>();
+
+  public TimestampsSortedMessageProcessing(ConsumerRecordDeserializer deserializer,
+                                           Predicate<TopicMessageDTO> filter,
+                                           @Nullable Integer limit) {
+    super(deserializer, filter, limit);
+  }
+
+  @Override
+  void sendMsg(FluxSink<TopicMessageEventDTO> sink, ConsumerRecord<Bytes, Bytes> rec) {
+    if (!sink.isCancelled() && !limitReached()) {
+      TopicMessageDTO topicMessage = deserializer.deserialize(rec);
+      try {
+        if (filter.test(topicMessage)) {
+          buffer.add(topicMessage);
+          sentMessages++;
+        }
+      } catch (Exception e) {
+        filterApplyErrors++;
+        log.trace("Error applying filter for message {}", topicMessage);
+      }
+    }
+  }
+
+  @Override
+  void sendFinishEvent(FluxSink<TopicMessageEventDTO> sink) {
+    flush(sink);
+    super.sendFinishEvent(sink);
+  }
+
+  void flush(FluxSink<TopicMessageEventDTO> sink) {
+    sorted(buffer)
+        .forEach(topicMessage ->
+            sink.next(
+                new TopicMessageEventDTO()
+                    .type(TopicMessageEventDTO.TypeEnum.MESSAGE)
+                    .message(topicMessage)));
+    buffer.clear();
+  }
+
+  static Stream<TopicMessageDTO> sorted(List<TopicMessageDTO> messages) {
+    return messages.stream()
+        .sorted(Comparator.comparingLong(TopicMessageDTO::getOffset).reversed())
+        .sorted(Comparator.comparingInt(TopicMessageDTO::getPartition))
+        .sorted((m1, m2) -> {
+          if (m1.getPartition().equals(m2.getPartition())) {
+            return 0; //sorting is stable, so it will just keep messages in same order
+          }
+          return -m1.getTimestamp().compareTo(m2.getTimestamp());
+        });
+  }
+}
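Since Stream.sorted is stable, the three chained passes in sorted(...) compose: offsets end up descending within each partition, and the final pass orders messages newest-timestamp-first while treating same-partition pairs as equal, relying on that stability to keep the per-partition order. A small worked example of the intended ordering; note the final comparator is only self-consistent when, as here, one partition's timestamps don't interleave with another's:

import java.util.Comparator;
import java.util.List;

class SortedOrderDemo {
  record Msg(int partition, long offset, long timestamp) {}

  public static void main(String[] args) {
    // partition 1 holds the strictly newer messages in this example
    var messages = List.of(
        new Msg(0, 1, 100), new Msg(0, 2, 200),
        new Msg(1, 1, 300), new Msg(1, 2, 400));

    var result = messages.stream()
        .sorted(Comparator.comparingLong(Msg::offset).reversed())
        .sorted(Comparator.comparingInt(Msg::partition))
        .sorted((m1, m2) -> m1.partition() == m2.partition()
            ? 0 // stable sort keeps the same-partition order from the passes above
            : -Long.compare(m1.timestamp(), m2.timestamp()))
        .toList();

    // Partition 1 (newest) comes out first, offsets descending within each group:
    // p1/off2, p1/off1, p0/off2, p0/off1
    result.forEach(System.out::println);
  }
}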

+ 1 - 0
kafka-ui-api/src/main/java/com/provectus/kafka/ui/serdes/builtin/ConsumerOffsetsSerde.java

@@ -2,6 +2,7 @@ package com.provectus.kafka.ui.serdes.builtin;
 
 import com.fasterxml.jackson.core.JsonGenerator;
 import com.fasterxml.jackson.databind.JsonSerializer;
+import com.fasterxml.jackson.databind.ObjectMapper;
 import com.fasterxml.jackson.databind.SerializerProvider;
 import com.fasterxml.jackson.databind.json.JsonMapper;
 import com.fasterxml.jackson.databind.module.SimpleModule;

+ 12 - 18
kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/MessagesService.java

@@ -2,11 +2,13 @@ package com.provectus.kafka.ui.service;
 
 import com.google.common.util.concurrent.RateLimiter;
 import com.provectus.kafka.ui.config.ClustersProperties;
+import com.provectus.kafka.ui.emitter.AbstractEmitter;
 import com.provectus.kafka.ui.emitter.BackwardRecordEmitter;
 import com.provectus.kafka.ui.emitter.ForwardRecordEmitter;
 import com.provectus.kafka.ui.emitter.MessageFilters;
 import com.provectus.kafka.ui.emitter.MessagesProcessing;
 import com.provectus.kafka.ui.emitter.TailingEmitter;
+import com.provectus.kafka.ui.emitter.TimestampsSortedMessageProcessing;
 import com.provectus.kafka.ui.exception.TopicNotFoundException;
 import com.provectus.kafka.ui.exception.ValidationException;
 import com.provectus.kafka.ui.model.ConsumerPosition;
@@ -231,37 +233,29 @@ public class MessagesService {
                                                       @Nullable String keySerde,
                                                       @Nullable String valueSerde) {
 
-    java.util.function.Consumer<? super FluxSink<TopicMessageEventDTO>> emitter;
-
-    var processing = new MessagesProcessing(
-        deserializationService.deserializerFor(cluster, topic, keySerde, valueSerde),
-        getMsgFilter(query, filterQueryType),
-        seekDirection == SeekDirectionDTO.TAILING ? null : limit
-    );
-
-    if (seekDirection.equals(SeekDirectionDTO.FORWARD)) {
-      emitter = new ForwardRecordEmitter(
+    var deserializer = deserializationService.deserializerFor(cluster, topic, keySerde, valueSerde);
+    var filter = getMsgFilter(query, filterQueryType);
+    AbstractEmitter emitter = switch (seekDirection) {
+      case FORWARD -> new ForwardRecordEmitter(
           () -> consumerGroupService.createConsumer(cluster),
           consumerPosition,
-          processing,
+          new MessagesProcessing(deserializer, filter, limit),
           cluster.getPollingSettings()
       );
-    } else if (seekDirection.equals(SeekDirectionDTO.BACKWARD)) {
-      emitter = new BackwardRecordEmitter(
+      case BACKWARD -> new BackwardRecordEmitter(
           () -> consumerGroupService.createConsumer(cluster),
           consumerPosition,
           limit,
-          processing,
+          new TimestampsSortedMessageProcessing(deserializer, filter, limit),
           cluster.getPollingSettings()
       );
-    } else {
-      emitter = new TailingEmitter(
+      case TAILING -> new TailingEmitter(
           () -> consumerGroupService.createConsumer(cluster),
           consumerPosition,
-          processing,
+          new MessagesProcessing(deserializer, filter, null),
           cluster.getPollingSettings()
       );
-    }
+    };
     return Flux.create(emitter)
         .map(getDataMasker(cluster, topic))
         .map(throttleUiPublish(seekDirection));
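The if/else chain over SeekDirectionDTO becomes an exhaustive switch expression: the compiler now enforces that every enum constant is handled, replacing the old implicit "anything else is tailing" fallback, and each branch builds its own processing object, with only the backward path getting the timestamp-sorting variant. The shape in miniature (the enum and strings here are illustrative only):

enum Direction { FORWARD, BACKWARD, TAILING }

class SwitchShapeDemo {
  static String emitterFor(Direction d) {
    return switch (d) { // exhaustive over the enum: no default branch needed
      case FORWARD -> "ForwardRecordEmitter + MessagesProcessing(limit)";
      case BACKWARD -> "BackwardRecordEmitter + TimestampsSortedMessageProcessing(limit)";
      case TAILING -> "TailingEmitter + MessagesProcessing(no limit)";
    };
  }

  public static void main(String[] args) {
    for (var d : Direction.values()) {
      System.out.println(d + " -> " + emitterFor(d));
    }
  }
}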

+ 11 - 19
kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/analyze/TopicAnalysisService.java

@@ -4,8 +4,11 @@ import com.provectus.kafka.ui.emitter.EmptyPollsCounter;
 import com.provectus.kafka.ui.emitter.EnhancedConsumer;
 import com.provectus.kafka.ui.emitter.OffsetsInfo;
 import com.provectus.kafka.ui.emitter.PollingSettings;
+import com.provectus.kafka.ui.emitter.SeekOperations;
 import com.provectus.kafka.ui.exception.TopicAnalysisException;
+import com.provectus.kafka.ui.model.ConsumerPosition;
 import com.provectus.kafka.ui.model.KafkaCluster;
+import com.provectus.kafka.ui.model.SeekTypeDTO;
 import com.provectus.kafka.ui.model.TopicAnalysisDTO;
 import com.provectus.kafka.ui.service.ConsumerGroupService;
 import com.provectus.kafka.ui.service.TopicsService;
@@ -44,7 +47,6 @@ public class TopicAnalysisService {
             startAnalysis(
                 cluster,
                 topicName,
-                topic.getPartitionCount(),
                 topic.getPartitions().values()
                     .stream()
                     .mapToLong(p -> p.getOffsetMax() - p.getOffsetMin())
@@ -55,13 +57,12 @@
 
   private synchronized void startAnalysis(KafkaCluster cluster,
                                           String topic,
-                                          int partitionsCnt,
                                           long approxNumberOfMsgs) {
     var topicId = new TopicIdentity(cluster, topic);
     if (analysisTasksStore.isAnalysisInProgress(topicId)) {
       throw new TopicAnalysisException("Topic is already analyzing");
     }
-    var task = new AnalysisTask(cluster, topicId, partitionsCnt, approxNumberOfMsgs, cluster.getPollingSettings());
+    var task = new AnalysisTask(cluster, topicId, approxNumberOfMsgs);
     analysisTasksStore.registerNewTask(topicId, task);
     Schedulers.boundedElastic().schedule(task);
   }
@@ -79,20 +80,16 @@ public class TopicAnalysisService {
     private final Instant startedAt = Instant.now();
 
     private final TopicIdentity topicId;
-    private final int partitionsCnt;
     private final long approxNumberOfMsgs;
-    private final EmptyPollsCounter emptyPollsCounter;
 
     private final TopicAnalysisStats totalStats = new TopicAnalysisStats();
     private final Map<Integer, TopicAnalysisStats> partitionStats = new HashMap<>();
 
     private final EnhancedConsumer consumer;
 
-    AnalysisTask(KafkaCluster cluster, TopicIdentity topicId, int partitionsCnt,
-                 long approxNumberOfMsgs, PollingSettings pollingSettings) {
+    AnalysisTask(KafkaCluster cluster, TopicIdentity topicId, long approxNumberOfMsgs) {
       this.topicId = topicId;
       this.approxNumberOfMsgs = approxNumberOfMsgs;
-      this.partitionsCnt = partitionsCnt;
       this.consumer = consumerGroupService.createConsumer(
           cluster,
           // to improve polling throughput
@@ -101,7 +98,6 @@ public class TopicAnalysisService {
               ConsumerConfig.MAX_POLL_RECORDS_CONFIG, "100000"
           )
       );
-      this.emptyPollsCounter = pollingSettings.createEmptyPollsCounter();
     }
 
     @Override
@@ -113,18 +109,14 @@ public class TopicAnalysisService {
     public void run() {
       try {
         log.info("Starting {} topic analysis", topicId);
-        var topicPartitions = IntStream.range(0, partitionsCnt)
-            .peek(i -> partitionStats.put(i, new TopicAnalysisStats()))
-            .mapToObj(i -> new TopicPartition(topicId.topicName, i))
-            .collect(Collectors.toList());
-
-        consumer.assign(topicPartitions);
-        consumer.seekToBeginning(topicPartitions);
+        var seekOperations = SeekOperations.create(
+            consumer,
+            new ConsumerPosition(SeekTypeDTO.BEGINNING, topicId.topicName, null)
+        );
+        seekOperations.assignAndSeekNonEmptyPartitions();
 
-        var offsetsInfo = new OffsetsInfo(consumer, topicId.topicName);
-        while (!offsetsInfo.assignedPartitionsFullyPolled() && !emptyPollsCounter.noDataEmptyPollsReached()) {
+        while (!seekOperations.assignedPartitionsFullyPolled()) {
           var polled = consumer.pollEnhanced(Duration.ofSeconds(3));
-          emptyPollsCounter.count(polled.count());
           polled.forEach(r -> {
             totalStats.apply(r);
             partitionStats.get(r.partition()).apply(r);
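With SeekOperations widened to public, the analysis task reuses the emitters' seek logic instead of hand-assigning every partition: empty partitions are skipped at assignment time, so the loop can terminate on assignedPartitionsFullyPolled() alone and the empty-polls counter becomes redundant. A hedged sketch of what that fully-polled check plausibly amounts to (assumed semantics, not the project's actual OffsetsInfo internals):

import java.util.Map;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.common.TopicPartition;

class FullyPolledSketch {
  // A partition set counts as fully polled once the consumer's position has
  // reached each assigned partition's end offset (captured at assignment time).
  static boolean fullyPolled(Consumer<?, ?> consumer, Map<TopicPartition, Long> endOffsets) {
    return endOffsets.entrySet().stream()
        .allMatch(e -> consumer.position(e.getKey()) >= e.getValue());
  }
}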

+ 7 - 6
kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/RecordEmitterTest.java

@@ -13,6 +13,7 @@ import com.provectus.kafka.ui.emitter.ForwardRecordEmitter;
 import com.provectus.kafka.ui.emitter.MessagesProcessing;
 import com.provectus.kafka.ui.emitter.PollingSettings;
 import com.provectus.kafka.ui.emitter.PollingThrottler;
+import com.provectus.kafka.ui.emitter.TimestampsSortedMessageProcessing;
 import com.provectus.kafka.ui.model.ConsumerPosition;
 import com.provectus.kafka.ui.model.TopicMessageEventDTO;
 import com.provectus.kafka.ui.producer.KafkaTestProducer;
@@ -126,7 +127,7 @@ class RecordEmitterTest extends AbstractIntegrationTest {
         this::createConsumer,
         new ConsumerPosition(BEGINNING, EMPTY_TOPIC, null),
         100,
-        createMessagesProcessing(),
+        new TimestampsSortedMessageProcessing(RECORD_DESERIALIZER, msg -> true, null),
         PollingSettings.createDefault()
     );
 
@@ -156,7 +157,7 @@ class RecordEmitterTest extends AbstractIntegrationTest {
         this::createConsumer,
         new ConsumerPosition(LATEST, TOPIC, null),
         PARTITIONS * MSGS_PER_PARTITION,
-        createMessagesProcessing(),
+        new TimestampsSortedMessageProcessing(RECORD_DESERIALIZER, msg -> true, null),
         PollingSettings.createDefault()
     );
 
@@ -185,7 +186,7 @@ class RecordEmitterTest extends AbstractIntegrationTest {
         this::createConsumer,
         new ConsumerPosition(OFFSET, TOPIC, targetOffsets),
         PARTITIONS * MSGS_PER_PARTITION,
-        createMessagesProcessing(),
+        new TimestampsSortedMessageProcessing(RECORD_DESERIALIZER, msg -> true, null),
         PollingSettings.createDefault()
     );
 
@@ -230,7 +231,7 @@ class RecordEmitterTest extends AbstractIntegrationTest {
         this::createConsumer,
         new ConsumerPosition(TIMESTAMP, TOPIC, targetTimestamps),
         PARTITIONS * MSGS_PER_PARTITION,
-        createMessagesProcessing(),
+        new TimestampsSortedMessageProcessing(RECORD_DESERIALIZER, msg -> true, null),
         PollingSettings.createDefault()
     );
 
@@ -261,7 +262,7 @@ class RecordEmitterTest extends AbstractIntegrationTest {
         this::createConsumer,
         new ConsumerPosition(OFFSET, TOPIC, targetOffsets),
         numMessages,
-        createMessagesProcessing(),
+        new TimestampsSortedMessageProcessing(RECORD_DESERIALIZER, msg -> true, null),
         PollingSettings.createDefault()
     );
 
@@ -287,7 +288,7 @@ class RecordEmitterTest extends AbstractIntegrationTest {
         this::createConsumer,
         new ConsumerPosition(OFFSET, TOPIC, offsets),
         100,
-        createMessagesProcessing(),
+        new TimestampsSortedMessageProcessing(RECORD_DESERIALIZER, msg -> true, null),
         PollingSettings.createDefault()
     );