
Merge pull request #2 from gimral/MessagesV2

Messages v2
Gokhan Imral, 1 year ago
commit 838a34a853
45 changed files with 1700 additions and 1088 deletions
  1. kafka-ui-api/pom.xml (+1 -1)
  2. kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/MessagesController.java (+56 -56)
  3. kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/AbstractEmitter.java (+9 -5)
  4. kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/BackwardEmitter.java (+5 -8)
  5. kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/BackwardRecordEmitter.java (+131 -0)
  6. kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/ConsumingStats.java (+8 -1)
  7. kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/Cursor.java (+90 -0)
  8. kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/ForwardEmitter.java (+5 -8)
  9. kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/ForwardRecordEmitter.java (+75 -0)
  10. kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/MessageFilters.java (+15 -24)
  11. kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/MessagesProcessing.java (+8 -3)
  12. kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/OffsetsInfo.java (+11 -11)
  13. kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/PolledRecords.java (+5 -0)
  14. kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/RangePollingEmitter.java (+9 -4)
  15. kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/SeekOperations.java (+50 -62)
  16. kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/TailingEmitter.java (+1 -2)
  17. kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/ConsumerPosition.java (+65 -7)
  18. kafka-ui-api/src/main/java/com/provectus/kafka/ui/serdes/ConsumerRecordDeserializer.java (+7 -11)
  19. kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/MessagesService.java (+109 -44)
  20. kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/PollingCursorsStorage.java (+45 -0)
  21. kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/analyze/TopicAnalysisService.java (+4 -2)
  22. kafka-ui-api/src/main/resources/application-local.yml (+79 -79)
  23. kafka-ui-api/src/main/resources/application.yml (+103 -15)
  24. kafka-ui-api/src/test/java/com/provectus/kafka/ui/KafkaConsumerTests.java (+1 -1)
  25. kafka-ui-api/src/test/java/com/provectus/kafka/ui/emitter/CursorTest.java (+195 -0)
  26. kafka-ui-api/src/test/java/com/provectus/kafka/ui/emitter/MessageFiltersTest.java (+1 -1)
  27. kafka-ui-api/src/test/java/com/provectus/kafka/ui/emitter/SeekOperationsTest.java (+35 -14)
  28. kafka-ui-api/src/test/java/com/provectus/kafka/ui/emitter/TailingEmitterTest.java (+6 -8)
  29. kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/MessagesServiceTest.java (+86 -56)
  30. kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/RecordEmitterTest.java (+63 -50)
  31. kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/SendAndReadTests.java (+3 -8)
  32. kafka-ui-contract/src/main/resources/swagger/kafka-ui-api.yaml (+149 -458)
  33. kafka-ui-react-app/package.json (+2 -2)
  34. kafka-ui-react-app/pnpm-lock.yaml (+19 -15)
  35. kafka-ui-react-app/src/components/Topics/Topic/Messages/Filters/Filters.tsx (+124 -91)
  36. kafka-ui-react-app/src/components/Topics/Topic/Messages/Filters/FiltersContainer.ts (+12 -0)
  37. kafka-ui-react-app/src/components/Topics/Topic/Messages/Filters/getDefaultSerdeName.ts (+0 -0)
  38. kafka-ui-react-app/src/components/Topics/Topic/Messages/Messages.tsx (+12 -33)
  39. kafka-ui-react-app/src/components/Topics/Topic/Messages/MessagesTable.tsx (+7 -6)
  40. kafka-ui-react-app/src/components/common/Search/Search.tsx (+3 -1)
  41. kafka-ui-react-app/src/components/contexts/TopicMessagesContext.ts (+2 -0)
  42. kafka-ui-react-app/src/lib/hooks/api/topics.ts (+25 -0)
  43. kafka-ui-react-app/src/redux/interfaces/topic.ts (+5 -0)
  44. kafka-ui-react-app/src/redux/reducers/topicMessages/selectors.ts (+15 -0)
  45. kafka-ui-react-app/src/redux/reducers/topicMessages/topicMessagesSlice.ts (+44 -1)

+ 1 - 1
kafka-ui-api/pom.xml

@@ -485,7 +485,7 @@
                             <verbose>true</verbose>
                             <images>
                                 <image>
-                                    <name>provectuslabs/kafka-ui:${git.revision}</name>
+                                    <name>gimral/kafka-ui:${git.revision}</name>
                                     <build>
                                         <contextDir>${project.basedir}</contextDir>
                                         <args>

+ 56 - 56
kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/MessagesController.java

@@ -5,13 +5,14 @@ import static com.provectus.kafka.ui.model.rbac.permission.TopicAction.MESSAGES_
 import static com.provectus.kafka.ui.model.rbac.permission.TopicAction.MESSAGES_READ;
 import static com.provectus.kafka.ui.serde.api.Serde.Target.KEY;
 import static com.provectus.kafka.ui.serde.api.Serde.Target.VALUE;
-import static java.util.stream.Collectors.toMap;
 
 import com.provectus.kafka.ui.api.MessagesApi;
-import com.provectus.kafka.ui.exception.ValidationException;
 import com.provectus.kafka.ui.model.ConsumerPosition;
 import com.provectus.kafka.ui.model.CreateTopicMessageDTO;
+import com.provectus.kafka.ui.model.MessageFilterIdDTO;
+import com.provectus.kafka.ui.model.MessageFilterRegistrationDTO;
 import com.provectus.kafka.ui.model.MessageFilterTypeDTO;
+import com.provectus.kafka.ui.model.PollingModeDTO;
 import com.provectus.kafka.ui.model.SeekDirectionDTO;
 import com.provectus.kafka.ui.model.SeekTypeDTO;
 import com.provectus.kafka.ui.model.SerdeUsageDTO;
@@ -25,14 +26,11 @@ import com.provectus.kafka.ui.model.rbac.permission.TopicAction;
 import com.provectus.kafka.ui.service.DeserializationService;
 import com.provectus.kafka.ui.service.MessagesService;
 import java.util.List;
-import java.util.Map;
 import java.util.Optional;
-import javax.annotation.Nullable;
 import javax.validation.Valid;
+import javax.validation.ValidationException;
 import lombok.RequiredArgsConstructor;
 import lombok.extern.slf4j.Slf4j;
-import org.apache.commons.lang3.tuple.Pair;
-import org.apache.kafka.common.TopicPartition;
 import org.springframework.http.ResponseEntity;
 import org.springframework.web.bind.annotation.RestController;
 import org.springframework.web.server.ServerWebExchange;
@@ -76,6 +74,7 @@ public class MessagesController extends AbstractController implements MessagesAp
         .map(ResponseEntity::ok);
   }
 
+  @Deprecated
   @Override
   public Mono<ResponseEntity<Flux<TopicMessageEventDTO>>> getTopicMessages(String clusterName,
                                                                            String topicName,
@@ -88,6 +87,23 @@ public class MessagesController extends AbstractController implements MessagesAp
                                                                            String keySerde,
                                                                            String valueSerde,
                                                                            ServerWebExchange exchange) {
+    throw new ValidationException("Not supported");
+  }
+
+  @Override
+  public Mono<ResponseEntity<Flux<TopicMessageEventDTO>>> getTopicMessagesV2(String clusterName, String topicName,
+                                                                             PollingModeDTO mode,
+                                                                             List<Integer> partitions,
+                                                                             Integer limit,
+                                                                             String stringFilter,
+                                                                             String smartFilterId,
+                                                                             Long offset,
+                                                                             Long timestamp,
+                                                                             String keySerde,
+                                                                             String valueSerde,
+                                                                             String cursor,
+                                                                             ServerWebExchange exchange) {
     var contextBuilder = AccessContext.builder()
         .cluster(clusterName)
         .topic(topicName)
@@ -98,27 +114,26 @@ public class MessagesController extends AbstractController implements MessagesAp
       contextBuilder.auditActions(AuditAction.VIEW);
     }
 
-    seekType = seekType != null ? seekType : SeekTypeDTO.BEGINNING;
-    seekDirection = seekDirection != null ? seekDirection : SeekDirectionDTO.FORWARD;
-    filterQueryType = filterQueryType != null ? filterQueryType : MessageFilterTypeDTO.STRING_CONTAINS;
-
-    var positions = new ConsumerPosition(
-        seekType,
-        topicName,
-        parseSeekTo(topicName, seekType, seekTo)
-    );
-    Mono<ResponseEntity<Flux<TopicMessageEventDTO>>> job = Mono.just(
-        ResponseEntity.ok(
-            messagesService.loadMessages(
-                getCluster(clusterName), topicName, positions, q, filterQueryType,
-                limit, seekDirection, keySerde, valueSerde)
-        )
-    );
-
-    var context = contextBuilder.build();
-    return validateAccess(context)
-        .then(job)
-        .doOnEach(sig -> audit(context, sig));
+    var accessContext = contextBuilder.build();
+
+    Flux<TopicMessageEventDTO> messagesFlux;
+    if (cursor != null) {
+      messagesFlux = messagesService.loadMessages(getCluster(clusterName), topicName, cursor);
+    } else {
+      messagesFlux = messagesService.loadMessages(
+          getCluster(clusterName),
+          topicName,
+          ConsumerPosition.create(mode, topicName, partitions, timestamp, offset),
+          stringFilter,
+          smartFilterId,
+          limit,
+          keySerde,
+          valueSerde
+      );
+    }
+    return accessControlService.validateAccess(accessContext)
+        .then(Mono.just(ResponseEntity.ok(messagesFlux)))
+        .doOnEach(sig -> auditService.audit(accessContext, sig));
   }
 
   @Override
@@ -140,34 +155,6 @@ public class MessagesController extends AbstractController implements MessagesAp
     ).doOnEach(sig -> audit(context, sig));
   }
 
-  /**
-   * The format is [partition]::[offset] for specifying offsets
-   * or [partition]::[timestamp in millis] for specifying timestamps.
-   */
-  @Nullable
-  private Map<TopicPartition, Long> parseSeekTo(String topic, SeekTypeDTO seekType, List<String> seekTo) {
-    if (seekTo == null || seekTo.isEmpty()) {
-      if (seekType == SeekTypeDTO.LATEST || seekType == SeekTypeDTO.BEGINNING) {
-        return null;
-      }
-      throw new ValidationException("seekTo should be set if seekType is " + seekType);
-    }
-    return seekTo.stream()
-        .map(p -> {
-          String[] split = p.split("::");
-          if (split.length != 2) {
-            throw new IllegalArgumentException(
-                "Wrong seekTo argument format. See API docs for details");
-          }
-
-          return Pair.of(
-              new TopicPartition(topic, Integer.parseInt(split[0])),
-              Long.parseLong(split[1])
-          );
-        })
-        .collect(toMap(Pair::getKey, Pair::getValue));
-  }
-
   @Override
   public Mono<ResponseEntity<TopicSerdeSuggestionDTO>> getSerdes(String clusterName,
                                                                  String topicName,
@@ -195,7 +182,20 @@ public class MessagesController extends AbstractController implements MessagesAp
     );
   }
 
+  @Override
+  public Mono<ResponseEntity<MessageFilterIdDTO>> registerFilter(String clusterName,
+                                                                 String topicName,
+                                                                 Mono<MessageFilterRegistrationDTO> registration,
+                                                                 ServerWebExchange exchange) {
 
+    final Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+        .cluster(clusterName)
+        .topic(topicName)
+        .topicActions(MESSAGES_READ)
+        .build());
 
-
+    return validateAccess.then(registration)
+        .map(reg -> messagesService.registerMessageFilter(reg.getFilterCode()))
+        .map(id -> ResponseEntity.ok(new MessageFilterIdDTO().id(id)));
+  }
 }
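
For orientation, a minimal client-side sketch of the new paged flow. This is a sketch under assumptions: the /messages/v2 path, base URL, cluster and topic names are illustrative and not taken from this diff; only the query parameters mirror the getTopicMessagesV2 signature above. The first request seeks by polling mode; follow-up requests pass only the cursor id returned with the DONE event.

import org.springframework.web.reactive.function.client.WebClient;
import reactor.core.publisher.Flux;

class MessagesV2ClientSketch {
  public static void main(String[] args) {
    WebClient client = WebClient.create("http://localhost:8080"); // assumed base URL
    // First page: seek by mode; partitions, limit and filters are optional.
    Flux<String> firstPage = client.get()
        .uri(b -> b.path("/api/clusters/{c}/topics/{t}/messages/v2") // assumed path
            .queryParam("mode", "LATEST")
            .queryParam("limit", 100)
            .build("local", "orders"))
        .retrieve()
        .bodyToFlux(String.class); // raw SSE payloads; DTO mapping omitted
    // Next page: resume with the cursor id from the previous DONE event.
    Flux<String> nextPage = client.get()
        .uri(b -> b.path("/api/clusters/{c}/topics/{t}/messages/v2") // assumed path
            .queryParam("cursor", "<id-from-done-event>")
            .build("local", "orders"))
        .retrieve()
        .bodyToFlux(String.class);
  }
}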

+ 9 - 5
kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/AbstractEmitter.java

@@ -1,6 +1,7 @@
 package com.provectus.kafka.ui.emitter;
 
 import com.provectus.kafka.ui.model.TopicMessageEventDTO;
+import jakarta.annotation.Nullable;
 import org.apache.kafka.clients.consumer.ConsumerRecord;
 import org.apache.kafka.common.utils.Bytes;
 import reactor.core.publisher.FluxSink;
@@ -21,12 +22,14 @@ abstract class AbstractEmitter implements java.util.function.Consumer<FluxSink<T
     return records;
   }
 
-  protected boolean sendLimitReached() {
+  protected boolean isSendLimitReached() {
     return messagesProcessing.limitReached();
   }
 
-  protected void send(FluxSink<TopicMessageEventDTO> sink, Iterable<ConsumerRecord<Bytes, Bytes>> records) {
-    messagesProcessing.send(sink, records);
+  protected void send(FluxSink<TopicMessageEventDTO> sink,
+                      Iterable<ConsumerRecord<Bytes, Bytes>> records,
+                      @Nullable Cursor.Tracking cursor) {
+    messagesProcessing.send(sink, records, cursor);
   }
 
   protected void sendPhase(FluxSink<TopicMessageEventDTO> sink, String name) {
@@ -37,8 +40,9 @@ abstract class AbstractEmitter implements java.util.function.Consumer<FluxSink<T
     messagesProcessing.sentConsumingInfo(sink, records);
   }
 
-  protected void sendFinishStatsAndCompleteSink(FluxSink<TopicMessageEventDTO> sink) {
-    messagesProcessing.sendFinishEvent(sink);
+  // cursor is null if target partitions were fully polled (no need to do paging)
+  protected void sendFinishStatsAndCompleteSink(FluxSink<TopicMessageEventDTO> sink, @Nullable Cursor.Tracking cursor) {
+    messagesProcessing.sendFinishEvents(sink, cursor);
     sink.complete();
   }
 }

+ 5 - 8
kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/BackwardEmitter.java

@@ -18,18 +18,15 @@ public class BackwardEmitter extends RangePollingEmitter {
                          int messagesPerPage,
                          ConsumerRecordDeserializer deserializer,
                          Predicate<TopicMessageDTO> filter,
-                         PollingSettings pollingSettings) {
+                         PollingSettings pollingSettings,
+                         Cursor.Tracking cursor) {
     super(
         consumerSupplier,
         consumerPosition,
         messagesPerPage,
-        new MessagesProcessing(
-            deserializer,
-            filter,
-            false,
-            messagesPerPage
-        ),
-        pollingSettings
+        new MessagesProcessing(deserializer, filter, false, messagesPerPage),
+        pollingSettings,
+        cursor
     );
   }
 

+ 131 - 0
kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/BackwardRecordEmitter.java

@@ -0,0 +1,131 @@
+package com.provectus.kafka.ui.emitter;
+
+import com.provectus.kafka.ui.model.ConsumerPosition;
+import com.provectus.kafka.ui.model.TopicMessageEventDTO;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.List;
+import java.util.TreeMap;
+import java.util.function.Supplier;
+import lombok.extern.slf4j.Slf4j;
+import org.apache.kafka.clients.consumer.Consumer;
+import org.apache.kafka.clients.consumer.ConsumerRecord;
+import org.apache.kafka.clients.consumer.KafkaConsumer;
+import org.apache.kafka.common.TopicPartition;
+import org.apache.kafka.common.errors.InterruptException;
+import org.apache.kafka.common.utils.Bytes;
+import reactor.core.publisher.FluxSink;
+
+@Slf4j
+public class BackwardRecordEmitter extends AbstractEmitter {
+
+  private final Supplier<KafkaConsumer<Bytes, Bytes>> consumerSupplier;
+  private final ConsumerPosition consumerPosition;
+  private final int messagesPerPage;
+  private final Cursor.Tracking cursor;
+
+  public BackwardRecordEmitter(
+      Supplier<KafkaConsumer<Bytes, Bytes>> consumerSupplier,
+      ConsumerPosition consumerPosition,
+      int messagesPerPage,
+      MessagesProcessing messagesProcessing,
+      PollingSettings pollingSettings,
+      Cursor.Tracking cursor) {
+    super(messagesProcessing, pollingSettings);
+    this.consumerPosition = consumerPosition;
+    this.messagesPerPage = messagesPerPage;
+    this.consumerSupplier = consumerSupplier;
+    this.cursor = cursor;
+  }
+
+  @Override
+  public void accept(FluxSink<TopicMessageEventDTO> sink) {
+    log.debug("Starting backward polling for {}", consumerPosition);
+    try (KafkaConsumer<Bytes, Bytes> consumer = consumerSupplier.get()) {
+      sendPhase(sink, "Created consumer");
+
+      var seekOperations = SeekOperations.create(consumer, consumerPosition);
+      var readUntilOffsets = new TreeMap<TopicPartition, Long>(Comparator.comparingInt(TopicPartition::partition));
+      readUntilOffsets.putAll(seekOperations.getOffsetsForSeek());
+      cursor.trackOffsets(readUntilOffsets);
+
+      int msgsToPollPerPartition = (int) Math.ceil((double) messagesPerPage / readUntilOffsets.size());
+      log.debug("'Until' offsets for polling: {}", readUntilOffsets);
+
+      while (!sink.isCancelled() && !readUntilOffsets.isEmpty() && !isSendLimitReached()) {
+        new TreeMap<>(readUntilOffsets).forEach((tp, readToOffset) -> {
+          if (sink.isCancelled()) {
+            return; //fast return in case of sink cancellation
+          }
+          long beginOffset = seekOperations.getBeginOffsets().get(tp);
+          long readFromOffset = Math.max(beginOffset, readToOffset - msgsToPollPerPartition);
+
+          partitionPollIteration(tp, readFromOffset, readToOffset, consumer, sink)
+              .forEach(r -> sendMessage(sink, r));
+
+          if (beginOffset == readFromOffset) {
+            // we fully read this partition -> removing it from polling iterations
+            readUntilOffsets.remove(tp);
+          } else {
+            // updating 'to' offset for next polling iteration
+            readUntilOffsets.put(tp, readFromOffset);
+          }
+        });
+        if (readUntilOffsets.isEmpty()) {
+          log.debug("begin reached after partitions poll iteration");
+        } else if (sink.isCancelled()) {
+          log.debug("sink is cancelled after partitions poll iteration");
+        }
+      }
+      sendFinishStatsAndCompleteSink(sink, readUntilOffsets.isEmpty() ? null : cursor);
+      log.debug("Polling finished");
+    } catch (InterruptException kafkaInterruptException) {
+      log.debug("Polling finished due to thread interruption");
+      sink.complete();
+    } catch (Exception e) {
+      log.error("Error occurred while consuming records", e);
+      sink.error(e);
+    }
+  }
+
+  private List<ConsumerRecord<Bytes, Bytes>> partitionPollIteration(
+      TopicPartition tp,
+      long fromOffset,
+      long toOffset,
+      Consumer<Bytes, Bytes> consumer,
+      FluxSink<TopicMessageEventDTO> sink
+  ) {
+    consumer.assign(Collections.singleton(tp));
+    consumer.seek(tp, fromOffset);
+    cursor.trackOffset(tp, fromOffset);
+    sendPhase(sink, String.format("Polling partition: %s from offset %s", tp, fromOffset));
+    int desiredMsgsToPoll = (int) (toOffset - fromOffset);
+
+    var recordsToSend = new ArrayList<ConsumerRecord<Bytes, Bytes>>();
+
+    EmptyPollsCounter emptyPolls = pollingSettings.createEmptyPollsCounter();
+    while (!sink.isCancelled()
+        && !isSendLimitReached()
+        && recordsToSend.size() < desiredMsgsToPoll
+        && !emptyPolls.noDataEmptyPollsReached()) {
+      var polledRecords = poll(sink, consumer, pollingSettings.getPartitionPollTimeout());
+      emptyPolls.count(polledRecords);
+
+      log.debug("{} records polled from {}", polledRecords.count(), tp);
+
+      var filteredRecords = polledRecords.records(tp).stream()
+          .filter(r -> r.offset() < toOffset)
+          .toList();
+
+      if (!polledRecords.isEmpty() && filteredRecords.isEmpty()) {
+        // we already read all messages in target offsets interval
+        break;
+      }
+      recordsToSend.addAll(filteredRecords);
+    }
+    log.debug("{} records to send", recordsToSend.size());
+    Collections.reverse(recordsToSend);
+    return recordsToSend;
+  }
+}
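
The paging math above is easiest to see with numbers; a standalone sketch with hypothetical values (messagesPerPage and the partition count are illustrative):

class BackwardPagingMathSketch {
  public static void main(String[] args) {
    int messagesPerPage = 100; // page size requested by the client
    int targetPartitions = 3;  // partitions remaining in readUntilOffsets
    int msgsToPollPerPartition = (int) Math.ceil((double) messagesPerPage / targetPartitions);
    System.out.println(msgsToPollPerPartition); // 34
    // Each outer iteration steps every partition's 'to' offset back by up to 34,
    // clamped at the partition's begin offset, until the page is filled or all
    // partitions are drained; each polled range is reversed to keep newest-first order.
  }
}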

+ 8 - 1
kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/ConsumingStats.java

@@ -2,6 +2,8 @@ package com.provectus.kafka.ui.emitter;
 
 import com.provectus.kafka.ui.model.TopicMessageConsumingDTO;
 import com.provectus.kafka.ui.model.TopicMessageEventDTO;
+import com.provectus.kafka.ui.model.TopicMessageNextPageCursorDTO;
+import javax.annotation.Nullable;
 import reactor.core.publisher.FluxSink;
 
 class ConsumingStats {
@@ -26,10 +28,15 @@ class ConsumingStats {
     filterApplyErrors++;
   }
 
-  void sendFinishEvent(FluxSink<TopicMessageEventDTO> sink) {
+  void sendFinishEvent(FluxSink<TopicMessageEventDTO> sink, @Nullable Cursor.Tracking cursor) {
     sink.next(
         new TopicMessageEventDTO()
             .type(TopicMessageEventDTO.TypeEnum.DONE)
+            .cursor(
+                cursor != null
+                    ? new TopicMessageNextPageCursorDTO().id(cursor.registerCursor())
+                    : null
+            )
             .consuming(createConsumingStats())
     );
   }

+ 90 - 0
kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/Cursor.java

@@ -0,0 +1,90 @@
+package com.provectus.kafka.ui.emitter;
+
+import com.google.common.collect.HashBasedTable;
+import com.google.common.collect.Table;
+import com.provectus.kafka.ui.model.ConsumerPosition;
+import com.provectus.kafka.ui.model.PollingModeDTO;
+import com.provectus.kafka.ui.model.TopicMessageDTO;
+import com.provectus.kafka.ui.serdes.ConsumerRecordDeserializer;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.function.Function;
+import java.util.function.Predicate;
+import org.apache.kafka.common.TopicPartition;
+
+public record Cursor(ConsumerRecordDeserializer deserializer,
+                     ConsumerPosition consumerPosition,
+                     Predicate<TopicMessageDTO> filter,
+                     int limit) {
+
+  public static class Tracking {
+    private final ConsumerRecordDeserializer deserializer;
+    private final ConsumerPosition originalPosition;
+    private final Predicate<TopicMessageDTO> filter;
+    private final int limit;
+    private final Function<Cursor, String> registerAction;
+
+    //topic -> partition -> offset
+    private final Table<String, Integer, Long> trackingOffsets = HashBasedTable.create();
+
+    public Tracking(ConsumerRecordDeserializer deserializer,
+                    ConsumerPosition originalPosition,
+                    Predicate<TopicMessageDTO> filter,
+                    int limit,
+                    Function<Cursor, String> registerAction) {
+      this.deserializer = deserializer;
+      this.originalPosition = originalPosition;
+      this.filter = filter;
+      this.limit = limit;
+      this.registerAction = registerAction;
+    }
+
+    void trackOffset(String topic, int partition, long offset) {
+      trackingOffsets.put(topic, partition, offset);
+    }
+
+    void trackOffset(TopicPartition tp, long offset) {
+      trackOffset(tp.topic(), tp.partition(), offset);
+    }
+
+    void trackOffsets(Map<TopicPartition, Long> offsets) {
+      offsets.forEach(this::trackOffset);
+    }
+
+    void initOffsets(Map<TopicPartition, Long> initialSeekOffsets) {
+      initialSeekOffsets.forEach((tp, off) -> trackOffset(tp.topic(), tp.partition(), off));
+    }
+
+    private Map<TopicPartition, Long> getOffsetsMap(int offsetToAdd) {
+      Map<TopicPartition, Long> result = new HashMap<>();
+      trackingOffsets.rowMap()
+          .forEach((topic, partsMap) ->
+              partsMap.forEach((p, off) -> result.put(new TopicPartition(topic, p), off + offsetToAdd)));
+      return result;
+    }
+
+    String registerCursor() {
+      return registerAction.apply(
+          new Cursor(
+              deserializer,
+              new ConsumerPosition(
+                  switch (originalPosition.pollingMode()) {
+                    case TO_OFFSET, TO_TIMESTAMP, LATEST -> PollingModeDTO.TO_OFFSET;
+                    case FROM_OFFSET, FROM_TIMESTAMP, EARLIEST -> PollingModeDTO.FROM_OFFSET;
+                    case TAILING -> throw new IllegalStateException();
+                  },
+                  originalPosition.topic(),
+                  originalPosition.partitions(),
+                  null,
+                  new ConsumerPosition.Offsets(
+                      null,
+                      getOffsetsMap(
+                          switch (originalPosition.pollingMode()) {
+                            case TO_OFFSET, TO_TIMESTAMP, LATEST -> 0;
+                            // when doing forward polling we need to start from latest msg's offset + 1
+                            case FROM_OFFSET, FROM_TIMESTAMP, EARLIEST -> 1;
+                            case TAILING -> throw new IllegalStateException();
+                          }
+                      )
+                  )
+              ),
+              filter,
+              limit
+          )
+      );
+    }
+  }
+
+}
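
A minimal sketch of how a cursor round-trips, assuming an in-memory map as a stand-in for PollingCursorsStorage (whose implementation is not shown in this hunk). While polling, the emitter calls trackOffset(...) for each record it sends; when the DONE event is emitted, registerCursor() snapshots those offsets into a new ConsumerPosition (shifted by +1 for forward modes) and hands it to registerAction, which returns the id the client sends back as the cursor query parameter.

package com.provectus.kafka.ui.emitter; // Tracking's mutators are package-private

import java.util.Map;
import java.util.UUID;
import java.util.concurrent.ConcurrentHashMap;
import java.util.function.Function;

class CursorRoundTripSketch {
  // Stand-in for PollingCursorsStorage: id -> cursor, looked up on the next request.
  static final Map<String, Cursor> STORAGE = new ConcurrentHashMap<>();

  static Function<Cursor, String> registerAction() {
    return cursor -> {
      String id = UUID.randomUUID().toString();
      STORAGE.put(id, cursor);
      return id; // returned to the client inside TopicMessageNextPageCursorDTO
    };
  }
}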

+ 5 - 8
kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/ForwardEmitter.java

@@ -18,18 +18,15 @@ public class ForwardEmitter extends RangePollingEmitter {
                         int messagesPerPage,
                         ConsumerRecordDeserializer deserializer,
                         Predicate<TopicMessageDTO> filter,
-                        PollingSettings pollingSettings) {
+                        PollingSettings pollingSettings,
+                        Cursor.Tracking cursor) {
     super(
         consumerSupplier,
         consumerPosition,
         messagesPerPage,
-        new MessagesProcessing(
-            deserializer,
-            filter,
-            true,
-            messagesPerPage
-        ),
-        pollingSettings
+        new MessagesProcessing(deserializer, filter, true, messagesPerPage),
+        pollingSettings,
+        cursor
     );
   }
 

+ 75 - 0
kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/ForwardRecordEmitter.java

@@ -0,0 +1,75 @@
+package com.provectus.kafka.ui.emitter;
+
+import com.provectus.kafka.ui.model.ConsumerPosition;
+import com.provectus.kafka.ui.model.TopicMessageEventDTO;
+import java.util.function.Supplier;
+import lombok.extern.slf4j.Slf4j;
+import org.apache.kafka.clients.consumer.ConsumerRecord;
+import org.apache.kafka.clients.consumer.KafkaConsumer;
+import org.apache.kafka.common.TopicPartition;
+import org.apache.kafka.common.errors.InterruptException;
+import org.apache.kafka.common.utils.Bytes;
+import reactor.core.publisher.FluxSink;
+
+@Slf4j
+public class ForwardRecordEmitter extends AbstractEmitter {
+
+  private final Supplier<KafkaConsumer<Bytes, Bytes>> consumerSupplier;
+  private final ConsumerPosition position;
+  private final Cursor.Tracking cursor;
+
+  public ForwardRecordEmitter(
+      Supplier<KafkaConsumer<Bytes, Bytes>> consumerSupplier,
+      ConsumerPosition position,
+      MessagesProcessing messagesProcessing,
+      PollingSettings pollingSettings,
+      Cursor.Tracking cursor) {
+    super(messagesProcessing, pollingSettings);
+    this.position = position;
+    this.consumerSupplier = consumerSupplier;
+    this.cursor = cursor;
+  }
+
+  @Override
+  public void accept(FluxSink<TopicMessageEventDTO> sink) {
+    log.debug("Starting forward polling for {}", position);
+    try (KafkaConsumer<Bytes, Bytes> consumer = consumerSupplier.get()) {
+      sendPhase(sink, "Assigning partitions");
+      var seekOperations = SeekOperations.create(consumer, position);
+      seekOperations.assignAndSeekNonEmptyPartitions();
+      cursor.trackOffsets(seekOperations.getOffsetsForSeek());
+
+      EmptyPollsCounter emptyPolls = pollingSettings.createEmptyPollsCounter();
+      while (!sink.isCancelled()
+          && !isSendLimitReached()
+          && !seekOperations.assignedPartitionsFullyPolled()
+          && !emptyPolls.noDataEmptyPollsReached()) {
+
+        sendPhase(sink, "Polling");
+        var records = poll(sink, consumer);
+        emptyPolls.count(records);
+        log.debug("{} records polled", records.count());
+
+        for (TopicPartition tp : records.partitions()) {
+          for (ConsumerRecord<Bytes, Bytes> record : records.records(tp)) {
+            // checking if the send limit is reached - if so, we skip the rest of the
+            // already polled records (and don't track their offsets) - they will be
+            // present on the next page, polled via cursor
+            if (!isSendLimitReached()) {
+              sendMessage(sink, record);
+              cursor.trackOffset(tp, record.offset() + 1);
+            }
+          }
+        }
+      }
+      sendFinishStatsAndCompleteSink(sink, !isSendLimitReached() ? null : cursor);
+      log.debug("Polling finished");
+    } catch (InterruptException kafkaInterruptException) {
+      log.debug("Polling finished due to thread interruption");
+      sink.complete();
+    } catch (Exception e) {
+      log.error("Error occurred while consuming records", e);
+      sink.error(e);
+    }
+  }
+
+}

+ 15 - 24
kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/MessageFilters.java

@@ -1,7 +1,6 @@
 package com.provectus.kafka.ui.emitter;
 
 import com.provectus.kafka.ui.exception.ValidationException;
-import com.provectus.kafka.ui.model.MessageFilterTypeDTO;
 import com.provectus.kafka.ui.model.TopicMessageDTO;
 import groovy.json.JsonSlurper;
 import java.util.function.Predicate;
@@ -22,59 +21,51 @@ public class MessageFilters {
   private MessageFilters() {
   }
 
-  public static Predicate<TopicMessageDTO> createMsgFilter(String query, MessageFilterTypeDTO type) {
-    switch (type) {
-      case STRING_CONTAINS:
-        return containsStringFilter(query);
-      case GROOVY_SCRIPT:
-        return groovyScriptFilter(query);
-      default:
-        throw new IllegalStateException("Unknown query type: " + type);
-    }
+  public static Predicate<TopicMessageDTO> noop() {
+    return e -> true;
   }
 
-  static Predicate<TopicMessageDTO> containsStringFilter(String string) {
-    return msg -> StringUtils.contains(msg.getKey(), string)
-        || StringUtils.contains(msg.getContent(), string);
+  public static Predicate<TopicMessageDTO> containsStringFilter(String string) {
+    return msg -> StringUtils.containsIgnoreCase(msg.getKey(), string)
+        || StringUtils.containsIgnoreCase(msg.getContent(), string);
   }
 
-  static Predicate<TopicMessageDTO> groovyScriptFilter(String script) {
-    var engine = getGroovyEngine();
-    var compiledScript = compileScript(engine, script);
+  public static Predicate<TopicMessageDTO> groovyScriptFilter(String script) {
+    var compiledScript = compileScript(script);
     var jsonSlurper = new JsonSlurper();
     return new Predicate<TopicMessageDTO>() {
       @SneakyThrows
       @Override
       public boolean test(TopicMessageDTO msg) {
-        var bindings = engine.createBindings();
+        var bindings = getGroovyEngine().createBindings();
         bindings.put("partition", msg.getPartition());
         bindings.put("offset", msg.getOffset());
         bindings.put("timestampMs", msg.getTimestamp().toInstant().toEpochMilli());
         bindings.put("keyAsText", msg.getKey());
         bindings.put("valueAsText", msg.getContent());
         bindings.put("headers", msg.getHeaders());
-        bindings.put("key", parseToJsonOrReturnAsIs(jsonSlurper, msg.getKey()));
-        bindings.put("value", parseToJsonOrReturnAsIs(jsonSlurper, msg.getContent()));
+        bindings.put("key", parseToJsonOrReturnNull(jsonSlurper, msg.getKey()));
+        bindings.put("value", parseToJsonOrReturnNull(jsonSlurper, msg.getContent()));
         var result = compiledScript.eval(bindings);
         if (result instanceof Boolean) {
           return (Boolean) result;
         } else {
           throw new ValidationException(
-              "Unexpected script result: %s, Boolean should be returned instead".formatted(result));
+              String.format("Unexpected script result: %s, Boolean should be returned instead", result));
         }
       }
     };
   }
 
   @Nullable
-  private static Object parseToJsonOrReturnAsIs(JsonSlurper parser, @Nullable String str) {
+  private static Object parseToJsonOrReturnNull(JsonSlurper parser, @Nullable String str) {
     if (str == null) {
       return null;
     }
     try {
       return parser.parseText(str);
     } catch (Exception e) {
-      return str;
+      return null;
     }
   }
 
@@ -87,9 +78,9 @@ public class MessageFilters {
     return GROOVY_ENGINE;
   }
 
-  private static CompiledScript compileScript(GroovyScriptEngineImpl engine, String script) {
+  private static CompiledScript compileScript(String script) {
     try {
-      return engine.compile(script);
+      return getGroovyEngine().compile(script);
     } catch (ScriptException e) {
       throw new ValidationException("Script syntax error: " + e.getMessage());
     }
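
With createMsgFilter removed, callers now pick the predicate explicitly. A short usage sketch (the Groovy script and search term are illustrative):

import com.provectus.kafka.ui.emitter.MessageFilters;
import com.provectus.kafka.ui.model.TopicMessageDTO;
import java.util.function.Predicate;

class FilterUsageSketch {
  static Predicate<TopicMessageDTO> errorsOnly() {
    // 'value' is the message content parsed as JSON, or null if unparsable
    // (per parseToJsonOrReturnNull above); the script must return a Boolean.
    return MessageFilters.groovyScriptFilter("value != null && value.level == 'ERROR'");
  }

  static Predicate<TopicMessageDTO> quickSearch(String term) {
    return MessageFilters.containsStringFilter(term); // now case-insensitive
  }
}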

+ 8 - 3
kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/MessagesProcessing.java

@@ -39,7 +39,9 @@ class MessagesProcessing {
     return limit != null && sentMessages >= limit;
   }
 
-  void send(FluxSink<TopicMessageEventDTO> sink, Iterable<ConsumerRecord<Bytes, Bytes>> polled) {
+  void send(FluxSink<TopicMessageEventDTO> sink,
+            Iterable<ConsumerRecord<Bytes, Bytes>> polled,
+            @Nullable Cursor.Tracking cursor) {
     sortForSending(polled, ascendingSortBeforeSend)
         .forEach(rec -> {
           if (!limitReached() && !sink.isCancelled()) {
@@ -53,6 +55,9 @@ class MessagesProcessing {
                 );
                 sentMessages++;
               }
+              if (cursor != null) {
+                cursor.trackOffset(rec.topic(), rec.partition(), rec.offset());
+              }
             } catch (Exception e) {
               consumingStats.incFilterApplyError();
               log.trace("Error applying filter for message {}", topicMessage);
@@ -67,9 +72,9 @@ class MessagesProcessing {
     }
   }
 
-  void sendFinishEvent(FluxSink<TopicMessageEventDTO> sink) {
+  void sendFinishEvents(FluxSink<TopicMessageEventDTO> sink, @Nullable Cursor.Tracking cursor) {
     if (!sink.isCancelled()) {
-      consumingStats.sendFinishEvent(sink);
+      consumingStats.sendFinishEvent(sink, cursor);
     }
   }
 

+ 11 - 11
kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/OffsetsInfo.java

@@ -1,19 +1,20 @@
 package com.provectus.kafka.ui.emitter;
 
 import com.google.common.base.Preconditions;
+import com.google.common.collect.Sets;
 import java.util.Collection;
 import java.util.HashSet;
 import java.util.Map;
 import java.util.Set;
+import java.util.stream.Collectors;
 import lombok.Getter;
 import lombok.extern.slf4j.Slf4j;
-import org.apache.commons.lang3.mutable.MutableLong;
 import org.apache.kafka.clients.consumer.Consumer;
 import org.apache.kafka.common.TopicPartition;
 
 @Slf4j
 @Getter
-class OffsetsInfo {
+public class OffsetsInfo {
 
   private final Consumer<?, ?> consumer;
 
@@ -23,15 +24,16 @@ class OffsetsInfo {
   private final Set<TopicPartition> nonEmptyPartitions = new HashSet<>();
   private final Set<TopicPartition> emptyPartitions = new HashSet<>();
 
-  OffsetsInfo(Consumer<?, ?> consumer, String topic) {
+  public OffsetsInfo(Consumer<?, ?> consumer, String topic) {
     this(consumer,
         consumer.partitionsFor(topic).stream()
             .map(pi -> new TopicPartition(topic, pi.partition()))
-            .toList()
+            .collect(Collectors.toList())
     );
   }
 
-  OffsetsInfo(Consumer<?, ?> consumer, Collection<TopicPartition> targetPartitions) {
+  public OffsetsInfo(Consumer<?, ?> consumer,
+                     Collection<TopicPartition> targetPartitions) {
     this.consumer = consumer;
     this.beginOffsets = consumer.beginningOffsets(targetPartitions);
     this.endOffsets = consumer.endOffsets(targetPartitions);
@@ -45,8 +47,8 @@ class OffsetsInfo {
     });
   }
 
-  boolean assignedPartitionsFullyPolled() {
-    for (var tp : consumer.assignment()) {
+  public boolean assignedPartitionsFullyPolled() {
+    for (var tp : consumer.assignment()) {
       Preconditions.checkArgument(endOffsets.containsKey(tp));
       if (endOffsets.get(tp) > consumer.position(tp)) {
         return false;
@@ -55,10 +57,8 @@ class OffsetsInfo {
     return true;
   }
 
-  long summaryOffsetsRange() {
-    MutableLong cnt = new MutableLong();
-    nonEmptyPartitions.forEach(tp -> cnt.add(endOffsets.get(tp) - beginOffsets.get(tp)));
-    return cnt.getValue();
+  public Set<TopicPartition> allTargetPartitions() {
+    return Sets.union(nonEmptyPartitions, emptyPartitions);
   }
 
 }

+ 5 - 0
kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/PolledRecords.java

@@ -3,6 +3,7 @@ package com.provectus.kafka.ui.emitter;
 import java.time.Duration;
 import java.util.Iterator;
 import java.util.List;
+import java.util.Set;
 import org.apache.kafka.clients.consumer.ConsumerRecord;
 import org.apache.kafka.clients.consumer.ConsumerRecords;
 import org.apache.kafka.common.TopicPartition;
@@ -32,6 +33,10 @@ public record PolledRecords(int count,
     return records.iterator();
   }
 
+  public Set<TopicPartition> partitions() {
+    return records.partitions();
+  }
+
   private static int calculatePolledRecSize(Iterable<ConsumerRecord<Bytes, Bytes>> recs) {
     int polledBytes = 0;
     for (ConsumerRecord<Bytes, Bytes> rec : recs) {

+ 9 - 4
kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/RangePollingEmitter.java

@@ -17,6 +17,7 @@ import reactor.core.publisher.FluxSink;
 abstract class RangePollingEmitter extends AbstractEmitter {
 
   private final Supplier<EnhancedConsumer> consumerSupplier;
+  private final Cursor.Tracking cursor;
   protected final ConsumerPosition consumerPosition;
   protected final int messagesPerPage;
 
@@ -24,11 +25,13 @@ abstract class RangePollingEmitter extends AbstractEmitter {
                                 ConsumerPosition consumerPosition,
                                 int messagesPerPage,
                                 MessagesProcessing messagesProcessing,
-                                PollingSettings pollingSettings) {
+                                PollingSettings pollingSettings,
+                                Cursor.Tracking cursor) {
     super(messagesProcessing, pollingSettings);
     this.consumerPosition = consumerPosition;
     this.messagesPerPage = messagesPerPage;
     this.consumerSupplier = consumerSupplier;
+    this.cursor = cursor;
   }
 
   protected record FromToOffset(/*inclusive*/ long from, /*exclusive*/ long to) {
@@ -46,18 +49,20 @@ abstract class RangePollingEmitter extends AbstractEmitter {
     try (EnhancedConsumer consumer = consumerSupplier.get()) {
       sendPhase(sink, "Consumer created");
       var seekOperations = SeekOperations.create(consumer, consumerPosition);
+      cursor.initOffsets(seekOperations.getOffsetsForSeek());
+
       TreeMap<TopicPartition, FromToOffset> pollRange = nextPollingRange(new TreeMap<>(), seekOperations);
       log.debug("Starting from offsets {}", pollRange);
 
-      while (!sink.isCancelled() && !pollRange.isEmpty() && !sendLimitReached()) {
+      while (!sink.isCancelled() && !pollRange.isEmpty() && !isSendLimitReached()) {
         var polled = poll(consumer, sink, pollRange);
-        send(sink, polled);
+        send(sink, polled, cursor);
         pollRange = nextPollingRange(pollRange, seekOperations);
       }
       if (sink.isCancelled()) {
         log.debug("Polling finished due to sink cancellation");
       }
-      sendFinishStatsAndCompleteSink(sink);
+      sendFinishStatsAndCompleteSink(sink, pollRange.isEmpty() ? null : cursor);
       log.debug("Polling finished");
     } catch (InterruptException kafkaInterruptException) {
       log.debug("Polling finished due to thread interruption");

+ 50 - 62
kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/SeekOperations.java

@@ -1,38 +1,31 @@
 package com.provectus.kafka.ui.emitter;
 
+import static com.provectus.kafka.ui.model.PollingModeDTO.TO_TIMESTAMP;
+import static java.util.Objects.requireNonNull;
+
 import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
 import com.provectus.kafka.ui.model.ConsumerPosition;
-import com.provectus.kafka.ui.model.SeekTypeDTO;
+import com.provectus.kafka.ui.model.PollingModeDTO;
 import java.util.HashMap;
 import java.util.Map;
-import java.util.stream.Collectors;
-import javax.annotation.Nullable;
 import lombok.AccessLevel;
 import lombok.RequiredArgsConstructor;
-import org.apache.commons.lang3.mutable.MutableLong;
 import org.apache.kafka.clients.consumer.Consumer;
 import org.apache.kafka.common.TopicPartition;
 
 @RequiredArgsConstructor(access = AccessLevel.PACKAGE)
-public class SeekOperations {
+class SeekOperations {
 
   private final Consumer<?, ?> consumer;
   private final OffsetsInfo offsetsInfo;
   private final Map<TopicPartition, Long> offsetsForSeek; //only contains non-empty partitions!
 
   public static SeekOperations create(Consumer<?, ?> consumer, ConsumerPosition consumerPosition) {
-    OffsetsInfo offsetsInfo;
-    if (consumerPosition.getSeekTo() == null) {
-      offsetsInfo = new OffsetsInfo(consumer, consumerPosition.getTopic());
-    } else {
-      offsetsInfo = new OffsetsInfo(consumer, consumerPosition.getSeekTo().keySet());
-    }
-    return new SeekOperations(
-        consumer,
-        offsetsInfo,
-        getOffsetsForSeek(consumer, offsetsInfo, consumerPosition.getSeekType(), consumerPosition.getSeekTo())
-    );
+    OffsetsInfo offsetsInfo = consumerPosition.partitions().isEmpty()
+        ? new OffsetsInfo(consumer, consumerPosition.topic())
+        : new OffsetsInfo(consumer, consumerPosition.partitions());
+    var offsetsToSeek = getOffsetsForSeek(consumer, offsetsInfo, consumerPosition);
+    return new SeekOperations(consumer, offsetsInfo, offsetsToSeek);
   }
 
   public void assignAndSeekNonEmptyPartitions() {
@@ -40,32 +33,16 @@ public class SeekOperations {
     offsetsForSeek.forEach(consumer::seek);
   }
 
-  public Map<TopicPartition, Long> getBeginOffsets() {
+  Map<TopicPartition, Long> getBeginOffsets() {
     return offsetsInfo.getBeginOffsets();
   }
 
-  public Map<TopicPartition, Long> getEndOffsets() {
-    return offsetsInfo.getEndOffsets();
-  }
-
-  public boolean assignedPartitionsFullyPolled() {
+  boolean assignedPartitionsFullyPolled() {
     return offsetsInfo.assignedPartitionsFullyPolled();
   }
 
-  // sum of (end - start) offsets for all partitions
-  public long summaryOffsetsRange() {
-    return offsetsInfo.summaryOffsetsRange();
-  }
-
-  // sum of differences between initial consumer seek and current consumer position (across all partitions)
-  public long offsetsProcessedFromSeek() {
-    MutableLong count = new MutableLong();
-    offsetsForSeek.forEach((tp, initialOffset) -> count.add(consumer.position(tp) - initialOffset));
-    return count.getValue();
-  }
-
   // Get offsets to seek to. NOTE: offsets do not contain empty partitions offsets
-  public Map<TopicPartition, Long> getOffsetsForSeek() {
+  Map<TopicPartition, Long> getOffsetsForSeek() {
     return offsetsForSeek;
   }
 
@@ -75,27 +52,26 @@ public class SeekOperations {
   @VisibleForTesting
   static Map<TopicPartition, Long> getOffsetsForSeek(Consumer<?, ?> consumer,
                                                      OffsetsInfo offsetsInfo,
-                                                     SeekTypeDTO seekType,
-                                                     @Nullable Map<TopicPartition, Long> seekTo) {
-    switch (seekType) {
-      case LATEST:
-        return consumer.endOffsets(offsetsInfo.getNonEmptyPartitions());
-      case BEGINNING:
-        return consumer.beginningOffsets(offsetsInfo.getNonEmptyPartitions());
-      case OFFSET:
-        Preconditions.checkNotNull(seekTo);
-        return fixOffsets(offsetsInfo, seekTo);
-      case TIMESTAMP:
-        Preconditions.checkNotNull(seekTo);
-        return offsetsForTimestamp(consumer, offsetsInfo, seekTo);
-      default:
-        throw new IllegalStateException();
-    }
+                                                     ConsumerPosition position) {
+    return switch (position.pollingMode()) {
+      case TAILING -> consumer.endOffsets(offsetsInfo.allTargetPartitions());
+      case LATEST -> consumer.endOffsets(offsetsInfo.getNonEmptyPartitions());
+      case EARLIEST -> consumer.beginningOffsets(offsetsInfo.getNonEmptyPartitions());
+      case FROM_OFFSET, TO_OFFSET -> fixOffsets(offsetsInfo, requireNonNull(position.offsets()));
+      case FROM_TIMESTAMP, TO_TIMESTAMP ->
+          offsetsForTimestamp(consumer, position.pollingMode(), offsetsInfo, requireNonNull(position.timestamp()));
+    };
   }
 
-  private static Map<TopicPartition, Long> fixOffsets(OffsetsInfo offsetsInfo, Map<TopicPartition, Long> offsets) {
-    offsets = new HashMap<>(offsets);
-    offsets.keySet().retainAll(offsetsInfo.getNonEmptyPartitions());
+  private static Map<TopicPartition, Long> fixOffsets(OffsetsInfo offsetsInfo,
+                                                      ConsumerPosition.Offsets positionOffset) {
+    var offsets = new HashMap<TopicPartition, Long>();
+    if (positionOffset.offset() != null) {
+      offsetsInfo.getNonEmptyPartitions().forEach(tp -> offsets.put(tp, positionOffset.offset()));
+    } else {
+      offsets.putAll(requireNonNull(positionOffset.tpOffsets()));
+      offsets.keySet().retainAll(offsetsInfo.getNonEmptyPartitions());
+    }
 
     Map<TopicPartition, Long> result = new HashMap<>();
     offsets.forEach((tp, targetOffset) -> {
@@ -112,13 +88,25 @@ public class SeekOperations {
     return result;
   }
 
-  private static Map<TopicPartition, Long> offsetsForTimestamp(Consumer<?, ?> consumer, OffsetsInfo offsetsInfo,
-                                                               Map<TopicPartition, Long> timestamps) {
-    timestamps = new HashMap<>(timestamps);
-    timestamps.keySet().retainAll(offsetsInfo.getNonEmptyPartitions());
+  private static Map<TopicPartition, Long> offsetsForTimestamp(Consumer<?, ?> consumer,
+                                                               PollingModeDTO pollingMode,
+                                                               OffsetsInfo offsetsInfo,
+                                                               Long timestamp) {
+    Map<TopicPartition, Long> timestamps = new HashMap<>();
+    offsetsInfo.getNonEmptyPartitions().forEach(tp -> timestamps.put(tp, timestamp));
 
-    return consumer.offsetsForTimes(timestamps).entrySet().stream()
-        .filter(e -> e.getValue() != null)
-        .collect(Collectors.toMap(Map.Entry::getKey, e -> e.getValue().offset()));
+    Map<TopicPartition, Long> result = new HashMap<>();
+    consumer.offsetsForTimes(timestamps).forEach((tp, offsetAndTimestamp) -> {
+      if (offsetAndTimestamp == null) {
+        if (pollingMode == TO_TIMESTAMP && offsetsInfo.getNonEmptyPartitions().contains(tp)) {
+          // if no offset was returned, this means that *all* record timestamps are lower
+          // than the target timestamp. In case of TO_TIMESTAMP mode we need to read to the end of the tp
+          result.put(tp, offsetsInfo.getEndOffsets().get(tp));
+        }
+      } else {
+        result.put(tp, offsetAndTimestamp.offset());
+      }
+    });
+    return result;
   }
 }
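
To make the TO_TIMESTAMP fallback concrete, a single-partition sketch (class and method names are illustrative): Consumer.offsetsForTimes() returns null for a partition whose records all carry timestamps below the target, and in that case the upper bound is extended to the partition's end offset.

import java.util.Map;
import java.util.Set;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.OffsetAndTimestamp;
import org.apache.kafka.common.TopicPartition;

class ToTimestampSketch {
  // Resolve the read upper bound for one partition, mirroring offsetsForTimestamp().
  static long upperBound(Consumer<?, ?> consumer, TopicPartition tp, long targetTs) {
    OffsetAndTimestamp found = consumer.offsetsForTimes(Map.of(tp, targetTs)).get(tp);
    return found != null
        ? found.offset()                           // earliest record with timestamp >= targetTs
        : consumer.endOffsets(Set.of(tp)).get(tp); // all records older: read to the end
  }
}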

+ 1 - 2
kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/TailingEmitter.java

@@ -35,7 +35,7 @@ public class TailingEmitter extends AbstractEmitter {
       while (!sink.isCancelled()) {
         sendPhase(sink, "Polling");
         var polled = poll(sink, consumer);
-        send(sink, polled);
+        send(sink, polled, null);
       }
       sink.complete();
       log.debug("Tailing finished");
@@ -55,5 +55,4 @@ public class TailingEmitter extends AbstractEmitter {
     consumer.assign(seekOffsets.keySet());
     seekOffsets.forEach(consumer::seek);
   }
-
 }

+ 65 - 7
kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/ConsumerPosition.java

@@ -1,14 +1,72 @@
 package com.provectus.kafka.ui.model;
 
+import com.google.common.base.Preconditions;
+import com.provectus.kafka.ui.exception.ValidationException;
+import java.util.List;
 import java.util.Map;
+import java.util.Optional;
+import java.util.stream.Collectors;
 import javax.annotation.Nullable;
-import lombok.Value;
 import org.apache.kafka.common.TopicPartition;
 
-@Value
-public class ConsumerPosition {
-  SeekTypeDTO seekType;
-  String topic;
-  @Nullable
-  Map<TopicPartition, Long> seekTo; // null if positioning should apply to all tps
+public record ConsumerPosition(PollingModeDTO pollingMode,
+                               String topic,
+                               List<TopicPartition> partitions, //all partitions if list is empty
+                               @Nullable Long timestamp,
+                               @Nullable Offsets offsets) {
+
+  public record Offsets(@Nullable Long offset, //should be applied to all partitions
+                        @Nullable Map<TopicPartition, Long> tpOffsets) {
+    public Offsets {
+      // only one of properties should be set
+      Preconditions.checkArgument((offset == null && tpOffsets != null) || (offset != null && tpOffsets == null));
+    }
+  }
+
+  public static ConsumerPosition create(PollingModeDTO pollingMode,
+                                        String topic,
+                                        @Nullable List<Integer> partitions,
+                                        @Nullable Long timestamp,
+                                        @Nullable Long offset) {
+    @Nullable var offsets = parseAndValidateOffsets(pollingMode, offset);
+
+    var topicPartitions = Optional.ofNullable(partitions).orElse(List.of())
+        .stream()
+        .map(p -> new TopicPartition(topic, p))
+        .collect(Collectors.toList());
+
+    // if offsets are specified - inferring partitions list from there
+    topicPartitions = (offsets != null && offsets.tpOffsets() != null)
+        ? List.copyOf(offsets.tpOffsets().keySet())
+        : topicPartitions;
+
+    return new ConsumerPosition(
+        pollingMode,
+        topic,
+        topicPartitions,
+        validateTimestamp(pollingMode, timestamp),
+        offsets
+    );
+  }
+
+  private static Long validateTimestamp(PollingModeDTO pollingMode, @Nullable Long ts) {
+    if (pollingMode == PollingModeDTO.FROM_TIMESTAMP || pollingMode == PollingModeDTO.TO_TIMESTAMP) {
+      if (ts == null) {
+        throw new ValidationException("timestamp not provided for " + pollingMode);
+      }
+    }
+    return ts;
+  }
+
+  private static Offsets parseAndValidateOffsets(PollingModeDTO pollingMode,
+                                                 @Nullable Long offset) {
+    if (pollingMode == PollingModeDTO.FROM_OFFSET || pollingMode == PollingModeDTO.TO_OFFSET) {
+      if (offset == null) {
+        throw new ValidationException("offsets not provided for " + pollingMode);
+      }
+      return new Offsets(offset, null);
+    }
+    return null;
+  }
+
 }
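
A few examples of the factory's validation rules (topic, partitions and values are illustrative):

import com.provectus.kafka.ui.model.ConsumerPosition;
import com.provectus.kafka.ui.model.PollingModeDTO;
import java.util.List;

class ConsumerPositionSketch {
  public static void main(String[] args) {
    // Offset modes require an offset, applied to every selected partition.
    var fromOffset = ConsumerPosition.create(
        PollingModeDTO.FROM_OFFSET, "orders", List.of(0, 1), null, 42L);

    // Timestamp modes require an epoch-millis timestamp.
    var toTimestamp = ConsumerPosition.create(
        PollingModeDTO.TO_TIMESTAMP, "orders", null, 1700000000000L, null);

    // EARLIEST/LATEST need neither; a null or empty partitions list means all partitions.
    var latest = ConsumerPosition.create(PollingModeDTO.LATEST, "orders", null, null, null);

    // FROM_OFFSET with a null offset would throw ValidationException.
  }
}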

+ 7 - 11
kafka-ui-api/src/main/java/com/provectus/kafka/ui/serdes/ConsumerRecordDeserializer.java

@@ -1,7 +1,6 @@
 package com.provectus.kafka.ui.serdes;
 
 import com.provectus.kafka.ui.model.TopicMessageDTO;
-import com.provectus.kafka.ui.model.TopicMessageDTO.TimestampTypeEnum;
 import com.provectus.kafka.ui.serde.api.Serde;
 import java.time.Instant;
 import java.time.OffsetDateTime;
@@ -9,7 +8,6 @@ import java.time.ZoneId;
 import java.util.Arrays;
 import java.util.HashMap;
 import java.util.Map;
-import java.util.function.UnaryOperator;
 import lombok.RequiredArgsConstructor;
 import lombok.extern.slf4j.Slf4j;
 import org.apache.kafka.clients.consumer.ConsumerRecord;
@@ -34,8 +32,6 @@ public class ConsumerRecordDeserializer {
   private final Serde.Deserializer fallbackKeyDeserializer;
   private final Serde.Deserializer fallbackValueDeserializer;
 
-  private final UnaryOperator<TopicMessageDTO> masker;
-
   public TopicMessageDTO deserialize(ConsumerRecord<Bytes, Bytes> rec) {
     var message = new TopicMessageDTO();
     fillKey(message, rec);
@@ -51,14 +47,14 @@ public class ConsumerRecordDeserializer {
     message.setValueSize(getValueSize(rec));
     message.setHeadersSize(getHeadersSize(rec));
 
-    return masker.apply(message);
+    return message;
   }
 
-  private static TimestampTypeEnum mapToTimestampType(TimestampType timestampType) {
+  private static TopicMessageDTO.TimestampTypeEnum mapToTimestampType(TimestampType timestampType) {
     return switch (timestampType) {
-      case CREATE_TIME -> TimestampTypeEnum.CREATE_TIME;
-      case LOG_APPEND_TIME -> TimestampTypeEnum.LOG_APPEND_TIME;
-      case NO_TIMESTAMP_TYPE -> TimestampTypeEnum.NO_TIMESTAMP_TYPE;
+      case CREATE_TIME -> TopicMessageDTO.TimestampTypeEnum.CREATE_TIME;
+      case LOG_APPEND_TIME -> TopicMessageDTO.TimestampTypeEnum.LOG_APPEND_TIME;
+      case NO_TIMESTAMP_TYPE -> TopicMessageDTO.TimestampTypeEnum.NO_TIMESTAMP_TYPE;
     };
   }
 
@@ -122,11 +118,11 @@ public class ConsumerRecordDeserializer {
   }
 
   private static Long getKeySize(ConsumerRecord<Bytes, Bytes> consumerRecord) {
-    return consumerRecord.key() != null ? (long) consumerRecord.serializedKeySize() : null;
+    return consumerRecord.key() != null ? (long) consumerRecord.key().get().length : null;
   }
 
   private static Long getValueSize(ConsumerRecord<Bytes, Bytes> consumerRecord) {
-    return consumerRecord.value() != null ? (long) consumerRecord.serializedValueSize() : null;
+    return consumerRecord.value() != null ? (long) consumerRecord.value().get().length : null;
   }
 
   private static int headerSize(Header header) {

+ 109 - 44
kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/MessagesService.java

@@ -1,8 +1,13 @@
 package com.provectus.kafka.ui.service;
 
+import com.google.common.base.Charsets;
+import com.google.common.cache.Cache;
+import com.google.common.cache.CacheBuilder;
+import com.google.common.hash.Hashing;
 import com.google.common.util.concurrent.RateLimiter;
 import com.provectus.kafka.ui.config.ClustersProperties;
 import com.provectus.kafka.ui.emitter.BackwardEmitter;
+import com.provectus.kafka.ui.emitter.Cursor;
 import com.provectus.kafka.ui.emitter.ForwardEmitter;
 import com.provectus.kafka.ui.emitter.MessageFilters;
 import com.provectus.kafka.ui.emitter.TailingEmitter;
@@ -11,12 +16,12 @@ import com.provectus.kafka.ui.exception.ValidationException;
 import com.provectus.kafka.ui.model.ConsumerPosition;
 import com.provectus.kafka.ui.model.CreateTopicMessageDTO;
 import com.provectus.kafka.ui.model.KafkaCluster;
-import com.provectus.kafka.ui.model.MessageFilterTypeDTO;
-import com.provectus.kafka.ui.model.SeekDirectionDTO;
+import com.provectus.kafka.ui.model.PollingModeDTO;
 import com.provectus.kafka.ui.model.SmartFilterTestExecutionDTO;
 import com.provectus.kafka.ui.model.SmartFilterTestExecutionResultDTO;
 import com.provectus.kafka.ui.model.TopicMessageDTO;
 import com.provectus.kafka.ui.model.TopicMessageEventDTO;
+import com.provectus.kafka.ui.serdes.ConsumerRecordDeserializer;
 import com.provectus.kafka.ui.serdes.ProducerRecordCreator;
 import com.provectus.kafka.ui.util.SslPropertiesUtil;
 import java.time.Instant;
@@ -27,12 +32,12 @@ import java.util.Map;
 import java.util.Optional;
 import java.util.Properties;
 import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.ThreadLocalRandom;
 import java.util.function.Predicate;
 import java.util.function.UnaryOperator;
 import java.util.stream.Collectors;
 import javax.annotation.Nullable;
 import lombok.extern.slf4j.Slf4j;
-import org.apache.commons.lang3.StringUtils;
 import org.apache.kafka.clients.admin.OffsetSpec;
 import org.apache.kafka.clients.admin.TopicDescription;
 import org.apache.kafka.clients.producer.KafkaProducer;
@@ -50,8 +55,11 @@ import reactor.core.scheduler.Schedulers;
 @Slf4j
 public class MessagesService {
 
+  private static final long SALT_FOR_HASHING = ThreadLocalRandom.current().nextLong();
+
   private static final int DEFAULT_MAX_PAGE_SIZE = 500;
   private static final int DEFAULT_PAGE_SIZE = 100;
+
   // limiting UI messages rate to 20/sec in tailing mode
   private static final int TAILING_UI_MESSAGE_THROTTLE_RATE = 20;
 
@@ -61,6 +69,12 @@ public class MessagesService {
   private final int maxPageSize;
   private final int defaultPageSize;
 
+  private final Cache<String, Predicate<TopicMessageDTO>> registeredFilters = CacheBuilder.newBuilder()
+      .maximumSize(PollingCursorsStorage.MAX_SIZE)
+      .build();
+
+  private final PollingCursorsStorage cursorsStorage = new PollingCursorsStorage();
+
   public MessagesService(AdminClientService adminClientService,
                          DeserializationService deserializationService,
                          ConsumerGroupService consumerGroupService,
@@ -86,10 +100,7 @@ public class MessagesService {
   public static SmartFilterTestExecutionResultDTO execSmartFilterTest(SmartFilterTestExecutionDTO execData) {
     Predicate<TopicMessageDTO> predicate;
     try {
-      predicate = MessageFilters.createMsgFilter(
-          execData.getFilterCode(),
-          MessageFilterTypeDTO.GROOVY_SCRIPT
-      );
+      predicate = MessageFilters.groovyScriptFilter(execData.getFilterCode());
     } catch (Exception e) {
       log.info("Smart filter '{}' compilation error", execData.getFilterCode(), e);
       return new SmartFilterTestExecutionResultDTO()
@@ -197,67 +208,103 @@ public class MessagesService {
     return new KafkaProducer<>(properties);
   }
 
-  public Flux<TopicMessageEventDTO> loadMessages(KafkaCluster cluster, String topic,
+  public Flux<TopicMessageEventDTO> loadMessages(KafkaCluster cluster,
+                                                 String topic,
                                                  ConsumerPosition consumerPosition,
-                                                 @Nullable String query,
-                                                 MessageFilterTypeDTO filterQueryType,
-                                                 @Nullable Integer pageSize,
-                                                 SeekDirectionDTO seekDirection,
+                                                 @Nullable String containsStringFilter,
+                                                 @Nullable String filterId,
+                                                 @Nullable Integer limit,
                                                  @Nullable String keySerde,
                                                  @Nullable String valueSerde) {
+    return loadMessages(
+        cluster,
+        topic,
+        deserializationService.deserializerFor(cluster, topic, keySerde, valueSerde),
+        consumerPosition,
+        getMsgFilter(containsStringFilter, filterId),
+        fixPageSize(limit)
+    );
+  }
+
+  public Flux<TopicMessageEventDTO> loadMessages(KafkaCluster cluster, String topic, String cursorId) {
+    Cursor cursor = cursorsStorage.getCursor(cursorId)
+        .orElseThrow(() -> new ValidationException("Next page cursor not found. It may have been evicted from the cache."));
+    return loadMessages(
+        cluster,
+        topic,
+        cursor.deserializer(),
+        cursor.consumerPosition(),
+        cursor.filter(),
+        cursor.limit()
+    );
+  }
+
+  private Flux<TopicMessageEventDTO> loadMessages(KafkaCluster cluster,
+                                                  String topic,
+                                                  ConsumerRecordDeserializer deserializer,
+                                                  ConsumerPosition consumerPosition,
+                                                  Predicate<TopicMessageDTO> filter,
+                                                  int limit) {
     return withExistingTopic(cluster, topic)
         .flux()
         .publishOn(Schedulers.boundedElastic())
-        .flatMap(td -> loadMessagesImpl(cluster, topic, consumerPosition, query,
-            filterQueryType, fixPageSize(pageSize), seekDirection, keySerde, valueSerde));
-  }
-
-  private int fixPageSize(@Nullable Integer pageSize) {
-    return Optional.ofNullable(pageSize)
-        .filter(ps -> ps > 0 && ps <= maxPageSize)
-        .orElse(defaultPageSize);
+        .flatMap(td -> loadMessagesImpl(cluster, deserializer, consumerPosition, filter, limit));
   }
 
   private Flux<TopicMessageEventDTO> loadMessagesImpl(KafkaCluster cluster,
-                                                      String topic,
+                                                      ConsumerRecordDeserializer deserializer,
                                                       ConsumerPosition consumerPosition,
-                                                      @Nullable String query,
-                                                      MessageFilterTypeDTO filterQueryType,
-                                                      int limit,
-                                                      SeekDirectionDTO seekDirection,
-                                                      @Nullable String keySerde,
-                                                      @Nullable String valueSerde) {
-
-    var deserializer = deserializationService.deserializerFor(cluster, topic, keySerde, valueSerde);
-    var filter = getMsgFilter(query, filterQueryType);
-    var emitter = switch (seekDirection) {
-      case FORWARD -> new ForwardEmitter(
+                                                      Predicate<TopicMessageDTO> filter,
+                                                      int limit) {
+    var emitter = switch (consumerPosition.pollingMode()) {
+      case TO_OFFSET, TO_TIMESTAMP, LATEST -> new BackwardEmitter(
           () -> consumerGroupService.createConsumer(cluster),
-          consumerPosition, limit, deserializer, filter, cluster.getPollingSettings()
+          consumerPosition,
+          limit,
+          deserializer,
+          filter,
+          cluster.getPollingSettings(),
+          cursorsStorage.createNewCursor(deserializer, consumerPosition, filter, limit)
       );
-      case BACKWARD -> new BackwardEmitter(
+      case FROM_OFFSET, FROM_TIMESTAMP, EARLIEST -> new ForwardEmitter(
           () -> consumerGroupService.createConsumer(cluster),
-          consumerPosition, limit, deserializer, filter, cluster.getPollingSettings()
+          consumerPosition,
+          limit,
+          deserializer,
+          filter,
+          cluster.getPollingSettings(),
+          cursorsStorage.createNewCursor(deserializer, consumerPosition, filter, limit)
       );
       case TAILING -> new TailingEmitter(
           () -> consumerGroupService.createConsumer(cluster),
-          consumerPosition, deserializer, filter, cluster.getPollingSettings()
+          consumerPosition,
+          deserializer,
+          filter,
+          cluster.getPollingSettings()
       );
     };
     return Flux.create(emitter)
-        .map(throttleUiPublish(seekDirection));
+        .map(throttleUiPublish(consumerPosition.pollingMode()));
   }
 
-  private Predicate<TopicMessageDTO> getMsgFilter(String query,
-                                                  MessageFilterTypeDTO filterQueryType) {
-    if (StringUtils.isEmpty(query)) {
-      return evt -> true;
+  private Predicate<TopicMessageDTO> getMsgFilter(@Nullable String containsStrFilter,
+                                                  @Nullable String smartFilterId) {
+    Predicate<TopicMessageDTO> messageFilter = MessageFilters.noop();
+    if (containsStrFilter != null) {
+      messageFilter = messageFilter.and(MessageFilters.containsStringFilter(containsStrFilter));
     }
-    return MessageFilters.createMsgFilter(query, filterQueryType);
+    if (smartFilterId != null) {
+      var registered = registeredFilters.getIfPresent(smartFilterId);
+      if (registered == null) {
+        throw new ValidationException("No filter was registered with id " + smartFilterId);
+      }
+      messageFilter = messageFilter.and(registered);
+    }
+    return messageFilter;
   }
 
-  private <T> UnaryOperator<T> throttleUiPublish(SeekDirectionDTO seekDirection) {
-    if (seekDirection == SeekDirectionDTO.TAILING) {
+  private <T> UnaryOperator<T> throttleUiPublish(PollingModeDTO pollingMode) {
+    if (pollingMode == PollingModeDTO.TAILING) {
       RateLimiter rateLimiter = RateLimiter.create(TAILING_UI_MESSAGE_THROTTLE_RATE);
       return m -> {
         rateLimiter.acquire(1);
@@ -269,4 +316,22 @@ public class MessagesService {
     return UnaryOperator.identity();
   }
 
+  private int fixPageSize(@Nullable Integer pageSize) {
+    return Optional.ofNullable(pageSize)
+        .filter(ps -> ps > 0 && ps <= maxPageSize)
+        .orElse(defaultPageSize);
+  }
+
+  public String registerMessageFilter(String groovyCode) {
+    String saltedCode = groovyCode + SALT_FOR_HASHING;
+    String filterId = Hashing.sha256()
+        .hashString(saltedCode, Charsets.UTF_8)
+        .toString()
+        .substring(0, 8);
+    if (registeredFilters.getIfPresent(filterId) == null) {
+      registeredFilters.put(filterId, MessageFilters.groovyScriptFilter(groovyCode));
+    }
+    return filterId;
+  }
+
 }

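Note: registerMessageFilter derives the filter id from a SHA-256 hash of the Groovy code concatenated with a per-process random salt, truncated to 8 hex characters, so registering identical code twice in one process yields the same id while ids remain unguessable across restarts. A hedged usage sketch against the signatures above (messagesService, cluster, topic, and position are placeholders):

    // Register a Groovy smart filter once, then reference it by id when polling.
    String filterId = messagesService.registerMessageFilter("value != null");
    Flux<TopicMessageEventDTO> events = messagesService.loadMessages(
        cluster, topic, position,
        null,       // containsStringFilter: not used here
        filterId,   // previously registered smart filter
        100,        // limit; null or out-of-range falls back to defaultPageSize
        "String", "String");
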
+ 45 - 0
kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/PollingCursorsStorage.java

@@ -0,0 +1,45 @@
+package com.provectus.kafka.ui.service;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.cache.Cache;
+import com.google.common.cache.CacheBuilder;
+import com.provectus.kafka.ui.emitter.Cursor;
+import com.provectus.kafka.ui.model.ConsumerPosition;
+import com.provectus.kafka.ui.model.TopicMessageDTO;
+import com.provectus.kafka.ui.serdes.ConsumerRecordDeserializer;
+import java.util.Map;
+import java.util.Optional;
+import java.util.function.Predicate;
+import org.apache.commons.lang3.RandomStringUtils;
+
+public class PollingCursorsStorage {
+
+  public static final int MAX_SIZE = 10_000;
+
+  private final Cache<String, Cursor> cursorsCache = CacheBuilder.newBuilder()
+      .maximumSize(MAX_SIZE)
+      .build();
+
+
+  public Cursor.Tracking createNewCursor(ConsumerRecordDeserializer deserializer,
+                                         ConsumerPosition originalPosition,
+                                         Predicate<TopicMessageDTO> filter,
+                                         int limit) {
+    return new Cursor.Tracking(deserializer, originalPosition, filter, limit, this::register);
+  }
+
+  public Optional<Cursor> getCursor(String id) {
+    return Optional.ofNullable(cursorsCache.getIfPresent(id));
+  }
+
+  public String register(Cursor cursor) {
+    var id = RandomStringUtils.random(8, true, true);
+    cursorsCache.put(id, cursor);
+    return id;
+  }
+
+  @VisibleForTesting
+  public Map<String, Cursor> asMap() {
+    return cursorsCache.asMap();
+  }
+}

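Note: the storage is a bounded in-memory cache, so cursor ids are best-effort: once the MAX_SIZE bound forces eviction, getCursor returns empty and loadMessages surfaces the "Next page cursor not found" validation error shown above. A short round-trip sketch (the cursor value is assumed to exist):

    PollingCursorsStorage storage = new PollingCursorsStorage();
    String id = storage.register(cursor);          // random 8-char alphanumeric id
    Optional<Cursor> next = storage.getCursor(id); // empty once the entry is evicted
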
+ 4 - 2
kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/analyze/TopicAnalysisService.java

@@ -1,6 +1,6 @@
 package com.provectus.kafka.ui.service.analyze;
 
-import static com.provectus.kafka.ui.model.SeekTypeDTO.BEGINNING;
+import static com.provectus.kafka.ui.model.PollingModeDTO.EARLIEST;
 
 import com.provectus.kafka.ui.emitter.EnhancedConsumer;
 import com.provectus.kafka.ui.emitter.SeekOperations;
@@ -14,6 +14,7 @@ import java.io.Closeable;
 import java.time.Duration;
 import java.time.Instant;
 import java.util.HashMap;
+import java.util.List;
 import java.util.Map;
 import java.util.Optional;
 import lombok.RequiredArgsConstructor;
@@ -104,7 +105,8 @@ public class TopicAnalysisService {
         consumer.partitionsFor(topicId.topicName)
             .forEach(tp -> partitionStats.put(tp.partition(), new TopicAnalysisStats()));
 
-        var seekOperations = SeekOperations.create(consumer, new ConsumerPosition(BEGINNING, topicId.topicName, null));
+        var seekOperations =
+            SeekOperations.create(consumer, new ConsumerPosition(EARLIEST, topicId.topicName, List.of(), null, null));
         long summaryOffsetsRange = seekOperations.summaryOffsetsRange();
         seekOperations.assignAndSeekNonEmptyPartitions();
 

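Note: the reworked ConsumerPosition (its own file diff is not part of this excerpt) replaces the old (seek type, topic, offsets map) triple. The shape below is inferred from the call sites in these hunks — a sketch, not the actual declaration:

    // pollingMode drives emitter selection; partitions narrows the scan
    // (empty list = all partitions); timestamp and offsets apply only to the
    // FROM_/TO_TIMESTAMP and FROM_/TO_OFFSET modes respectively.
    public record ConsumerPosition(PollingModeDTO pollingMode,
                                   String topic,
                                   List<TopicPartition> partitions,
                                   @Nullable Long timestamp,
                                   @Nullable Offsets offsets) {
      public record Offsets(@Nullable Long offset,
                            @Nullable Map<TopicPartition, Long> tpOffsets) {}
    }
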
+ 79 - 79
kafka-ui-api/src/main/resources/application-local.yml

@@ -10,22 +10,22 @@ logging:
 #server:
 #  port: 8080 #- Port in which kafka-ui will run.
 
-spring:
-  jmx:
-    enabled: true
-  ldap:
-    urls: ldap://localhost:10389
-    base: "cn={0},ou=people,dc=planetexpress,dc=com"
-    admin-user: "cn=admin,dc=planetexpress,dc=com"
-    admin-password: "GoodNewsEveryone"
-    user-filter-search-base: "dc=planetexpress,dc=com"
-    user-filter-search-filter: "(&(uid={0})(objectClass=inetOrgPerson))"
-    group-filter-search-base: "ou=people,dc=planetexpress,dc=com"
+#spring:
+#  jmx:
+#    enabled: true
+#  ldap:
+#    urls: ldap://localhost:10389
+#    base: "cn={0},ou=people,dc=planetexpress,dc=com"
+#    admin-user: "cn=admin,dc=planetexpress,dc=com"
+#    admin-password: "GoodNewsEveryone"
+#    user-filter-search-base: "dc=planetexpress,dc=com"
+#    user-filter-search-filter: "(&(uid={0})(objectClass=inetOrgPerson))"
+#    group-filter-search-base: "ou=people,dc=planetexpress,dc=com"
 
 kafka:
   clusters:
     - name: local
-      bootstrapServers: localhost:9092
+      bootstrapServers: localhost:9096
       schemaRegistry: http://localhost:8085
       ksqldbServer: http://localhost:8088
       kafkaConnect:
@@ -80,70 +80,70 @@ auth:
         custom-params:
           type: github
 
-rbac:
-  roles:
-    - name: "memelords"
-      clusters:
-        - local
-      subjects:
-        - provider: oauth_google
-          type: domain
-          value: "provectus.com"
-        - provider: oauth_google
-          type: user
-          value: "name@provectus.com"
-
-        - provider: oauth_github
-          type: organization
-          value: "provectus"
-        - provider: oauth_github
-          type: user
-          value: "memelord"
-
-        - provider: oauth_cognito
-          type: user
-          value: "username"
-        - provider: oauth_cognito
-          type: group
-          value: "memelords"
-
-        - provider: ldap
-          type: group
-          value: "admin_staff"
-
-        # NOT IMPLEMENTED YET
-      #        - provider: ldap_ad
-      #          type: group
-      #          value: "admin_staff"
-
-      permissions:
-        - resource: applicationconfig
-          actions: all
-
-        - resource: clusterconfig
-          actions: all
-
-        - resource: topic
-          value: ".*"
-          actions: all
-
-        - resource: consumer
-          value: ".*"
-          actions: all
-
-        - resource: schema
-          value: ".*"
-          actions: all
-
-        - resource: connect
-          value: "*"
-          actions: all
-
-        - resource: ksql
-          actions: all
-
-        - resource: acl
-          actions: all
-
-        - resource: audit
-          actions: all
+#rbac:
+#  roles:
+#    - name: "memelords"
+#      clusters:
+#        - local
+#      subjects:
+#        - provider: oauth_google
+#          type: domain
+#          value: "provectus.com"
+#        - provider: oauth_google
+#          type: user
+#          value: "name@provectus.com"
+#
+#        - provider: oauth_github
+#          type: organization
+#          value: "provectus"
+#        - provider: oauth_github
+#          type: user
+#          value: "memelord"
+#
+#        - provider: oauth_cognito
+#          type: user
+#          value: "username"
+#        - provider: oauth_cognito
+#          type: group
+#          value: "memelords"
+#
+#        - provider: ldap
+#          type: group
+#          value: "admin_staff"
+#
+#        # NOT IMPLEMENTED YET
+#      #        - provider: ldap_ad
+#      #          type: group
+#      #          value: "admin_staff"
+#
+#      permissions:
+#        - resource: applicationconfig
+#          actions: all
+#
+#        - resource: clusterconfig
+#          actions: all
+#
+#        - resource: topic
+#          value: ".*"
+#          actions: all
+#
+#        - resource: consumer
+#          value: ".*"
+#          actions: all
+#
+#        - resource: schema
+#          value: ".*"
+#          actions: all
+#
+#        - resource: connect
+#          value: "*"
+#          actions: all
+#
+#        - resource: ksql
+#          actions: all
+#
+#        - resource: acl
+#          actions: all
+#
+#        - resource: audit
+#          actions: all

+ 103 - 15
kafka-ui-api/src/main/resources/application.yml

@@ -1,21 +1,109 @@
-auth:
-  type: DISABLED
-
-management:
-  endpoint:
-    info:
-      enabled: true
-    health:
-      enabled: true
-  endpoints:
-    web:
-      exposure:
-        include: "info,health,prometheus"
-
 logging:
   level:
     root: INFO
     com.provectus: DEBUG
+    #org.springframework.http.codec.json.Jackson2JsonEncoder: DEBUG
+    #org.springframework.http.codec.json.Jackson2JsonDecoder: DEBUG
     reactor.netty.http.server.AccessLog: INFO
-    org.hibernate.validator: WARN
+    org.springframework.security: DEBUG
+
+#server:
+#  port: 8080 #- Port in which kafka-ui will run.
+
+#spring:
+#  jmx:
+#    enabled: true
+#  ldap:
+#    urls: ldap://localhost:10389
+#    base: "cn={0},ou=people,dc=planetexpress,dc=com"
+#    admin-user: "cn=admin,dc=planetexpress,dc=com"
+#    admin-password: "GoodNewsEveryone"
+#    user-filter-search-base: "dc=planetexpress,dc=com"
+#    user-filter-search-filter: "(&(uid={0})(objectClass=inetOrgPerson))"
+#    group-filter-search-base: "ou=people,dc=planetexpress,dc=com"
+
+kafka:
+  clusters:
+    - name: local
+      bootstrapServers: localhost:9096
+#      schemaRegistry: http://localhost:8085
+#      ksqldbServer: http://localhost:8088
+#      kafkaConnect:
+#        - name: first
+#          address: http://localhost:8083
+#      metrics:
+#        port: 9997
+#        type: JMX
+
+auth:
+  type: DISABLED
+
+dynamic.config.enabled: true
 
+#rbac:
+#  roles:
+#    - name: "memelords"
+#      clusters:
+#        - local
+#      subjects:
+#        - provider: oauth_google
+#          type: domain
+#          value: "provectus.com"
+#        - provider: oauth_google
+#          type: user
+#          value: "name@provectus.com"
+#
+#        - provider: oauth_github
+#          type: organization
+#          value: "provectus"
+#        - provider: oauth_github
+#          type: user
+#          value: "memelord"
+#
+#        - provider: oauth_cognito
+#          type: user
+#          value: "username"
+#        - provider: oauth_cognito
+#          type: group
+#          value: "memelords"
+#
+#        - provider: ldap
+#          type: group
+#          value: "admin_staff"
+#
+#        # NOT IMPLEMENTED YET
+#      #        - provider: ldap_ad
+#      #          type: group
+#      #          value: "admin_staff"
+#
+#      permissions:
+#        - resource: applicationconfig
+#          actions: all
+#
+#        - resource: clusterconfig
+#          actions: all
+#
+#        - resource: topic
+#          value: ".*"
+#          actions: all
+#
+#        - resource: consumer
+#          value: ".*"
+#          actions: all
+#
+#        - resource: schema
+#          value: ".*"
+#          actions: all
+#
+#        - resource: connect
+#          value: "*"
+#          actions: all
+#
+#        - resource: ksql
+#          actions: all
+#
+#        - resource: acl
+#          actions: all
+#
+#        - resource: audit
+#          actions: all

+ 1 - 1
kafka-ui-api/src/test/java/com/provectus/kafka/ui/KafkaConsumerTests.java

@@ -56,7 +56,7 @@ public class KafkaConsumerTests extends AbstractIntegrationTest {
     }
 
     long count = webTestClient.get()
-        .uri("/api/clusters/{clusterName}/topics/{topicName}/messages", LOCAL, topicName)
+        .uri("/api/clusters/{clusterName}/topics/{topicName}/messages/v2?m=EARLIEST", LOCAL, topicName)
         .accept(TEXT_EVENT_STREAM)
         .exchange()
         .expectStatus()

+ 195 - 0
kafka-ui-api/src/test/java/com/provectus/kafka/ui/emitter/CursorTest.java

@@ -0,0 +1,195 @@
+package com.provectus.kafka.ui.emitter;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+import com.provectus.kafka.ui.AbstractIntegrationTest;
+import com.provectus.kafka.ui.model.ConsumerPosition;
+import com.provectus.kafka.ui.model.PollingModeDTO;
+import com.provectus.kafka.ui.model.TopicMessageEventDTO;
+import com.provectus.kafka.ui.producer.KafkaTestProducer;
+import com.provectus.kafka.ui.serde.api.Serde;
+import com.provectus.kafka.ui.serdes.ConsumerRecordDeserializer;
+import com.provectus.kafka.ui.serdes.PropertyResolverImpl;
+import com.provectus.kafka.ui.serdes.builtin.StringSerde;
+import com.provectus.kafka.ui.service.PollingCursorsStorage;
+import com.provectus.kafka.ui.util.ApplicationMetrics;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+import java.util.UUID;
+import java.util.function.Consumer;
+import org.apache.kafka.clients.admin.NewTopic;
+import org.apache.kafka.clients.consumer.ConsumerConfig;
+import org.apache.kafka.clients.producer.ProducerRecord;
+import org.apache.kafka.common.TopicPartition;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
+import reactor.core.publisher.Flux;
+import reactor.test.StepVerifier;
+
+class CursorTest extends AbstractIntegrationTest {
+
+  static final String TOPIC = CursorTest.class.getSimpleName() + "_" + UUID.randomUUID();
+  static final int MSGS_IN_PARTITION = 20;
+  static final int PAGE_SIZE = (MSGS_IN_PARTITION / 2) + 1; // to poll the full data set in 2 iterations
+
+  final PollingCursorsStorage cursorsStorage = new PollingCursorsStorage();
+
+  @BeforeAll
+  static void setup() {
+    createTopic(new NewTopic(TOPIC, 1, (short) 1));
+    try (var producer = KafkaTestProducer.forKafka(kafka)) {
+      for (int i = 0; i < MSGS_IN_PARTITION; i++) {
+        producer.send(new ProducerRecord<>(TOPIC, "msg_" + i));
+      }
+    }
+  }
+
+  @AfterAll
+  static void cleanup() {
+    deleteTopic(TOPIC);
+  }
+
+  @Test
+  void backwardEmitter() {
+    var consumerPosition = new ConsumerPosition(PollingModeDTO.LATEST, TOPIC, List.of(), null, null);
+    var emitter = createBackwardEmitter(consumerPosition);
+    emitMessages(emitter, PAGE_SIZE);
+    var cursor = assertCursor(
+        PollingModeDTO.TO_OFFSET,
+        offsets -> assertThat(offsets)
+            .hasSize(1)
+            .containsEntry(new TopicPartition(TOPIC, 0), 9L)
+    );
+
+    // polling remaining records using registered cursor
+    emitter = createBackwardEmitterWithCursor(cursor);
+    emitMessages(emitter, MSGS_IN_PARTITION - PAGE_SIZE);
+    //checking no new cursors registered
+    assertThat(cursorsStorage.asMap()).hasSize(1).containsValue(cursor);
+  }
+
+  @Test
+  void forwardEmitter() {
+    var consumerPosition = new ConsumerPosition(PollingModeDTO.EARLIEST, TOPIC, List.of(), null, null);
+    var emitter = createForwardEmitter(consumerPosition);
+    emitMessages(emitter, PAGE_SIZE);
+    var cursor = assertCursor(
+        PollingModeDTO.FROM_OFFSET,
+        offsets -> assertThat(offsets)
+            .hasSize(1)
+            .containsEntry(new TopicPartition(TOPIC, 0), 11L)
+    );
+
+    //polling remaining records using registered cursor
+    emitter = createForwardEmitterWithCursor(cursor);
+    emitMessages(emitter, MSGS_IN_PARTITION - PAGE_SIZE);
+    //checking no new cursors registered
+    assertThat(cursorsStorage.asMap()).hasSize(1).containsValue(cursor);
+  }
+
+  private Cursor assertCursor(PollingModeDTO expectedMode,
+                              Consumer<Map<TopicPartition, Long>> offsetsAssert) {
+    Cursor registeredCursor = cursorsStorage.asMap().values().stream().findFirst().orElse(null);
+    assertThat(registeredCursor).isNotNull();
+    assertThat(registeredCursor.limit()).isEqualTo(PAGE_SIZE);
+    assertThat(registeredCursor.deserializer()).isNotNull();
+    assertThat(registeredCursor.filter()).isNotNull();
+
+    var cursorPosition = registeredCursor.consumerPosition();
+    assertThat(cursorPosition).isNotNull();
+    assertThat(cursorPosition.topic()).isEqualTo(TOPIC);
+    assertThat(cursorPosition.partitions()).isEqualTo(List.of());
+    assertThat(cursorPosition.pollingMode()).isEqualTo(expectedMode);
+
+    offsetsAssert.accept(cursorPosition.offsets().tpOffsets());
+    return registeredCursor;
+  }
+
+  private void emitMessages(AbstractEmitter emitter, int expectedCnt) {
+    StepVerifier.create(
+            Flux.create(emitter)
+                .filter(e -> e.getType() == TopicMessageEventDTO.TypeEnum.MESSAGE)
+                .map(e -> e.getMessage().getContent())
+        )
+        .expectNextCount(expectedCnt)
+        .verifyComplete();
+  }
+
+  private BackwardEmitter createBackwardEmitter(ConsumerPosition position) {
+    return new BackwardEmitter(
+        this::createConsumer,
+        position,
+        PAGE_SIZE,
+        createRecordsDeserializer(),
+        m -> true,
+        PollingSettings.createDefault(),
+        createCursor(position)
+    );
+  }
+
+  private BackwardEmitter createBackwardEmitterWithCursor(Cursor cursor) {
+    return new BackwardEmitter(
+        this::createConsumer,
+        cursor.consumerPosition(),
+        cursor.limit(),
+        cursor.deserializer(),
+        cursor.filter(),
+        PollingSettings.createDefault(),
+        createCursor(cursor.consumerPosition())
+    );
+  }
+
+  private ForwardEmitter createForwardEmitterWithCursor(Cursor cursor) {
+    return new ForwardEmitter(
+        this::createConsumer,
+        cursor.consumerPosition(),
+        cursor.limit(),
+        cursor.deserializer(),
+        cursor.filter(),
+        PollingSettings.createDefault(),
+        createCursor(cursor.consumerPosition())
+    );
+  }
+
+  private ForwardEmitter createForwardEmitter(ConsumerPosition position) {
+    return new ForwardEmitter(
+        this::createConsumer,
+        position,
+        PAGE_SIZE,
+        createRecordsDeserializer(),
+        m -> true,
+        PollingSettings.createDefault(),
+        createCursor(position)
+    );
+  }
+
+  private Cursor.Tracking createCursor(ConsumerPosition position) {
+    return cursorsStorage.createNewCursor(createRecordsDeserializer(), position, m -> true, PAGE_SIZE);
+  }
+
+  private EnhancedConsumer createConsumer() {
+    Properties props = new Properties();
+    props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, kafka.getBootstrapServers());
+    props.put(ConsumerConfig.GROUP_ID_CONFIG, UUID.randomUUID().toString());
+    props.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, PAGE_SIZE - 1); // to check multiple polls
+    return new EnhancedConsumer(props, PollingThrottler.noop(), ApplicationMetrics.noop());
+  }
+
+  private static ConsumerRecordDeserializer createRecordsDeserializer() {
+    Serde s = new StringSerde();
+    s.configure(PropertyResolverImpl.empty(), PropertyResolverImpl.empty(), PropertyResolverImpl.empty());
+    return new ConsumerRecordDeserializer(
+        StringSerde.name(),
+        s.deserializer(null, Serde.Target.KEY),
+        StringSerde.name(),
+        s.deserializer(null, Serde.Target.VALUE),
+        StringSerde.name(),
+        s.deserializer(null, Serde.Target.KEY),
+        s.deserializer(null, Serde.Target.VALUE),
+        msg -> msg
+    );
+  }
+
+}

+ 1 - 1
kafka-ui-api/src/test/java/com/provectus/kafka/ui/emitter/MessageFiltersTest.java

@@ -51,7 +51,7 @@ class MessageFiltersTest {
           filter.test(msg().key(null).content(null))
       );
 
-      assertFalse(
+      assertTrue(
           filter.test(msg().key("aBc").content("AbC"))
       );
     }

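Note: the flipped assertion reflects that substring filtering is now case-insensitive: a message whose key/content differ from the filter string only by case now passes. A minimal sketch of the implied behavior, assuming commons-lang3 (the actual MessageFilters.containsStringFilter implementation is not shown in this excerpt):

    // Case-insensitive "contains" over both key and content (assumed semantics).
    static Predicate<TopicMessageDTO> containsStringFilter(String str) {
      return msg -> StringUtils.containsIgnoreCase(msg.getKey(), str)
          || StringUtils.containsIgnoreCase(msg.getContent(), str);
    }
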
+ 35 - 14
kafka-ui-api/src/test/java/com/provectus/kafka/ui/emitter/SeekOperationsTest.java

@@ -1,8 +1,13 @@
 package com.provectus.kafka.ui.emitter;
 
+import static com.provectus.kafka.ui.model.PollingModeDTO.EARLIEST;
+import static com.provectus.kafka.ui.model.PollingModeDTO.LATEST;
+import static com.provectus.kafka.ui.model.PollingModeDTO.TAILING;
 import static org.assertj.core.api.Assertions.assertThat;
 
-import com.provectus.kafka.ui.model.SeekTypeDTO;
+import com.provectus.kafka.ui.model.ConsumerPosition;
+import com.provectus.kafka.ui.model.PollingModeDTO;
+import java.util.List;
 import java.util.Map;
 import java.util.stream.Collectors;
 import java.util.stream.Stream;
@@ -14,6 +19,8 @@ import org.apache.kafka.common.utils.Bytes;
 import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Nested;
 import org.junit.jupiter.api.Test;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.CsvSource;
 
 class SeekOperationsTest {
 
@@ -40,13 +47,22 @@ class SeekOperationsTest {
   @Nested
   class GetOffsetsForSeek {
 
+    @Test
+    void tailing() {
+      var offsets = SeekOperations.getOffsetsForSeek(
+          consumer,
+          new OffsetsInfo(consumer, topic),
+          new ConsumerPosition(TAILING, topic, List.of(), null, null)
+      );
+      assertThat(offsets).containsExactlyInAnyOrderEntriesOf(Map.of(tp0, 0L, tp1, 10L, tp2, 20L, tp3, 30L));
+    }
+
     @Test
     void latest() {
       var offsets = SeekOperations.getOffsetsForSeek(
           consumer,
           new OffsetsInfo(consumer, topic),
-          SeekTypeDTO.LATEST,
-          null
+          new ConsumerPosition(LATEST, topic, List.of(), null, null)
       );
       assertThat(offsets).containsExactlyInAnyOrderEntriesOf(Map.of(tp2, 20L, tp3, 30L));
     }
@@ -56,33 +72,38 @@ class SeekOperationsTest {
       var offsets = SeekOperations.getOffsetsForSeek(
           consumer,
           new OffsetsInfo(consumer, topic),
-          SeekTypeDTO.BEGINNING,
-          null
+          new ConsumerPosition(EARLIEST, topic, List.of(), null, null)
       );
       assertThat(offsets).containsExactlyInAnyOrderEntriesOf(Map.of(tp2, 0L, tp3, 25L));
     }
 
-    @Test
-    void offsets() {
+    @ParameterizedTest
+    @CsvSource({"TO_OFFSET", "FROM_OFFSET"})
+    void offsets(PollingModeDTO mode) {
       var offsets = SeekOperations.getOffsetsForSeek(
           consumer,
           new OffsetsInfo(consumer, topic),
-          SeekTypeDTO.OFFSET,
-          Map.of(tp1, 10L, tp2, 10L, tp3, 26L)
+          new ConsumerPosition(
+              mode, topic, List.of(tp1, tp2, tp3), null,
+              new ConsumerPosition.Offsets(null, Map.of(tp1, 10L, tp2, 10L, tp3, 26L))
+          )
       );
       assertThat(offsets).containsExactlyInAnyOrderEntriesOf(Map.of(tp2, 10L, tp3, 26L));
     }
 
-    @Test
-    void offsetsWithBoundsFixing() {
+    @ParameterizedTest
+    @CsvSource({"TO_OFFSET", "FROM_OFFSET"})
+    void offsetsWithBoundsFixing(PollingModeDTO mode) {
       var offsets = SeekOperations.getOffsetsForSeek(
           consumer,
           new OffsetsInfo(consumer, topic),
-          SeekTypeDTO.OFFSET,
-          Map.of(tp1, 10L, tp2, 21L, tp3, 24L)
+          new ConsumerPosition(
+              mode, topic, List.of(tp1, tp2, tp3), null,
+              new ConsumerPosition.Offsets(null, Map.of(tp1, 10L, tp2, 21L, tp3, 24L))
+          )
       );
       assertThat(offsets).containsExactlyInAnyOrderEntriesOf(Map.of(tp2, 20L, tp3, 25L));
     }
   }
 
-}
+}

+ 6 - 8
kafka-ui-api/src/test/java/com/provectus/kafka/ui/emitter/TailingEmitterTest.java

@@ -4,10 +4,9 @@ import static org.assertj.core.api.Assertions.assertThat;
 
 import com.provectus.kafka.ui.AbstractIntegrationTest;
 import com.provectus.kafka.ui.model.ConsumerPosition;
-import com.provectus.kafka.ui.model.MessageFilterTypeDTO;
-import com.provectus.kafka.ui.model.SeekDirectionDTO;
-import com.provectus.kafka.ui.model.SeekTypeDTO;
+import com.provectus.kafka.ui.model.PollingModeDTO;
 import com.provectus.kafka.ui.model.TopicMessageEventDTO;
+import com.provectus.kafka.ui.serdes.builtin.StringSerde;
 import com.provectus.kafka.ui.service.ClustersStorage;
 import com.provectus.kafka.ui.service.MessagesService;
 import java.time.Duration;
@@ -111,13 +110,12 @@ class TailingEmitterTest extends AbstractIntegrationTest {
 
     return applicationContext.getBean(MessagesService.class)
         .loadMessages(cluster, topicName,
-            new ConsumerPosition(SeekTypeDTO.LATEST, topic, null),
+            new ConsumerPosition(PollingModeDTO.TAILING, topic, List.of(), null, null),
             query,
-            MessageFilterTypeDTO.STRING_CONTAINS,
+            null,
             0,
-            SeekDirectionDTO.TAILING,
-            "String",
-            "String");
+            StringSerde.name(),
+            StringSerde.name());
   }
 
   private List<TopicMessageEventDTO> startTailing(String filterQuery) {

+ 86 - 56
kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/MessagesServiceTest.java

@@ -8,19 +8,24 @@ import com.provectus.kafka.ui.exception.TopicNotFoundException;
 import com.provectus.kafka.ui.model.ConsumerPosition;
 import com.provectus.kafka.ui.model.CreateTopicMessageDTO;
 import com.provectus.kafka.ui.model.KafkaCluster;
-import com.provectus.kafka.ui.model.SeekDirectionDTO;
-import com.provectus.kafka.ui.model.SeekTypeDTO;
+import com.provectus.kafka.ui.model.PollingModeDTO;
 import com.provectus.kafka.ui.model.SmartFilterTestExecutionDTO;
 import com.provectus.kafka.ui.model.TopicMessageDTO;
 import com.provectus.kafka.ui.model.TopicMessageEventDTO;
 import com.provectus.kafka.ui.producer.KafkaTestProducer;
 import com.provectus.kafka.ui.serdes.builtin.StringSerde;
+import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
 import java.util.UUID;
+import java.util.concurrent.atomic.AtomicReference;
 import org.apache.kafka.clients.admin.NewTopic;
+import org.junit.jupiter.api.AfterEach;
 import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Test;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.CsvSource;
 import org.springframework.beans.factory.annotation.Autowired;
 import reactor.core.publisher.Flux;
 import reactor.test.StepVerifier;
@@ -35,6 +40,8 @@ class MessagesServiceTest extends AbstractIntegrationTest {
 
   KafkaCluster cluster;
 
+  Set<String> createdTopics = new HashSet<>();
+
   @BeforeEach
   void init() {
     cluster = applicationContext
@@ -43,6 +50,11 @@ class MessagesServiceTest extends AbstractIntegrationTest {
         .get();
   }
 
+  @AfterEach
+  void deleteCreatedTopics() {
+    createdTopics.forEach(MessagesServiceTest::deleteTopic);
+  }
+
   @Test
   void deleteTopicMessagesReturnsExceptionWhenTopicNotFound() {
     StepVerifier.create(messagesService.deleteTopicMessages(cluster, NON_EXISTING_TOPIC, List.of()))
@@ -60,7 +72,9 @@ class MessagesServiceTest extends AbstractIntegrationTest {
   @Test
   void loadMessagesReturnsExceptionWhenTopicNotFound() {
     StepVerifier.create(messagesService
-            .loadMessages(cluster, NON_EXISTING_TOPIC, null, null, null, 1, null, "String", "String"))
+            .loadMessages(cluster, NON_EXISTING_TOPIC,
+                new ConsumerPosition(PollingModeDTO.TAILING, NON_EXISTING_TOPIC, List.of(), null, null),
+                null, null, 1, "String", "String"))
         .expectError(TopicNotFoundException.class)
         .verify();
   }
@@ -68,68 +82,84 @@ class MessagesServiceTest extends AbstractIntegrationTest {
   @Test
   void maskingAppliedOnConfiguredClusters() throws Exception {
     String testTopic = MASKED_TOPICS_PREFIX + UUID.randomUUID();
+    createTopicWithCleanup(new NewTopic(testTopic, 1, (short) 1));
+
     try (var producer = KafkaTestProducer.forKafka(kafka)) {
-      createTopic(new NewTopic(testTopic, 1, (short) 1));
       producer.send(testTopic, "message1");
       producer.send(testTopic, "message2").get();
-
-      Flux<TopicMessageDTO> msgsFlux = messagesService.loadMessages(
-          cluster,
-          testTopic,
-          new ConsumerPosition(SeekTypeDTO.BEGINNING, testTopic, null),
-          null,
-          null,
-          100,
-          SeekDirectionDTO.FORWARD,
-          StringSerde.name(),
-          StringSerde.name()
-      ).filter(evt -> evt.getType() == TopicMessageEventDTO.TypeEnum.MESSAGE)
-          .map(TopicMessageEventDTO::getMessage);
-
-      // both messages should be masked
-      StepVerifier.create(msgsFlux)
-          .expectNextMatches(msg -> msg.getContent().equals("***"))
-          .expectNextMatches(msg -> msg.getContent().equals("***"))
-          .verifyComplete();
-    } finally {
-      deleteTopic(testTopic);
     }
-  }
 
-  @Test
-  void execSmartFilterTestReturnsExecutionResult() {
-    var params = new SmartFilterTestExecutionDTO()
-        .filterCode("key != null && value != null && headers != null && timestampMs != null && offset != null")
-        .key("1234")
-        .value("{ \"some\" : \"value\" } ")
-        .headers(Map.of("h1", "hv1"))
-        .offset(12345L)
-        .timestampMs(System.currentTimeMillis())
-        .partition(1);
-    assertThat(execSmartFilterTest(params).getResult()).isTrue();
-
-    params.setFilterCode("return false");
-    assertThat(execSmartFilterTest(params).getResult()).isFalse();
+    Flux<TopicMessageDTO> msgsFlux = messagesService.loadMessages(
+            cluster,
+            testTopic,
+            new ConsumerPosition(PollingModeDTO.EARLIEST, testTopic, List.of(), null, null),
+            null,
+            null,
+            100,
+            StringSerde.name(),
+            StringSerde.name()
+        ).filter(evt -> evt.getType() == TopicMessageEventDTO.TypeEnum.MESSAGE)
+        .map(TopicMessageEventDTO::getMessage);
+
+    // both messages should be masked
+    StepVerifier.create(msgsFlux)
+        .expectNextMatches(msg -> msg.getContent().equals("***"))
+        .expectNextMatches(msg -> msg.getContent().equals("***"))
+        .verifyComplete();
   }
 
-  @Test
-  void execSmartFilterTestReturnsErrorOnFilterApplyError() {
-    var result = execSmartFilterTest(
-        new SmartFilterTestExecutionDTO()
-            .filterCode("return 1/0")
-    );
-    assertThat(result.getResult()).isNull();
-    assertThat(result.getError()).containsIgnoringCase("execution error");
+  @ParameterizedTest
+  @CsvSource({"EARLIEST", "LATEST"})
+  void cursorIsRegisteredAfterPollingIsDoneAndCanBeUsedForNextPagePolling(PollingModeDTO mode) {
+    String testTopic = MessagesServiceTest.class.getSimpleName() + UUID.randomUUID();
+    createTopicWithCleanup(new NewTopic(testTopic, 5, (short) 1));
+
+    int msgsToGenerate = 100;
+    int pageSize = (msgsToGenerate / 2) + 1;
+
+    try (var producer = KafkaTestProducer.forKafka(kafka)) {
+      for (int i = 0; i < msgsToGenerate; i++) {
+        producer.send(testTopic, "message_" + i);
+      }
+    }
+
+    var cursorIdCatcher = new AtomicReference<String>();
+    Flux<String> msgsFlux = messagesService.loadMessages(
+            cluster, testTopic,
+            new ConsumerPosition(mode, testTopic, List.of(), null, null),
+            null, null, pageSize, StringSerde.name(), StringSerde.name())
+        .doOnNext(evt -> {
+          if (evt.getType() == TopicMessageEventDTO.TypeEnum.DONE) {
+            assertThat(evt.getCursor()).isNotNull();
+            cursorIdCatcher.set(evt.getCursor().getId());
+          }
+        })
+        .filter(evt -> evt.getType() == TopicMessageEventDTO.TypeEnum.MESSAGE)
+        .map(evt -> evt.getMessage().getContent());
+
+    StepVerifier.create(msgsFlux)
+        .expectNextCount(pageSize)
+        .verifyComplete();
+
+    assertThat(cursorIdCatcher.get()).isNotNull();
+
+    Flux<String> remainingMsgs = messagesService.loadMessages(cluster, testTopic, cursorIdCatcher.get())
+        .doOnNext(evt -> {
+          if (evt.getType() == TopicMessageEventDTO.TypeEnum.DONE) {
+            assertThat(evt.getCursor()).isNull();
+          }
+        })
+        .filter(evt -> evt.getType() == TopicMessageEventDTO.TypeEnum.MESSAGE)
+        .map(evt -> evt.getMessage().getContent());
+
+    StepVerifier.create(remainingMsgs)
+        .expectNextCount(msgsToGenerate - pageSize)
+        .verifyComplete();
   }
 
-  @Test
-  void execSmartFilterTestReturnsErrorOnFilterCompilationError() {
-    var result = execSmartFilterTest(
-        new SmartFilterTestExecutionDTO()
-            .filterCode("this is invalid groovy syntax = 1")
-    );
-    assertThat(result.getResult()).isNull();
-    assertThat(result.getError()).containsIgnoringCase("Compilation error");
+  private void createTopicWithCleanup(NewTopic newTopic) {
+    createTopic(newTopic);
+    createdTopics.add(newTopic.name());
   }
 
 }

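Note: the new parameterized test walks the full pagination loop — the first loadMessages call emits a DONE event carrying a cursor id, and the cursor-based overload resumes from the saved position with the original filter, limit, and serdes. Distilled to the two service calls (values are placeholders):

    // First page: position, filters, and page size are supplied explicitly.
    messagesService.loadMessages(cluster, testTopic, position,
        null, null, pageSize, StringSerde.name(), StringSerde.name());
    // Next page: everything is restored from the registered cursor.
    messagesService.loadMessages(cluster, testTopic, cursorIdCatcher.get());
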
+ 63 - 50
kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/RecordEmitterTest.java

@@ -1,13 +1,16 @@
 package com.provectus.kafka.ui.service;
 
-import static com.provectus.kafka.ui.model.SeekTypeDTO.BEGINNING;
-import static com.provectus.kafka.ui.model.SeekTypeDTO.LATEST;
-import static com.provectus.kafka.ui.model.SeekTypeDTO.OFFSET;
-import static com.provectus.kafka.ui.model.SeekTypeDTO.TIMESTAMP;
+import static com.provectus.kafka.ui.model.PollingModeDTO.EARLIEST;
+import static com.provectus.kafka.ui.model.PollingModeDTO.FROM_OFFSET;
+import static com.provectus.kafka.ui.model.PollingModeDTO.FROM_TIMESTAMP;
+import static com.provectus.kafka.ui.model.PollingModeDTO.LATEST;
+import static com.provectus.kafka.ui.model.PollingModeDTO.TO_OFFSET;
+import static com.provectus.kafka.ui.model.PollingModeDTO.TO_TIMESTAMP;
 import static org.assertj.core.api.Assertions.assertThat;
 
 import com.provectus.kafka.ui.AbstractIntegrationTest;
 import com.provectus.kafka.ui.emitter.BackwardEmitter;
+import com.provectus.kafka.ui.emitter.Cursor;
 import com.provectus.kafka.ui.emitter.EnhancedConsumer;
 import com.provectus.kafka.ui.emitter.ForwardEmitter;
 import com.provectus.kafka.ui.emitter.PollingSettings;
@@ -43,6 +46,7 @@ import org.apache.kafka.common.header.internals.RecordHeader;
 import org.junit.jupiter.api.AfterAll;
 import org.junit.jupiter.api.BeforeAll;
 import org.junit.jupiter.api.Test;
+import org.mockito.Mockito;
 import reactor.core.publisher.Flux;
 import reactor.core.publisher.FluxSink;
 import reactor.test.StepVerifier;
@@ -57,16 +61,18 @@ class RecordEmitterTest extends AbstractIntegrationTest {
   static final String EMPTY_TOPIC = TOPIC + "_empty";
   static final List<Record> SENT_RECORDS = new ArrayList<>();
   static final ConsumerRecordDeserializer RECORD_DESERIALIZER = createRecordsDeserializer();
+  static final Cursor.Tracking CURSOR_MOCK = Mockito.mock(Cursor.Tracking.class);
   static final Predicate<TopicMessageDTO> NOOP_FILTER = m -> true;
 
   @BeforeAll
   static void generateMsgs() throws Exception {
     createTopic(new NewTopic(TOPIC, PARTITIONS, (short) 1));
     createTopic(new NewTopic(EMPTY_TOPIC, PARTITIONS, (short) 1));
+    long startTs = System.currentTimeMillis();
     try (var producer = KafkaTestProducer.forKafka(kafka)) {
       for (int partition = 0; partition < PARTITIONS; partition++) {
         for (int i = 0; i < MSGS_PER_PARTITION; i++) {
-          long ts = System.currentTimeMillis() + i;
+          long ts = (startTs += 100);
           var value = "msg_" + partition + "_" + i;
           var metadata = producer.send(
               new ProducerRecord<>(
@@ -115,20 +121,22 @@ class RecordEmitterTest extends AbstractIntegrationTest {
   void pollNothingOnEmptyTopic() {
     var forwardEmitter = new ForwardEmitter(
         this::createConsumer,
-        new ConsumerPosition(BEGINNING, EMPTY_TOPIC, null),
+        new ConsumerPosition(EARLIEST, EMPTY_TOPIC, List.of(), null, null),
         100,
         RECORD_DESERIALIZER,
         NOOP_FILTER,
-        PollingSettings.createDefault()
+        PollingSettings.createDefault(),
+        CURSOR_MOCK
     );
 
     var backwardEmitter = new BackwardEmitter(
         this::createConsumer,
-        new ConsumerPosition(BEGINNING, EMPTY_TOPIC, null),
+        new ConsumerPosition(EARLIEST, EMPTY_TOPIC, List.of(), null, null),
         100,
         RECORD_DESERIALIZER,
         NOOP_FILTER,
-        PollingSettings.createDefault()
+        PollingSettings.createDefault(),
+        CURSOR_MOCK
     );
 
     StepVerifier.create(Flux.create(forwardEmitter))
@@ -148,20 +156,22 @@ class RecordEmitterTest extends AbstractIntegrationTest {
   void pollFullTopicFromBeginning() {
     var forwardEmitter = new ForwardEmitter(
         this::createConsumer,
-        new ConsumerPosition(BEGINNING, TOPIC, null),
+        new ConsumerPosition(EARLIEST, TOPIC, List.of(), null, null),
         PARTITIONS * MSGS_PER_PARTITION,
         RECORD_DESERIALIZER,
         NOOP_FILTER,
-        PollingSettings.createDefault()
+        PollingSettings.createDefault(),
+        CURSOR_MOCK
     );
 
     var backwardEmitter = new BackwardEmitter(
         this::createConsumer,
-        new ConsumerPosition(LATEST, TOPIC, null),
+        new ConsumerPosition(LATEST, TOPIC, List.of(), null, null),
         PARTITIONS * MSGS_PER_PARTITION,
         RECORD_DESERIALIZER,
         NOOP_FILTER,
-        PollingSettings.createDefault()
+        PollingSettings.createDefault(),
+        CURSOR_MOCK
     );
 
     List<String> expectedValues = SENT_RECORDS.stream().map(Record::getValue).collect(Collectors.toList());
@@ -180,20 +190,24 @@ class RecordEmitterTest extends AbstractIntegrationTest {
 
     var forwardEmitter = new ForwardEmitter(
         this::createConsumer,
-        new ConsumerPosition(OFFSET, TOPIC, targetOffsets),
+        new ConsumerPosition(FROM_OFFSET, TOPIC, List.copyOf(targetOffsets.keySet()), null,
+            new ConsumerPosition.Offsets(null, targetOffsets)),
         PARTITIONS * MSGS_PER_PARTITION,
         RECORD_DESERIALIZER,
         NOOP_FILTER,
-        PollingSettings.createDefault()
+        PollingSettings.createDefault(),
+        CURSOR_MOCK
     );
 
     var backwardEmitter = new BackwardEmitter(
         this::createConsumer,
-        new ConsumerPosition(OFFSET, TOPIC, targetOffsets),
+        new ConsumerPosition(TO_OFFSET, TOPIC, List.copyOf(targetOffsets.keySet()), null,
+            new ConsumerPosition.Offsets(null, targetOffsets)),
         PARTITIONS * MSGS_PER_PARTITION,
         RECORD_DESERIALIZER,
         NOOP_FILTER,
-        PollingSettings.createDefault()
+        PollingSettings.createDefault(),
+        CURSOR_MOCK
     );
 
     var expectedValues = SENT_RECORDS.stream()
@@ -213,50 +227,45 @@ class RecordEmitterTest extends AbstractIntegrationTest {
 
   @Test
   void pollWithTimestamps() {
-    Map<TopicPartition, Long> targetTimestamps = new HashMap<>();
-    final Map<TopicPartition, List<Record>> perPartition =
-        SENT_RECORDS.stream().collect(Collectors.groupingBy((r) -> r.tp));
-    for (int i = 0; i < PARTITIONS; i++) {
-      final List<Record> records = perPartition.get(new TopicPartition(TOPIC, i));
-      int randRecordIdx = ThreadLocalRandom.current().nextInt(records.size());
-      log.info("partition: {} position: {}", i, randRecordIdx);
-      targetTimestamps.put(
-          new TopicPartition(TOPIC, i),
-          records.get(randRecordIdx).getTimestamp()
-      );
-    }
+    var tsStats = SENT_RECORDS.stream().mapToLong(Record::getTimestamp).summaryStatistics();
+    //choosing ts in the middle
+    long targetTimestamp = tsStats.getMin() + ((tsStats.getMax() - tsStats.getMin()) / 2);
 
     var forwardEmitter = new ForwardEmitter(
         this::createConsumer,
-        new ConsumerPosition(TIMESTAMP, TOPIC, targetTimestamps),
+        new ConsumerPosition(FROM_TIMESTAMP, TOPIC, List.of(), targetTimestamp, null),
         PARTITIONS * MSGS_PER_PARTITION,
         RECORD_DESERIALIZER,
         NOOP_FILTER,
-        PollingSettings.createDefault()
+        PollingSettings.createDefault(),
+        CURSOR_MOCK
+    );
+
+    expectEmitter(
+        forwardEmitter,
+        SENT_RECORDS.stream()
+            .filter(r -> r.getTimestamp() >= targetTimestamp)
+            .map(Record::getValue)
+            .collect(Collectors.toList())
     );
 
     var backwardEmitter = new BackwardEmitter(
         this::createConsumer,
-        new ConsumerPosition(TIMESTAMP, TOPIC, targetTimestamps),
+        new ConsumerPosition(TO_TIMESTAMP, TOPIC, List.of(), targetTimestamp, null),
         PARTITIONS * MSGS_PER_PARTITION,
         RECORD_DESERIALIZER,
         NOOP_FILTER,
-        PollingSettings.createDefault()
+        PollingSettings.createDefault(),
+        CURSOR_MOCK
     );
 
-    var expectedValues = SENT_RECORDS.stream()
-        .filter(r -> r.getTimestamp() >= targetTimestamps.get(r.getTp()))
-        .map(Record::getValue)
-        .collect(Collectors.toList());
-
-    expectEmitter(forwardEmitter, expectedValues);
-
-    expectedValues = SENT_RECORDS.stream()
-        .filter(r -> r.getTimestamp() < targetTimestamps.get(r.getTp()))
-        .map(Record::getValue)
-        .collect(Collectors.toList());
-
-    expectEmitter(backwardEmitter, expectedValues);
+    expectEmitter(
+        backwardEmitter,
+        SENT_RECORDS.stream()
+            .filter(r -> r.getTimestamp() < targetTimestamp)
+            .map(Record::getValue)
+            .collect(Collectors.toList())
+    );
   }
 
   @Test
@@ -269,11 +278,13 @@ class RecordEmitterTest extends AbstractIntegrationTest {
 
     var backwardEmitter = new BackwardEmitter(
         this::createConsumer,
-        new ConsumerPosition(OFFSET, TOPIC, targetOffsets),
+        new ConsumerPosition(TO_OFFSET, TOPIC, List.copyOf(targetOffsets.keySet()), null,
+            new ConsumerPosition.Offsets(null, targetOffsets)),
         numMessages,
         RECORD_DESERIALIZER,
         NOOP_FILTER,
-        PollingSettings.createDefault()
+        PollingSettings.createDefault(),
+        CURSOR_MOCK
     );
 
     var expectedValues = SENT_RECORDS.stream()
@@ -296,11 +307,13 @@ class RecordEmitterTest extends AbstractIntegrationTest {
 
     var backwardEmitter = new BackwardEmitter(
         this::createConsumer,
-        new ConsumerPosition(OFFSET, TOPIC, offsets),
+        new ConsumerPosition(TO_OFFSET, TOPIC, List.copyOf(offsets.keySet()), null,
+            new ConsumerPosition.Offsets(null, offsets)),
         100,
         RECORD_DESERIALIZER,
         NOOP_FILTER,
-        PollingSettings.createDefault()
+        PollingSettings.createDefault(),
+        CURSOR_MOCK
     );
 
     expectEmitter(backwardEmitter,

+ 3 - 8
kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/SendAndReadTests.java

@@ -7,8 +7,7 @@ import com.provectus.kafka.ui.AbstractIntegrationTest;
 import com.provectus.kafka.ui.model.ConsumerPosition;
 import com.provectus.kafka.ui.model.CreateTopicMessageDTO;
 import com.provectus.kafka.ui.model.KafkaCluster;
-import com.provectus.kafka.ui.model.SeekDirectionDTO;
-import com.provectus.kafka.ui.model.SeekTypeDTO;
+import com.provectus.kafka.ui.model.PollingModeDTO;
 import com.provectus.kafka.ui.model.TopicMessageDTO;
 import com.provectus.kafka.ui.model.TopicMessageEventDTO;
 import com.provectus.kafka.ui.serdes.builtin.Int32Serde;
@@ -20,6 +19,7 @@ import io.confluent.kafka.schemaregistry.avro.AvroSchema;
 import io.confluent.kafka.schemaregistry.json.JsonSchema;
 import io.confluent.kafka.schemaregistry.protobuf.ProtobufSchema;
 import java.time.Duration;
+import java.util.List;
 import java.util.Map;
 import java.util.Objects;
 import java.util.UUID;
@@ -500,15 +500,10 @@ public class SendAndReadTests extends AbstractIntegrationTest {
         TopicMessageDTO polled = messagesService.loadMessages(
                 targetCluster,
                 topic,
-                new ConsumerPosition(
-                    SeekTypeDTO.BEGINNING,
-                    topic,
-                    Map.of(new TopicPartition(topic, 0), 0L)
-                ),
+                new ConsumerPosition(PollingModeDTO.EARLIEST, topic, List.of(), null, null),
                 null,
                 null,
                 1,
-                SeekDirectionDTO.FORWARD,
                 msgToSend.getKeySerde().get(),
                 msgToSend.getValueSerde().get()
             ).filter(e -> e.getType().equals(TopicMessageEventDTO.TypeEnum.MESSAGE))

+ 149 - 458
kafka-ui-contract/src/main/resources/swagger/kafka-ui-api.yaml

@@ -625,25 +625,6 @@ paths:
               schema:
                 $ref: '#/components/schemas/TopicSerdeSuggestion'
 
-  /api/smartfilters/testexecutions:
-    put:
-      tags:
-        - Messages
-      summary: executeSmartFilterTest
-      operationId: executeSmartFilterTest
-      requestBody:
-        content:
-          application/json:
-            schema:
-              $ref: '#/components/schemas/SmartFilterTestExecution'
-      responses:
-        200:
-          description: OK
-          content:
-            application/json:
-              schema:
-                $ref: '#/components/schemas/SmartFilterTestExecutionResult'
-
 
   /api/clusters/{clusterName}/topics/{topicName}/messages:
     get:
@@ -763,6 +744,119 @@ paths:
         404:
           description: Not found
 
+  /api/clusters/{clusterName}/topics/{topicName}/smartfilters:
+    post:
+      tags:
+        - Messages
+      summary: registerFilter
+      operationId: registerFilter
+      parameters:
+        - name: clusterName
+          in: path
+          required: true
+          schema:
+            type: string
+        - name: topicName
+          in: path
+          required: true
+          schema:
+            type: string
+      requestBody:
+        content:
+          application/json:
+            schema:
+              $ref: '#/components/schemas/MessageFilterRegistration'
+      responses:
+        200:
+          description: OK
+          content:
+            application/json:
+              schema:
+                $ref: '#/components/schemas/MessageFilterId'
+
+
+  /api/clusters/{clusterName}/topics/{topicName}/messages/v2:
+    get:
+      tags:
+        - Messages
+      summary: getTopicMessagesV2
+      operationId: getTopicMessagesV2
+      parameters:
+        - name: clusterName
+          in: path
+          required: true
+          schema:
+            type: string
+        - name: topicName
+          in: path
+          required: true
+          schema:
+            type: string
+        - name: mode
+          in: query
+          description: Messages polling mode
+          required: true
+          schema:
+            $ref: "#/components/schemas/PollingMode"
+        - name: partitions
+          in: query
+          schema:
+            type: array
+            description: List of target partitions (all partitions if not provided)
+            items:
+              type: integer
+        - name: limit
+          in: query
+          description: Max number of messages that can be returned
+          schema:
+            type: integer
+        - name: stringFilter
+          in: query
+          description: string that the message key or content must contain
+          schema:
+            type: string
+        - name: smartFilterId
+          in: query
+          description: filter id that was registered beforehand
+          schema:
+            type: string
+        - name: offset
+          in: query
+          description: message offset to read from / to
+          schema:
+            type: integer
+            format: int64
+        - name: timestamp
+          in: query
+          description: timestamp (in ms) to read from / to
+          schema:
+            type: integer
+            format: int64
+        - name: keySerde
+          in: query
+          description: "Serde that should be used for deserialization. Will be chosen automatically if not set."
+          schema:
+            type: string
+        - name: valueSerde
+          in: query
+          description: "Serde that should be used for deserialization. Will be chosen automatically if not set."
+          schema:
+            type: string
+        - name: cursor
+          in: query
+          description: "id of the cursor for pagination"
+          schema:
+            type: string
+      responses:
+        200:
+          description: OK
+          content:
+            text/event-stream:
+              schema:
+                type: array
+                items:
+                  $ref: '#/components/schemas/TopicMessageEvent'
+
   /api/clusters/{clusterName}/topics/{topicName}/activeproducers:
     get:
       tags:
@@ -1776,188 +1870,6 @@ paths:
         404:
           description: Not found
 
-  /api/clusters/{clusterName}/acls:
-    get:
-      tags:
-        - Acls
-      summary: listKafkaAcls
-      operationId: listAcls
-      parameters:
-        - name: clusterName
-          in: path
-          required: true
-          schema:
-            type: string
-        - name: resourceType
-          in: query
-          required: false
-          schema:
-            $ref: '#/components/schemas/KafkaAclResourceType'
-        - name: resourceName
-          in: query
-          required: false
-          schema:
-            type: string
-        - name: namePatternType
-          in: query
-          required: false
-          schema:
-            $ref: '#/components/schemas/KafkaAclNamePatternType'
-      responses:
-        200:
-          description: OK
-          content:
-            application/json:
-              schema:
-                type: array
-                items:
-                  $ref: '#/components/schemas/KafkaAcl'
-
-  /api/clusters/{clusterName}/acl/csv:
-    get:
-      tags:
-        - Acls
-      summary: getAclAsCsv
-      operationId: getAclAsCsv
-      parameters:
-        - name: clusterName
-          in: path
-          required: true
-          schema:
-            type: string
-      responses:
-        200:
-          description: OK
-          content:
-            text/plain:
-              schema:
-                type: string
-    post:
-      tags:
-        - Acls
-      summary: syncAclsCsv
-      operationId: syncAclsCsv
-      parameters:
-        - name: clusterName
-          in: path
-          required: true
-          schema:
-            type: string
-      requestBody:
-        content:
-          text/plain:
-            schema:
-              type: string
-      responses:
-        200:
-          description: OK
-
-  /api/clusters/{clusterName}/acl:
-    post:
-      tags:
-        - Acls
-      summary: createAcl
-      operationId: createAcl
-      parameters:
-        - name: clusterName
-          in: path
-          required: true
-          schema:
-            type: string
-      requestBody:
-        content:
-          application/json:
-            schema:
-              $ref: '#/components/schemas/KafkaAcl'
-      responses:
-        200:
-          description: OK
-
-    delete:
-      tags:
-        - Acls
-      summary: deleteAcl
-      operationId: deleteAcl
-      parameters:
-        - name: clusterName
-          in: path
-          required: true
-          schema:
-            type: string
-      requestBody:
-        content:
-          application/json:
-            schema:
-              $ref: '#/components/schemas/KafkaAcl'
-      responses:
-        200:
-          description: OK
-        404:
-          description: Acl not found
-
-  /api/clusters/{clusterName}/acl/consumer:
-    post:
-      tags:
-        - Acls
-      summary: createConsumerAcl
-      operationId: createConsumerAcl
-      parameters:
-        - name: clusterName
-          in: path
-          required: true
-          schema:
-            type: string
-      requestBody:
-        content:
-          application/json:
-            schema:
-              $ref: '#/components/schemas/CreateConsumerAcl'
-      responses:
-        200:
-          description: OK
-
-  /api/clusters/{clusterName}/acl/producer:
-    post:
-      tags:
-        - Acls
-      summary: createProducerAcl
-      operationId: createProducerAcl
-      parameters:
-        - name: clusterName
-          in: path
-          required: true
-          schema:
-            type: string
-      requestBody:
-        content:
-          application/json:
-            schema:
-              $ref: '#/components/schemas/CreateProducerAcl'
-      responses:
-        200:
-          description: OK
-
-  /api/clusters/{clusterName}/acl/streamApp:
-    post:
-      tags:
-        - Acls
-      summary: createStreamAppAcl
-      operationId: createStreamAppAcl
-      parameters:
-        - name: clusterName
-          in: path
-          required: true
-          schema:
-            type: string
-      requestBody:
-        content:
-          application/json:
-            schema:
-              $ref: '#/components/schemas/CreateStreamAppAcl'
-      responses:
-        200:
-          description: OK
-
   /api/authorization:
     get:
       tags:
@@ -2047,7 +1959,7 @@ paths:
               properties:
                 file:
                   type: string
-                  format: binary
+                  format: filepart
       responses:
         200:
           description: OK
@@ -2145,26 +2057,6 @@ components:
             type: string
             enum:
               - DYNAMIC_CONFIG
-        build:
-          type: object
-          properties:
-            commitId:
-              type: string
-            version:
-              type: string
-            buildTime:
-              type: string
-            isLatestRelease:
-              type: boolean
-        latestRelease:
-          type: object
-          properties:
-            versionTag:
-              type: string
-            publishedAt:
-              type: string
-            htmlUrl:
-              type: string
 
     Cluster:
       type: object
@@ -2200,8 +2092,6 @@ components:
               - KAFKA_CONNECT
               - KSQL_DB
               - TOPIC_DELETION
-              - KAFKA_ACL_VIEW # get ACLs listing
-              - KAFKA_ACL_EDIT # create & delete ACLs
       required:
         - id
         - name
@@ -2605,16 +2495,6 @@ components:
           type: number
         bytesOutPerSec:
           type: number
-        partitionsLeader:
-          type: integer
-        partitions:
-          type: integer
-        inSyncPartitions:
-          type: integer
-        partitionsSkew:
-          type: number
-        leadersSkew:
-          type: number
       required:
         - id
 
@@ -2672,10 +2552,6 @@ components:
           format: int64
 
     ConsumerGroup:
-      discriminator:
-        propertyName: inherit
-        mapping:
-          details: "#/components/schemas/ConsumerGroupDetails"
       type: object
       properties:
         groupId:
@@ -2692,7 +2568,7 @@ components:
           $ref: "#/components/schemas/ConsumerGroupState"
         coordinator:
           $ref: "#/components/schemas/Broker"
-        consumerLag:
+        messagesBehind:
           type: integer
           format: int64
           description: null if consumer group has no offsets committed
@@ -2705,8 +2581,6 @@ components:
         - NAME
         - MEMBERS
         - STATE
-        - MESSAGES_BEHIND
-        - TOPIC_NUM
 
     ConsumerGroupsPageResponse:
       type: object
@@ -2718,37 +2592,6 @@ components:
           items:
             $ref: '#/components/schemas/ConsumerGroup'
 
-    SmartFilterTestExecution:
-      type: object
-      required: [filterCode]
-      properties:
-        filterCode:
-          type: string
-        key:
-          type: string
-        value:
-          type: string
-        headers:
-          type: object
-          additionalProperties:
-            type: string
-        partition:
-          type: integer
-        offset:
-          type: integer
-          format: int64
-        timestampMs:
-          type: integer
-          format: int64
-
-    SmartFilterTestExecutionResult:
-      type: object
-      properties:
-        result:
-          type: boolean
-        error:
-          type: string
-
     CreateTopicMessage:
       type: object
       properties:
@@ -2783,13 +2626,14 @@ components:
             - MESSAGE
             - CONSUMING
             - DONE
-            - EMIT_THROTTLING
         message:
           $ref: "#/components/schemas/TopicMessage"
         phase:
           $ref: "#/components/schemas/TopicMessagePhase"
         consuming:
           $ref: "#/components/schemas/TopicMessageConsuming"
+        cursor:
+          $ref: "#/components/schemas/TopicMessageNextPageCursor"
 
     TopicMessagePhase:
       type: object
@@ -2819,6 +2663,11 @@ components:
         filterApplyErrors:
           type: integer
 
+    TopicMessageNextPageCursor:
+      type: object
+      properties:
+        id:
+          type: string
 
     TopicMessage:
       type: object
@@ -2891,6 +2740,29 @@ components:
         - TIMESTAMP
         - LATEST
 
+    MessageFilterRegistration:
+      type: object
+      properties:
+        filterCode:
+          type: string
+
+    MessageFilterId:
+      type: object
+      properties:
+        id:
+          type: string
+
+    PollingMode:
+      type: string
+      enum:
+        - FROM_OFFSET
+        - TO_OFFSET
+        - FROM_TIMESTAMP
+        - TO_TIMESTAMP
+        - LATEST
+        - EARLIEST
+        - TAILING
+
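PollingMode is the single replacement for the old seekType/seekDirection pair: the FROM_*/TO_* modes consume the offset or timestamp bound from the v2 endpoint's query parameters, while EARLIEST, LATEST, and TAILING need no bound. Paging works by resending the original parameters plus the cursor id from the previous DONE event; an illustrative sketch of the two query strings (values are made up):

    // First page: full parameter set.
    const firstPage = '?mode=FROM_OFFSET&offset=1200&limit=100';
    // Next page: same parameters plus the cursor id from the last DONE event.
    const nextPage = `?mode=FROM_OFFSET&offset=1200&limit=100&cursor=${cursorId}`;
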
     MessageFilterType:
       type: string
       enum:
@@ -2941,7 +2813,7 @@ components:
         endOffset:
           type: integer
           format: int64
-        consumerLag:
+        messagesBehind:
           type: integer
           format: int64
           description: null if consumer group has no offsets committed
@@ -3031,10 +2903,6 @@ components:
           type: string
         schemaType:
           $ref: '#/components/schemas/SchemaType'
-        references:
-          type: array
-          items:
-            $ref: '#/components/schemas/SchemaReference'
       required:
         - id
         - subject
@@ -3052,30 +2920,13 @@ components:
         schema:
           type: string
         schemaType:
-          $ref: '#/components/schemas/SchemaType' # upon updating a schema, the type of existing schema can't be changed
-        references:
-          type: array
-          items:
-            $ref: '#/components/schemas/SchemaReference'
+          $ref: '#/components/schemas/SchemaType'
+          # upon updating a schema, the type of existing schema can't be changed
       required:
         - subject
         - schema
         - schemaType
 
-    SchemaReference:
-      type: object
-      properties:
-        name:
-          type: string
-        subject:
-          type: string
-        version:
-          type: integer
-      required:
-        - name
-        - subject
-        - version
-
     CompatibilityLevel:
       type: object
       properties:
@@ -3638,7 +3489,6 @@ components:
         - MESSAGES_READ
         - MESSAGES_PRODUCE
         - MESSAGES_DELETE
-        - RESTART
 
     ResourceType:
       type: string
@@ -3650,126 +3500,6 @@ components:
         - SCHEMA
         - CONNECT
         - KSQL
-        - ACL
-        - AUDIT
-
-    KafkaAcl:
-      type: object
-      required: [resourceType, resourceName, namePatternType, principal, host, operation, permission]
-      properties:
-        resourceType:
-          $ref: '#/components/schemas/KafkaAclResourceType'
-        resourceName:
-          type: string # "*" if acl can be applied to any resource of given type
-        namePatternType:
-          $ref: '#/components/schemas/KafkaAclNamePatternType'
-        principal:
-          type: string
-        host:
-          type: string
-        operation:
-          type: string
-          enum:
-            - UNKNOWN # Unknown operation, need to update mapping code on BE
-            - ALL # Cluster, Topic, Group
-            - READ  # Topic, Group
-            - WRITE # Topic, TransactionalId
-            - CREATE # Cluster, Topic
-            - DELETE  # Topic, Group
-            - ALTER  # Cluster, Topic,
-            - DESCRIBE # Cluster, Topic, Group, TransactionalId, DelegationToken
-            - CLUSTER_ACTION # Cluster
-            - DESCRIBE_CONFIGS # Cluster, Topic
-            - ALTER_CONFIGS   # Cluster, Topic
-            - IDEMPOTENT_WRITE # Cluster
-            - CREATE_TOKENS
-            - DESCRIBE_TOKENS
-        permission:
-          type: string
-          enum:
-            - ALLOW
-            - DENY
-
-    CreateConsumerAcl:
-      type: object
-      required: [principal, host]
-      properties:
-        principal:
-          type: string
-        host:
-          type: string
-        topics:
-          type: array
-          items:
-            type: string
-        topicsPrefix:
-          type: string
-        consumerGroups:
-          type: array
-          items:
-            type: string
-        consumerGroupsPrefix:
-          type: string
-
-    CreateProducerAcl:
-      type: object
-      required: [principal, host]
-      properties:
-        principal:
-          type: string
-        host:
-          type: string
-        topics:
-          type: array
-          items:
-            type: string
-        topicsPrefix:
-          type: string
-        transactionalId:
-          type: string
-        transactionsIdPrefix:
-          type: string
-        idempotent:
-          type: boolean
-          default: false
-
-    CreateStreamAppAcl:
-      type: object
-      required: [principal, host, applicationId, inputTopics, outputTopics]
-      properties:
-        principal:
-          type: string
-        host:
-          type: string
-        inputTopics:
-          type: array
-          items:
-            type: string
-        outputTopics:
-          type: array
-          items:
-            type: string
-        applicationId:
-          nullable: false
-          type: string
-
-    KafkaAclResourceType:
-      type: string
-      enum:
-        - UNKNOWN # Unknown operation, need to update mapping code on BE
-        - TOPIC
-        - GROUP
-        - CLUSTER
-        - TRANSACTIONAL_ID
-        - DELEGATION_TOKEN
-        - USER
-
-    KafkaAclNamePatternType:
-      type: string
-      enum:
-        - MATCH
-        - LITERAL
-        - PREFIXED
 
     RestartRequest:
       type: object
@@ -3906,28 +3636,9 @@ components:
                               type: array
                               items:
                                 $ref: '#/components/schemas/Action'
-            webclient:
-              type: object
-              properties:
-                maxInMemoryBufferSize:
-                  type: string
-                  description: "examples: 20, 12KB, 5MB"
             kafka:
               type: object
               properties:
-                polling:
-                  type: object
-                  properties:
-                    pollTimeoutMs:
-                      type: integer
-                    maxPageSize:
-                      type: integer
-                    defaultPageSize:
-                      type: integer
-                adminClientTimeout:
-                  type: integer
-                internalTopicPrefix:
-                  type: string
                 clusters:
                   type: array
                   items:
@@ -4056,9 +3767,7 @@ components:
                               type: array
                               items:
                                 type: string
-                            fieldsNamePattern:
-                              type: string
-                            maskingCharsReplacement:
+                            pattern:
                               type: array
                               items:
                                 type: string
@@ -4071,21 +3780,3 @@ components:
                       pollingThrottleRate:
                         type: integer
                         format: int64
-                      audit:
-                        type: object
-                        properties:
-                          level:
-                            type: string
-                            enum: [ "ALL", "ALTER_ONLY" ]
-                          topic:
-                            type: string
-                          auditTopicsPartitions:
-                            type: integer
-                          topicAuditEnabled:
-                            type: boolean
-                          consoleAuditEnabled:
-                            type: boolean
-                          auditTopicProperties:
-                            type: object
-                            additionalProperties:
-                              type: string

+ 2 - 2
kafka-ui-react-app/package.json

@@ -21,7 +21,7 @@
     "fetch-mock": "^9.11.0",
     "jest": "^29.4.3",
     "jest-watch-typeahead": "^2.2.2",
-    "json-schema-faker": "^0.5.0-rcv.44",
+    "json-schema-faker": "^0.5.6",
     "jsonpath-plus": "^7.2.0",
     "lodash": "^4.17.21",
     "lossless-json": "^2.0.8",
@@ -109,4 +109,4 @@
     "node": "v18.17.1",
     "pnpm": "^8.6.12"
   }
-}
+}

+ 19 - 15
kafka-ui-react-app/pnpm-lock.yaml

@@ -57,8 +57,8 @@ dependencies:
     specifier: ^2.2.2
     version: 2.2.2(jest@29.6.4)
   json-schema-faker:
-    specifier: ^0.5.0-rcv.44
-    version: 0.5.3
+    specifier: ^0.5.6
+    version: 0.5.6
   jsonpath-plus:
     specifier: ^7.2.0
     version: 7.2.0
@@ -91,7 +91,7 @@ dependencies:
     version: 7.43.1(react@18.2.0)
   react-hot-toast:
     specifier: ^2.4.0
-    version: 2.4.1(csstype@3.1.2)(react-dom@18.1.0)(react@18.2.0)
+    version: 2.4.1(csstype@3.1.3)(react-dom@18.1.0)(react@18.2.0)
   react-is:
     specifier: ^18.2.0
     version: 18.2.0
@@ -2606,7 +2606,7 @@ packages:
       normalize-path: 3.0.0
       readdirp: 3.6.0
     optionalDependencies:
-      fsevents: 2.3.2
+      fsevents: 2.3.3
 
   /ci-info@3.3.1:
     resolution: {integrity: sha512-SXgeMX9VwDe7iFFaEWkA5AstuER9YKqy4EhHqr4DVqkwmD9rpVimkMKWHdjn30Ja45txyjhSn63lVX69eVCckg==}
@@ -2808,6 +2808,10 @@ packages:
   /csstype@3.1.2:
     resolution: {integrity: sha512-I7K1Uu0MBPzaFKg4nI5Q7Vs2t+3gWWW648spaF+Rg7pI9ds18Ugn+lvg4SHczUdKlHI5LWBXyqfS8+DufyBsgQ==}
 
+  /csstype@3.1.3:
+    resolution: {integrity: sha512-M1uQkMl8rQK/szD0LNhtqxIPLpimGm8sOBwU7lLnCpSbTyY3yeU1Vc7l4KT5zT4s/yOxHH5O7tIuuLOCnLADRw==}
+    dev: false
+
   /damerau-levenshtein@1.0.8:
     resolution: {integrity: sha512-sdQSFB7+llfUcQHUQO3+B8ERRj0Oa4w9POWMI/puGtuf7gFywGmkaLCElnudfTiKZV+NvHqL0ifzdrI8Ro7ESA==}
     dev: true
@@ -3741,8 +3745,8 @@ packages:
   /fs.realpath@1.0.0:
     resolution: {integrity: sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==}
 
-  /fsevents@2.3.2:
-    resolution: {integrity: sha512-xiqMQR4xAeHTuB9uWm+fFRcIOgKBMiOBP+eXiyT7jsgVCq1bkVygt00oASowB7EdtpOHaaPgKt812P9ab+DDKA==}
+  /fsevents@2.3.3:
+    resolution: {integrity: sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==}
     engines: {node: ^8.16.0 || ^10.6.0 || >=11.0.0}
     os: [darwin]
     requiresBuild: true
@@ -3903,12 +3907,12 @@ packages:
   /globrex@0.1.2:
     resolution: {integrity: sha512-uHJgbwAMwNFf5mLst7IWLNg14x1CkeqglJb/K3doi4dw6q2IvAAmM/Y81kevy83wP+Sst+nutFTYOGg3d1lsxg==}
 
-  /goober@2.1.10(csstype@3.1.2):
+  /goober@2.1.10(csstype@3.1.3):
     resolution: {integrity: sha512-7PpuQMH10jaTWm33sQgBQvz45pHR8N4l3Cu3WMGEWmHShAcTuuP7I+5/DwKo39fwti5A80WAjvqgz6SSlgWmGA==}
     peerDependencies:
       csstype: ^3.0.10
     dependencies:
-      csstype: 3.1.2
+      csstype: 3.1.3
     dev: false
 
   /gopd@1.0.1:
@@ -4544,7 +4548,7 @@ packages:
       micromatch: 4.0.5
       walker: 1.0.8
     optionalDependencies:
-      fsevents: 2.3.2
+      fsevents: 2.3.3
     dev: false
 
   /jest-leak-detector@29.6.3:
@@ -4903,8 +4907,8 @@ packages:
   /json-parse-even-better-errors@2.3.1:
     resolution: {integrity: sha512-xyFwyhro/JEof6Ghe2iz2NcXoj2sloNsWr/XsERDK/oiPCfaNhl5ONfp+jQdAZRQQ0IJWNzH9zIZF7li91kh2w==}
 
-  /json-schema-faker@0.5.3:
-    resolution: {integrity: sha512-BeIrR0+YSrTbAR9dOMnjbFl1MvHyXnq+Wpdw1FpWZDHWKLzK229hZ5huyPcmzFUfVq1ODwf40WdGVoE266UBUg==}
+  /json-schema-faker@0.5.6:
+    resolution: {integrity: sha512-u/cFC26/GDxh2vPiAC8B8xVvpXAW+QYtG2mijEbKrimCk8IHtiwQBjCE8TwvowdhALWq9IcdIWZ+/8ocXvdL3Q==}
     hasBin: true
     dependencies:
       json-schema-ref-parser: 6.1.0
@@ -5711,14 +5715,14 @@ packages:
       react: 18.2.0
     dev: false
 
-  /react-hot-toast@2.4.1(csstype@3.1.2)(react-dom@18.1.0)(react@18.2.0):
+  /react-hot-toast@2.4.1(csstype@3.1.3)(react-dom@18.1.0)(react@18.2.0):
     resolution: {integrity: sha512-j8z+cQbWIM5LY37pR6uZR6D4LfseplqnuAO4co4u8917hBUvXlEqyP1ZzqVLcqoyUesZZv/ImreoCeHVDpE5pQ==}
     engines: {node: '>=10'}
     peerDependencies:
       react: '>=16'
       react-dom: '>=16'
     dependencies:
-      goober: 2.1.10(csstype@3.1.2)
+      goober: 2.1.10(csstype@3.1.3)
       react: 18.2.0
       react-dom: 18.1.0(react@18.2.0)
     transitivePeerDependencies:
@@ -6022,7 +6026,7 @@ packages:
     engines: {node: '>=14.18.0', npm: '>=8.0.0'}
     hasBin: true
     optionalDependencies:
-      fsevents: 2.3.2
+      fsevents: 2.3.3
 
   /run-async@2.4.1:
     resolution: {integrity: sha512-tvVnVv01b8c1RrA6Ep7JkStj85Guv/YrMcwqYQnwjsAS2cTmmPGBBjAjpCW7RrSodNSoE2/qg9O4bceNvUuDgQ==}
@@ -6755,7 +6759,7 @@ packages:
       rollup: 3.7.3
       sass: 1.66.1
     optionalDependencies:
-      fsevents: 2.3.2
+      fsevents: 2.3.3
 
   /w3c-hr-time@1.0.2:
     resolution: {integrity: sha512-z8P5DvDNjKDoFIHK7q8r8lackT6l+jo/Ye3HOle7l9nICP9lf1Ci25fy9vHd0JOWewkIFzXIEig3TdKT7JQ5fQ==}

+ 124 - 91
kafka-ui-react-app/src/components/Topics/Topic/Messages/Filters/Filters.tsx

@@ -3,6 +3,7 @@ import 'react-datepicker/dist/react-datepicker.css';
 import {
   MessageFilterType,
   Partition,
+  PollingMode,
   SeekDirection,
   SeekType,
   SerdeUsage,
@@ -10,6 +11,7 @@ import {
   TopicMessageConsuming,
   TopicMessageEvent,
   TopicMessageEventTypeEnum,
+  TopicMessageNextPageCursor,
 } from 'generated-sources';
 import React, { useContext } from 'react';
 import omitBy from 'lodash/omitBy';
@@ -35,18 +37,22 @@ import CloseIcon from 'components/common/Icons/CloseIcon';
 import ClockIcon from 'components/common/Icons/ClockIcon';
 import ArrowDownIcon from 'components/common/Icons/ArrowDownIcon';
 import FileIcon from 'components/common/Icons/FileIcon';
-import { useTopicDetails } from 'lib/hooks/api/topics';
+import { useRegisterFilter, useTopicDetails } from 'lib/hooks/api/topics';
 import { InputLabel } from 'components/common/Input/InputLabel.styled';
 import { getSerdeOptions } from 'components/Topics/Topic/SendMessage/utils';
 import { useSerdes } from 'lib/hooks/api/topicMessages';
+import { getTopicMessgesLastLoadedPage } from 'redux/reducers/topicMessages/selectors';
+import { useAppSelector } from 'lib/hooks/redux';
+import { showAlert } from 'lib/errorHandling';
 
-import * as S from './Filters.styled';
+import { getDefaultSerdeName } from './getDefaultSerdeName';
 import {
   filterOptions,
   getOffsetFromSeekToParam,
   getSelectedPartitionsFromSeekToParam,
   getTimestampFromSeekToParam,
 } from './utils';
+import * as S from './Filters.styled';
 
 type Query = Record<string, string | string[] | number>;
 
@@ -55,12 +61,18 @@ export interface FiltersProps {
   meta: TopicMessageConsuming;
   isFetching: boolean;
   messageEventType?: string;
+  cursor?: TopicMessageNextPageCursor;
+  currentPage: number;
   addMessage(content: { message: TopicMessage; prepend: boolean }): void;
   resetMessages(): void;
   updatePhase(phase: string): void;
   updateMeta(meta: TopicMessageConsuming): void;
   setIsFetching(status: boolean): void;
   setMessageType(messageType: string): void;
+  updateCursor(cursor?: TopicMessageNextPageCursor): void;
+  setCurrentPage(page: number): void;
+  setLastLoadedPage(page: number): void;
+  resetAllMessages(): void;
 }
 
 export interface MessageFilters {
@@ -85,6 +97,7 @@ const Filters: React.FC<FiltersProps> = ({
   phaseMessage,
   meta: { elapsedMs, bytesConsumed, messagesConsumed, filterApplyErrors },
   isFetching,
+  currentPage,
   addMessage,
   resetMessages,
   updatePhase,
@@ -92,19 +105,25 @@ const Filters: React.FC<FiltersProps> = ({
   setIsFetching,
   setMessageType,
   messageEventType,
+  updateCursor,
+  setCurrentPage,
+  setLastLoadedPage,
+  resetAllMessages,
 }) => {
   const { clusterName, topicName } = useAppParams<RouteParamsClusterTopic>();
   const location = useLocation();
   const navigate = useNavigate();
   const [searchParams] = useSearchParams();
 
-  const page = searchParams.get('page');
-
   const { data: topic } = useTopicDetails({ clusterName, topicName });
 
+  const registerFilter = useRegisterFilter({ clusterName, topicName });
+
+  const lastLoadedPage = useAppSelector(getTopicMessgesLastLoadedPage);
+
   const partitions = topic?.partitions || [];
 
-  const { seekDirection, isLive, changeSeekDirection } =
+  const { seekDirection, isLive, changeSeekDirection, page, setPage } =
     useContext(TopicMessagesContext);
 
   const { value: isOpen, toggle } = useBoolean();
@@ -131,11 +150,18 @@ const Filters: React.FC<FiltersProps> = ({
   const [timestamp, setTimestamp] = React.useState<Date | null>(
     getTimestampFromSeekToParam(searchParams)
   );
+
+  const { data: serdes = {} } = useSerdes({
+    clusterName,
+    topicName,
+    use: SerdeUsage.DESERIALIZE,
+  });
+
   const [keySerde, setKeySerde] = React.useState<string>(
-    searchParams.get('keySerde') || ''
+    searchParams.get('keySerde') || getDefaultSerdeName(serdes.key || [])
   );
   const [valueSerde, setValueSerde] = React.useState<string>(
-    searchParams.get('valueSerde') || ''
+    searchParams.get('valueSerde') || getDefaultSerdeName(serdes.value || [])
   );
 
   const [savedFilters, setSavedFilters] = React.useState<MessageFilters[]>(
@@ -155,7 +181,7 @@ const Filters: React.FC<FiltersProps> = ({
       ? MessageFilterType.GROOVY_SCRIPT
       : MessageFilterType.STRING_CONTAINS
   );
-  const [query, setQuery] = React.useState<string>(searchParams.get('q') || '');
+  const [stringFilter, setStringFilter] = React.useState<string>('');
   const [isTailing, setIsTailing] = React.useState<boolean>(isLive);
 
   const isSeekTypeControlVisible = React.useMemo(
@@ -173,23 +199,12 @@ const Filters: React.FC<FiltersProps> = ({
     return false;
   }, [isSeekTypeControlVisible, currentSeekType, timestamp, isTailing]);
 
-  const partitionMap = React.useMemo(
-    () =>
-      partitions.reduce<Record<string, Partition>>(
-        (acc, partition) => ({
-          ...acc,
-          [partition.partition]: partition,
-        }),
-        {}
-      ),
-    [partitions]
-  );
-
   const handleClearAllFilters = () => {
     setCurrentSeekType(SeekType.OFFSET);
     setOffset('');
     setTimestamp(null);
-    setQuery('');
+    setStringFilter('');
+    setPage(1);
     changeSeekDirection(SeekDirection.FORWARD);
     getSelectedPartitionsFromSeekToParam(searchParams, partitions);
     setSelectedPartitions(
@@ -202,65 +217,60 @@ const Filters: React.FC<FiltersProps> = ({
     );
   };
 
-  const handleFiltersSubmit = (currentOffset: string) => {
-    const nextAttempt = Number(searchParams.get('attempt') || 0) + 1;
+  const getPollingMode = (): PollingMode => {
+    if (seekDirection === SeekDirection.FORWARD) {
+      if (offset && currentSeekType === SeekType.OFFSET)
+        return PollingMode.FROM_OFFSET;
+      if (timestamp && currentSeekType === SeekType.TIMESTAMP)
+        return PollingMode.FROM_TIMESTAMP;
+      return PollingMode.EARLIEST;
+    }
+    if (seekDirection === SeekDirection.BACKWARD) {
+      if (offset && currentSeekType === SeekType.OFFSET)
+        return PollingMode.TO_OFFSET;
+      if (timestamp && currentSeekType === SeekType.TIMESTAMP)
+        return PollingMode.TO_TIMESTAMP;
+      return PollingMode.LATEST;
+    }
+    if (seekDirection === SeekDirection.TAILING) return PollingMode.TAILING;
+    return PollingMode.LATEST;
+  };
+
+  const getSmartFilterId = async (code: string) => {
+    try {
+      const filterId = await registerFilter.mutateAsync({
+        filterCode: code,
+      });
+      return filterId;
+    } catch (e) {
+      showAlert('error', {
+        message: 'Error occurred while registering smart filter',
+      });
+      return '';
+    }
+  };
+
+  const handleFiltersSubmit = async (cursor?: TopicMessageNextPageCursor) => {
+    if (!keySerde || !valueSerde) return;
     const props: Query = {
-      q:
-        queryType === MessageFilterType.GROOVY_SCRIPT
-          ? activeFilter.code
-          : query,
-      filterQueryType: queryType,
-      attempt: nextAttempt,
+      mode: getPollingMode(),
       limit: PER_PAGE,
-      page: page || 0,
-      seekDirection,
+      stringFilter,
+      offset,
+      timestamp: timestamp?.getTime() || '',
       keySerde: keySerde || searchParams.get('keySerde') || '',
       valueSerde: valueSerde || searchParams.get('valueSerde') || '',
     };
 
-    if (isSeekTypeControlVisible) {
-      switch (seekDirection) {
-        case SeekDirection.FORWARD:
-          props.seekType = SeekType.BEGINNING;
-          break;
-        case SeekDirection.BACKWARD:
-        case SeekDirection.TAILING:
-          props.seekType = SeekType.LATEST;
-          break;
-        default:
-          props.seekType = currentSeekType;
-      }
-
-      if (offset && currentSeekType === SeekType.OFFSET) {
-        props.seekType = SeekType.OFFSET;
-      }
+    if (cursor?.id) props.cursor = cursor?.id;
 
-      if (timestamp && currentSeekType === SeekType.TIMESTAMP) {
-        props.seekType = SeekType.TIMESTAMP;
-      }
+    if (selectedPartitions.length !== partitions.length) {
+      props.partitions = selectedPartitions.map((p) => p.value);
+    }
 
-      const isSeekTypeWithSeekTo =
-        props.seekType === SeekType.TIMESTAMP ||
-        props.seekType === SeekType.OFFSET;
-
-      if (
-        selectedPartitions.length !== partitions.length ||
-        isSeekTypeWithSeekTo
-      ) {
-        // not everything in the partition is selected
-        props.seekTo = selectedPartitions.map(({ value }) => {
-          const offsetProperty =
-            seekDirection === SeekDirection.FORWARD ? 'offsetMin' : 'offsetMax';
-          const offsetBasedSeekTo =
-            currentOffset || partitionMap[value][offsetProperty];
-          const seekToOffset =
-            currentSeekType === SeekType.OFFSET
-              ? offsetBasedSeekTo
-              : timestamp?.getTime();
-
-          return `${value}::${seekToOffset || '0'}`;
-        });
-      }
+    if (queryType === MessageFilterType.GROOVY_SCRIPT) {
+      props.smartFilterId =
+        (await getSmartFilterId(activeFilter.code))?.id || '';
     }
 
     const newProps = omitBy(props, (v) => v === undefined || v === '');
@@ -272,6 +282,12 @@ const Filters: React.FC<FiltersProps> = ({
     });
   };
 
+  const handleSubmit = async () => {
+    setPage(1);
+    resetAllMessages();
+    await handleFiltersSubmit();
+  };
+
   const handleSSECancel = () => {
     if (!source.current) return;
     setIsFetching(false);
@@ -345,9 +361,15 @@ const Filters: React.FC<FiltersProps> = ({
   // eslint-disable-next-line consistent-return
   React.useEffect(() => {
     if (location.search?.length !== 0) {
+      if (page === currentPage) return () => {};
+      if (page <= lastLoadedPage) {
+        setCurrentPage(page);
+        return () => {};
+      }
+
       const url = `${BASE_PARAMS.basePath}/api/clusters/${encodeURIComponent(
         clusterName
-      )}/topics/${topicName}/messages${location.search}`;
+      )}/topics/${topicName}/messages/v2${location.search}`;
       const sse = new EventSource(url);
 
       source.current = sse;
@@ -358,7 +380,7 @@ const Filters: React.FC<FiltersProps> = ({
         setIsFetching(true);
       };
       sse.onmessage = ({ data }) => {
-        const { type, message, phase, consuming }: TopicMessageEvent =
+        const { type, message, phase, consuming, cursor }: TopicMessageEvent =
           JSON.parse(data);
         switch (type) {
           case TopicMessageEventTypeEnum.MESSAGE:
@@ -381,6 +403,10 @@ const Filters: React.FC<FiltersProps> = ({
             if (consuming && type) {
               setMessageType(type);
               updateMeta(consuming);
+              updateCursor(cursor);
+              setCurrentPage(page);
+              setLastLoadedPage(page);
+              handleFiltersSubmit(cursor);
             }
             break;
           default:
@@ -407,10 +433,15 @@ const Filters: React.FC<FiltersProps> = ({
     setIsFetching,
     updateMeta,
     updatePhase,
+    updateCursor,
+    setLastLoadedPage,
   ]);
+
   React.useEffect(() => {
     if (location.search?.length === 0) {
-      handleFiltersSubmit(offset);
+      setPage(1);
+      resetAllMessages();
+      handleFiltersSubmit();
     }
   }, [
     seekDirection,
@@ -418,32 +449,32 @@ const Filters: React.FC<FiltersProps> = ({
     activeFilter,
     currentSeekType,
     timestamp,
-    query,
+    stringFilter,
     location,
   ]);
+
   React.useEffect(() => {
-    handleFiltersSubmit(offset);
+    setPage(1);
+    resetAllMessages();
+    handleFiltersSubmit();
   }, [
     seekDirection,
     queryType,
-    activeFilter,
     currentSeekType,
-    timestamp,
-    query,
     seekDirection,
-    page,
+    keySerde,
+    valueSerde,
   ]);
 
+  React.useEffect(() => {
+    setPage(1);
+    resetAllMessages();
+  }, [selectedPartitions, offset, timestamp, stringFilter, activeFilter]);
+
   React.useEffect(() => {
     setIsTailing(isLive);
   }, [isLive]);
 
-  const { data: serdes = {} } = useSerdes({
-    clusterName,
-    topicName,
-    use: SerdeUsage.DESERIALIZE,
-  });
-
   return (
     <S.FiltersWrapper>
       <div>
@@ -530,9 +561,7 @@ const Filters: React.FC<FiltersProps> = ({
             buttonType="secondary"
             buttonSize="M"
             disabled={isSubmitDisabled}
-            onClick={() =>
-              isFetching ? handleSSECancel() : handleFiltersSubmit(offset)
-            }
+            onClick={() => (isFetching ? handleSSECancel() : handleSubmit())}
             style={{ fontWeight: 500 }}
           >
             {isFetching ? 'Cancel' : 'Submit'}
@@ -548,7 +577,11 @@ const Filters: React.FC<FiltersProps> = ({
         />
       </div>
       <S.ActiveSmartFilterWrapper>
-        <Search placeholder="Search" disabled={isTailing} />
+        <Search
+          placeholder="Search"
+          disabled={isTailing}
+          onChange={setStringFilter}
+        />
 
         <Button buttonType="secondary" buttonSize="M" onClick={toggle}>
           <PlusIcon />

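For reference, getPollingMode above implements this mapping from the old seekDirection/seekType pair onto the new enum:

    // seekDirection + seekType (value present)  ->  PollingMode
    // FORWARD  + OFFSET,    offset set          ->  FROM_OFFSET
    // FORWARD  + TIMESTAMP, timestamp set       ->  FROM_TIMESTAMP
    // FORWARD  otherwise                        ->  EARLIEST
    // BACKWARD + OFFSET,    offset set          ->  TO_OFFSET
    // BACKWARD + TIMESTAMP, timestamp set       ->  TO_TIMESTAMP
    // BACKWARD otherwise                        ->  LATEST
    // TAILING                                   ->  TAILING
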
+ 12 - 0
kafka-ui-react-app/src/components/Topics/Topic/Messages/Filters/FiltersContainer.ts

@@ -7,12 +7,18 @@ import {
   updateTopicMessagesPhase,
   setTopicMessagesFetchingStatus,
   setMessageEventType,
+  updateTopicMessagesCursor,
+  setTopicMessagesCurrentPage,
+  setTopicMessagesLastLoadedPage,
+  resetAllTopicMessages,
 } from 'redux/reducers/topicMessages/topicMessagesSlice';
 import {
   getTopicMessgesMeta,
   getTopicMessgesPhase,
   getIsTopicMessagesFetching,
   getIsTopicMessagesType,
+  getTopicMessgesCursor,
+  getTopicMessgesCurrentPage,
 } from 'redux/reducers/topicMessages/selectors';
 
 import Filters from './Filters';
@@ -22,6 +28,8 @@ const mapStateToProps = (state: RootState) => ({
   meta: getTopicMessgesMeta(state),
   isFetching: getIsTopicMessagesFetching(state),
   messageEventType: getIsTopicMessagesType(state),
+  cursor: getTopicMessgesCursor(state),
+  currentPage: getTopicMessgesCurrentPage(state),
 });
 
 const mapDispatchToProps = {
@@ -31,6 +39,10 @@ const mapDispatchToProps = {
   updateMeta: updateTopicMessagesMeta,
   setIsFetching: setTopicMessagesFetchingStatus,
   setMessageType: setMessageEventType,
+  updateCursor: updateTopicMessagesCursor,
+  setCurrentPage: setTopicMessagesCurrentPage,
+  setLastLoadedPage: setTopicMessagesLastLoadedPage,
+  resetAllMessages: resetAllTopicMessages,
 };
 
 export default connect(mapStateToProps, mapDispatchToProps)(Filters);

+ 0 - 0
kafka-ui-react-app/src/components/Topics/Topic/Messages/getDefaultSerdeName.ts → kafka-ui-react-app/src/components/Topics/Topic/Messages/Filters/getDefaultSerdeName.ts


+ 12 - 33
kafka-ui-react-app/src/components/Topics/Topic/Messages/Messages.tsx

@@ -1,27 +1,22 @@
 import React, { useCallback, useMemo, useState } from 'react';
 import TopicMessagesContext from 'components/contexts/TopicMessagesContext';
-import { SeekDirection, SerdeUsage } from 'generated-sources';
+import { SeekDirection } from 'generated-sources';
 import { useSearchParams } from 'react-router-dom';
-import { useSerdes } from 'lib/hooks/api/topicMessages';
-import useAppParams from 'lib/hooks/useAppParams';
-import { RouteParamsClusterTopic } from 'lib/paths';
-import { getDefaultSerdeName } from 'components/Topics/Topic/Messages/getDefaultSerdeName';
-import { MESSAGES_PER_PAGE } from 'lib/constants';
 
 import MessagesTable from './MessagesTable';
 import FiltersContainer from './Filters/FiltersContainer';
 
 export const SeekDirectionOptionsObj = {
-  [SeekDirection.FORWARD]: {
-    value: SeekDirection.FORWARD,
-    label: 'Oldest First',
-    isLive: false,
-  },
   [SeekDirection.BACKWARD]: {
     value: SeekDirection.BACKWARD,
     label: 'Newest First',
     isLive: false,
   },
+  [SeekDirection.FORWARD]: {
+    value: SeekDirection.FORWARD,
+    label: 'Oldest First',
+    isLive: false,
+  },
   [SeekDirection.TAILING]: {
     value: SeekDirection.TAILING,
     label: 'Live Mode',
@@ -32,27 +27,7 @@ export const SeekDirectionOptionsObj = {
 export const SeekDirectionOptions = Object.values(SeekDirectionOptionsObj);
 
 const Messages: React.FC = () => {
-  const [searchParams, setSearchParams] = useSearchParams();
-  const { clusterName, topicName } = useAppParams<RouteParamsClusterTopic>();
-
-  const { data: serdes = {} } = useSerdes({
-    clusterName,
-    topicName,
-    use: SerdeUsage.DESERIALIZE,
-  });
-
-  React.useEffect(() => {
-    if (!searchParams.get('keySerde')) {
-      searchParams.set('keySerde', getDefaultSerdeName(serdes.key || []));
-    }
-    if (!searchParams.get('valueSerde')) {
-      searchParams.set('valueSerde', getDefaultSerdeName(serdes.value || []));
-    }
-    if (!searchParams.get('limit')) {
-      searchParams.set('limit', MESSAGES_PER_PAGE);
-    }
-    setSearchParams(searchParams);
-  }, [serdes]);
+  const [searchParams] = useSearchParams();
 
   const defaultSeekValue = SeekDirectionOptions[0];
 
@@ -65,6 +40,8 @@ const Messages: React.FC = () => {
     SeekDirectionOptionsObj[seekDirection].isLive
   );
 
+  const [page, setPage] = React.useState<number>(1);
+
   const changeSeekDirection = useCallback((val: string) => {
     switch (val) {
       case SeekDirection.FORWARD:
@@ -87,9 +64,11 @@ const Messages: React.FC = () => {
     () => ({
       seekDirection,
       changeSeekDirection,
+      page,
+      setPage,
       isLive,
     }),
-    [seekDirection, changeSeekDirection]
+    [seekDirection, changeSeekDirection, page, setPage]
   );
 
   return (

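Page state now lives in TopicMessagesContext instead of the ?page= search parameter, so any descendant can page without touching the URL. A sketch of the consumer side (mirroring MessagesTable below):

    // Sketch: paging via context state rather than search params.
    const { page, setPage } = useContext(TopicMessagesContext);
    setPage(page + 1); // served from the Redux cache, or triggers a cursor fetch
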
+ 7 - 6
kafka-ui-react-app/src/components/Topics/Topic/Messages/MessagesTable.tsx

@@ -24,8 +24,8 @@ const MessagesTable: React.FC = () => {
   const [contentFilters, setContentFilters] = useState<PreviewFilter[]>([]);
 
   const [searchParams, setSearchParams] = useSearchParams();
-  const page = searchParams.get('page');
-  const { isLive } = useContext(TopicMessagesContext);
+  const { isLive, page, setPage } = useContext(TopicMessagesContext);
 
   const messages = useAppSelector(getTopicMessges);
   const isFetching = useAppSelector(getIsTopicMessagesFetching);
@@ -38,16 +38,17 @@ const MessagesTable: React.FC = () => {
 
   const isNextPageButtonDisabled =
     isPaginationDisabled || messages.length < Number(MESSAGES_PER_PAGE);
-  const isPrevPageButtonDisabled =
-    isPaginationDisabled || !Number(searchParams.get('page'));
+  const isPrevPageButtonDisabled = isPaginationDisabled || page === 1;
 
   const handleNextPage = () => {
-    searchParams.set('page', String(Number(page || 0) + 1));
+    setPage(Number(page || 1) + 1);
     setSearchParams(searchParams);
   };
 
   const handlePrevPage = () => {
-    searchParams.set('page', String(Number(page || 0) - 1));
+    setPage(Number(page || 1) - 1);
     setSearchParams(searchParams);
   };
 

+ 3 - 1
kafka-ui-react-app/src/components/common/Search/Search.tsx

@@ -45,7 +45,9 @@ const Search: React.FC<SearchProps> = ({
     }
   }, 500);
   const clearSearchValue = () => {
-    if (searchParams.get('q')) {
+    if (onChange) {
+      onChange('');
+    } else if (searchParams.get('q')) {
       searchParams.set('q', '');
       setSearchParams(searchParams);
     }

+ 2 - 0
kafka-ui-react-app/src/components/contexts/TopicMessagesContext.ts

@@ -4,6 +4,8 @@ import { SeekDirection } from 'generated-sources';
 export interface ContextProps {
   seekDirection: SeekDirection;
   changeSeekDirection(val: string): void;
+  page: number;
+  setPage(page: number): void;
   isLive: boolean;
 }
 

+ 25 - 0
kafka-ui-react-app/src/lib/hooks/api/topics.ts

@@ -15,6 +15,7 @@ import {
   CreateTopicMessage,
   GetTopicDetailsRequest,
   GetTopicsRequest,
+  MessageFilterRegistration,
   Topic,
   TopicConfig,
   TopicCreation,
@@ -39,6 +40,8 @@ export const topicKeys = {
     [...topicKeys.details(props), 'consumerGroups'] as const,
   statistics: (props: GetTopicDetailsRequest) =>
     [...topicKeys.details(props), 'statistics'] as const,
+  filter: (props: GetTopicDetailsRequest) =>
+    [...topicKeys.details(props), 'messageFilterRegistration'] as const,
 };
 
 export function useTopics(props: GetTopicsRequest) {
@@ -329,3 +332,25 @@ export function useCancelTopicAnalysis(props: GetTopicDetailsRequest) {
     },
   });
 }
+
+export function useRegisterFilter(props: GetTopicDetailsRequest) {
+  const client = useQueryClient();
+  return useMutation(
+    (filter: MessageFilterRegistration) =>
+      messagesApi.registerFilter({
+        ...props,
+        messageFilterRegistration: filter,
+      }),
+    {
+      onSuccess: () => {
+        showSuccessAlert({
+          message: `Filter successfully registered.`,
+        });
+        client.invalidateQueries(topicKeys.filter(props));
+      },
+      onError: (e) => {
+        showServerError(e as Response);
+      },
+    }
+  );
+}

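A usage sketch of the new hook, matching the call site in Filters.tsx; the filter code is illustrative only:

    const registerFilter = useRegisterFilter({ clusterName, topicName });
    const { id } = await registerFilter.mutateAsync({
      filterCode: 'value.contains("error")', // hypothetical Groovy snippet
    });
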
+ 5 - 0
kafka-ui-react-app/src/redux/interfaces/topic.ts

@@ -4,6 +4,7 @@ import {
   TopicCreation,
   TopicMessage,
   TopicMessageConsuming,
+  TopicMessageNextPageCursor,
 } from 'generated-sources';
 
 export type TopicName = Topic['name'];
@@ -52,9 +53,13 @@ export interface TopicFormData {
 }
 
 export interface TopicMessagesState {
+  allMessages: TopicMessage[];
   messages: TopicMessage[];
   phase?: string;
   meta: TopicMessageConsuming;
   messageEventType?: string;
   isFetching: boolean;
+  cursor?: TopicMessageNextPageCursor;
+  currentPage: number;
+  lastLoadedPage: number;
 }

+ 15 - 0
kafka-ui-react-app/src/redux/reducers/topicMessages/selectors.ts

@@ -19,6 +19,21 @@ export const getTopicMessgesMeta = createSelector(
   ({ meta }) => meta
 );
 
+export const getTopicMessgesCursor = createSelector(
+  topicMessagesState,
+  ({ cursor }) => cursor
+);
+
+export const getTopicMessgesCurrentPage = createSelector(
+  topicMessagesState,
+  ({ currentPage }) => currentPage
+);
+
+export const getTopicMessgesLastLoadedPage = createSelector(
+  topicMessagesState,
+  ({ lastLoadedPage }) => lastLoadedPage
+);
+
 export const getIsTopicMessagesFetching = createSelector(
   topicMessagesState,
   ({ isFetching }) => isFetching

+ 44 - 1
kafka-ui-react-app/src/redux/reducers/topicMessages/topicMessagesSlice.ts

@@ -2,7 +2,10 @@ import { createSlice } from '@reduxjs/toolkit';
 import { TopicMessagesState } from 'redux/interfaces';
 import { TopicMessage } from 'generated-sources';
 
+const PER_PAGE = 100;
+
 export const initialState: TopicMessagesState = {
+  allMessages: [],
   messages: [],
   meta: {
     bytesConsumed: 0,
@@ -12,6 +15,8 @@ export const initialState: TopicMessagesState = {
   },
   messageEventType: '',
   isFetching: false,
+  currentPage: 0,
+  lastLoadedPage: 0,
 };
 
 const topicMessagesSlice = createSlice({
@@ -19,16 +24,28 @@ const topicMessagesSlice = createSlice({
   initialState,
   reducers: {
     addTopicMessage: (state, action) => {
+      const allMessages: TopicMessage[] = action.payload.prepend
+        ? [action.payload.message, ...state.allMessages]
+        : [...state.allMessages, action.payload.message];
+
       const messages: TopicMessage[] = action.payload.prepend
         ? [action.payload.message, ...state.messages]
         : [...state.messages, action.payload.message];
 
       return {
         ...state,
+        allMessages,
         messages,
       };
     },
-    resetTopicMessages: () => initialState,
+    resetTopicMessages: (state) => {
+      return {
+        ...initialState,
+        currentPage: state.currentPage,
+        allMessages: state.allMessages,
+      };
+    },
+    resetAllTopicMessages: () => initialState,
     updateTopicMessagesPhase: (state, action) => {
       state.phase = action.payload;
     },
@@ -42,6 +59,28 @@ const topicMessagesSlice = createSlice({
     setMessageEventType: (state, action) => {
       state.messageEventType = action.payload;
     },
+    updateTopicMessagesCursor: (state, action) => {
+      state.cursor = action.payload;
+    },
+    setTopicMessagesCurrentPage: (state, action) => {
+      if (state.currentPage !== action.payload) {
+        const messages: TopicMessage[] = state.allMessages.slice(
+          (action.payload - 1) * PER_PAGE,
+          (action.payload - 1) * PER_PAGE + PER_PAGE
+        );
+        return {
+          ...state,
+          currentPage: action.payload,
+          messages,
+        };
+      }
+      return {
+        ...state,
+      };
+    },
+    setTopicMessagesLastLoadedPage: (state, action) => {
+      state.lastLoadedPage = action.payload;
+    },
   },
 });
 
@@ -52,6 +91,10 @@ export const {
   updateTopicMessagesMeta,
   setTopicMessagesFetchingStatus,
   setMessageEventType,
+  updateTopicMessagesCursor,
+  setTopicMessagesCurrentPage,
+  setTopicMessagesLastLoadedPage,
+  resetAllTopicMessages,
 } = topicMessagesSlice.actions;
 
 export default topicMessagesSlice.reducer;
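Net effect of the new reducers: messages is always a PER_PAGE-sized window over the accumulated allMessages, so revisiting any page at or below lastLoadedPage is a pure in-memory slice and opens no new SSE connection. A sketch against a configured store:

    // Sketch: page 2 is cut from the cache rather than re-fetched.
    store.dispatch(setTopicMessagesCurrentPage(2));
    // state.messages === state.allMessages.slice(100, 200)  (PER_PAGE = 100)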