merged with master

parent 3b8548aa30
commit 6a62fb87c6

15 changed files with 324 additions and 264 deletions
@@ -16,13 +16,15 @@ import com.provectus.kafka.ui.model.PollingModeDTO;
import com.provectus.kafka.ui.model.SeekDirectionDTO;
import com.provectus.kafka.ui.model.SeekTypeDTO;
import com.provectus.kafka.ui.model.SerdeUsageDTO;
import com.provectus.kafka.ui.model.SmartFilterTestExecutionDTO;
import com.provectus.kafka.ui.model.SmartFilterTestExecutionResultDTO;
import com.provectus.kafka.ui.model.TopicMessageEventDTO;
import com.provectus.kafka.ui.model.TopicSerdeSuggestionDTO;
import com.provectus.kafka.ui.model.rbac.AccessContext;
import com.provectus.kafka.ui.model.rbac.permission.AuditAction;
import com.provectus.kafka.ui.model.rbac.permission.TopicAction;
import com.provectus.kafka.ui.service.DeserializationService;
import com.provectus.kafka.ui.service.MessagesService;
import com.provectus.kafka.ui.service.rbac.AccessControlService;
import java.util.List;
import java.util.Optional;
import javax.validation.Valid;

@@ -43,26 +45,33 @@ public class MessagesController extends AbstractController implements MessagesAp
private final MessagesService messagesService;
private final DeserializationService deserializationService;
private final AccessControlService accessControlService;

@Override
public Mono<ResponseEntity<Void>> deleteTopicMessages(
String clusterName, String topicName, @Valid List<Integer> partitions,
ServerWebExchange exchange) {

Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
var context = AccessContext.builder()
.cluster(clusterName)
.topic(topicName)
.topicActions(MESSAGES_DELETE)
.build());
.build();

return validateAccess.then(
return validateAccess(context).<ResponseEntity<Void>>then(
messagesService.deleteTopicMessages(
getCluster(clusterName),
topicName,
Optional.ofNullable(partitions).orElse(List.of())
).thenReturn(ResponseEntity.ok().build())
);
).doOnEach(sig -> audit(context, sig));
}

@Override
public Mono<ResponseEntity<SmartFilterTestExecutionResultDTO>> executeSmartFilterTest(
Mono<SmartFilterTestExecutionDTO> smartFilterTestExecutionDto, ServerWebExchange exchange) {
return smartFilterTestExecutionDto
.map(MessagesService::execSmartFilterTest)
.map(ResponseEntity::ok);
}

@Deprecated

@@ -95,12 +104,17 @@ public class MessagesController extends AbstractController implements MessagesAp
String valueSerde,
String cursor,
ServerWebExchange exchange) {
var context = AccessContext.builder()
var contextBuilder = AccessContext.builder()
.cluster(clusterName)
.topic(topicName)
.topicActions(MESSAGES_READ)
.operationName("getTopicMessages")
.build();
.operationName("getTopicMessages");

if (auditService.isAuditTopic(getCluster(clusterName), topicName)) {
contextBuilder.auditActions(AuditAction.VIEW);
}

var accessContext = contextBuilder.build();

Flux<TopicMessageEventDTO> messagesFlux;
if (cursor != null) {

@@ -117,9 +131,9 @@ public class MessagesController extends AbstractController implements MessagesAp
valueSerde
);
}
return accessControlService.validateAccess(context)
return accessControlService.validateAccess(accessContext)
.then(Mono.just(ResponseEntity.ok(messagesFlux)))
.doOnEach(sig -> auditService.audit(context, sig));
.doOnEach(sig -> auditService.audit(accessContext, sig));
}

@Override

@@ -127,17 +141,18 @@ public class MessagesController extends AbstractController implements MessagesAp
String clusterName, String topicName, @Valid Mono<CreateTopicMessageDTO> createTopicMessage,
ServerWebExchange exchange) {

Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
var context = AccessContext.builder()
.cluster(clusterName)
.topic(topicName)
.topicActions(MESSAGES_PRODUCE)
.build());
.operationName("sendTopicMessages")
.build();

return validateAccess.then(
return validateAccess(context).then(
createTopicMessage.flatMap(msg ->
messagesService.sendMessage(getCluster(clusterName), topicName, msg).then()
).map(ResponseEntity::ok)
);
).doOnEach(sig -> audit(context, sig));
}

@Override

@@ -145,12 +160,12 @@ public class MessagesController extends AbstractController implements MessagesAp
String topicName,
SerdeUsageDTO use,
ServerWebExchange exchange) {

Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
var context = AccessContext.builder()
.cluster(clusterName)
.topic(topicName)
.topicActions(TopicAction.VIEW)
.build());
.operationName("getSerdes")
.build();

TopicSerdeSuggestionDTO dto = new TopicSerdeSuggestionDTO()
.key(use == SerdeUsageDTO.SERIALIZE

@@ -160,7 +175,7 @@ public class MessagesController extends AbstractController implements MessagesAp
? deserializationService.getSerdesForSerialize(getCluster(clusterName), topicName, VALUE)
: deserializationService.getSerdesForDeserialize(getCluster(clusterName), topicName, VALUE));

return validateAccess.then(
return validateAccess(context).then(
Mono.just(dto)
.subscribeOn(Schedulers.boundedElastic())
.map(ResponseEntity::ok)

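Not part of the diff: a self-contained Reactor sketch of the access-check pattern the controller methods converge on after this merge. validateAccess and audit below are stand-ins for the helpers the real controller inherits from AbstractController; everything else is plain Project Reactor.

import reactor.core.publisher.Mono;
import reactor.core.publisher.Signal;

class ValidateThenAuditSketch {

  static Mono<Void> validateAccess(String context) {
    return Mono.empty(); // assume the RBAC check passes; a failing check would error the Mono
  }

  static void audit(String context, Signal<?> signal) {
    System.out.println("audit: " + context + " -> " + signal.getType());
  }

  public static void main(String[] args) {
    String context = "deleteTopicMessages";
    validateAccess(context)
        .then(Mono.fromRunnable(() -> System.out.println("deleting messages")))
        .doOnEach(signal -> audit(context, signal)) // audit record for every terminal signal
        .block();
  }
}
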
@@ -2,39 +2,23 @@ package com.provectus.kafka.ui.emitter;

import com.provectus.kafka.ui.model.TopicMessageEventDTO;
import jakarta.annotation.Nullable;
import java.time.Duration;
import java.time.Instant;
import javax.annotation.Nullable;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.common.utils.Bytes;
import reactor.core.publisher.FluxSink;

public abstract class AbstractEmitter implements java.util.function.Consumer<FluxSink<TopicMessageEventDTO>> {
abstract class AbstractEmitter implements java.util.function.Consumer<FluxSink<TopicMessageEventDTO>> {

private final MessagesProcessing messagesProcessing;
private final PollingThrottler throttler;
protected final PollingSettings pollingSettings;
private final PollingSettings pollingSettings;

protected AbstractEmitter(MessagesProcessing messagesProcessing, PollingSettings pollingSettings) {
this.messagesProcessing = messagesProcessing;
this.pollingSettings = pollingSettings;
this.throttler = pollingSettings.getPollingThrottler();
}

protected ConsumerRecords<Bytes, Bytes> poll(
FluxSink<TopicMessageEventDTO> sink, Consumer<Bytes, Bytes> consumer) {
return poll(sink, consumer, pollingSettings.getPollTimeout());
}

protected ConsumerRecords<Bytes, Bytes> poll(
FluxSink<TopicMessageEventDTO> sink, Consumer<Bytes, Bytes> consumer, Duration timeout) {
Instant start = Instant.now();
ConsumerRecords<Bytes, Bytes> records = consumer.poll(timeout);
Instant finish = Instant.now();
int polledBytes = sendConsuming(sink, records, Duration.between(start, finish).toMillis());
throttler.throttleAfterPoll(polledBytes);
protected PolledRecords poll(FluxSink<TopicMessageEventDTO> sink, EnhancedConsumer consumer) {
var records = consumer.pollEnhanced(pollingSettings.getPollTimeout());
sendConsuming(sink, records);
return records;
}

@@ -42,19 +26,18 @@ public abstract class AbstractEmitter implements java.util.function.Consumer<Flu
return messagesProcessing.limitReached();
}

protected void sendMessage(FluxSink<TopicMessageEventDTO> sink,
ConsumerRecord<Bytes, Bytes> msg) {
messagesProcessing.sendMsg(sink, msg);
protected void send(FluxSink<TopicMessageEventDTO> sink,
Iterable<ConsumerRecord<Bytes, Bytes>> records,
@Nullable Cursor.Tracking cursor) {
messagesProcessing.send(sink, records, cursor);
}

protected void sendPhase(FluxSink<TopicMessageEventDTO> sink, String name) {
messagesProcessing.sendPhase(sink, name);
}

protected int sendConsuming(FluxSink<TopicMessageEventDTO> sink,
ConsumerRecords<Bytes, Bytes> records,
long elapsed) {
return messagesProcessing.sentConsumingInfo(sink, records, elapsed);
protected void sendConsuming(FluxSink<TopicMessageEventDTO> sink, PolledRecords records) {
messagesProcessing.sentConsumingInfo(sink, records);
}

// cursor is null if target partitions were fully polled (no, need to do paging)

@@ -18,18 +18,15 @@ public class BackwardEmitter extends RangePollingEmitter {
int messagesPerPage,
ConsumerRecordDeserializer deserializer,
Predicate<TopicMessageDTO> filter,
PollingSettings pollingSettings) {
PollingSettings pollingSettings,
Cursor.Tracking cursor) {
super(
consumerSupplier,
consumerPosition,
messagesPerPage,
new MessagesProcessing(
deserializer,
filter,
false,
messagesPerPage
),
pollingSettings
new MessagesProcessing(deserializer, filter, false, messagesPerPage),
pollingSettings,
cursor
);
}

@@ -3,10 +3,7 @@ package com.provectus.kafka.ui.emitter;
import com.provectus.kafka.ui.model.TopicMessageConsumingDTO;
import com.provectus.kafka.ui.model.TopicMessageEventDTO;
import com.provectus.kafka.ui.model.TopicMessageNextPageCursorDTO;
import com.provectus.kafka.ui.util.ConsumerRecordsUtil;
import javax.annotation.Nullable;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.common.utils.Bytes;
import reactor.core.publisher.FluxSink;

class ConsumingStats {

@@ -14,27 +11,24 @@ class ConsumingStats {
private long bytes = 0;
private int records = 0;
private long elapsed = 0;
private int filterApplyErrors = 0;

/**
* returns bytes polled.
*/
int sendConsumingEvt(FluxSink<TopicMessageEventDTO> sink,
ConsumerRecords<Bytes, Bytes> polledRecords,
long elapsed,
int filterApplyErrors) {
int polledBytes = ConsumerRecordsUtil.calculatePolledSize(polledRecords);
bytes += polledBytes;
this.records += polledRecords.count();
this.elapsed += elapsed;
void sendConsumingEvt(FluxSink<TopicMessageEventDTO> sink, PolledRecords polledRecords) {
bytes += polledRecords.bytes();
records += polledRecords.count();
elapsed += polledRecords.elapsed().toMillis();
sink.next(
new TopicMessageEventDTO()
.type(TopicMessageEventDTO.TypeEnum.CONSUMING)
.consuming(createConsumingStats(sink, filterApplyErrors))
.consuming(createConsumingStats())
);
return polledBytes;
}

void sendFinishEvent(FluxSink<TopicMessageEventDTO> sink, int filterApplyErrors, @Nullable Cursor.Tracking cursor) {
void incFilterApplyError() {
filterApplyErrors++;
}

void sendFinishEvent(FluxSink<TopicMessageEventDTO> sink, @Nullable Cursor.Tracking cursor) {
sink.next(
new TopicMessageEventDTO()
.type(TopicMessageEventDTO.TypeEnum.DONE)

@@ -43,17 +37,16 @@ class ConsumingStats {
? new TopicMessageNextPageCursorDTO().id(cursor.registerCursor())
: null
)
.consuming(createConsumingStats(sink, filterApplyErrors))
.consuming(createConsumingStats())
);
}

private TopicMessageConsumingDTO createConsumingStats(FluxSink<TopicMessageEventDTO> sink,
int filterApplyErrors) {
private TopicMessageConsumingDTO createConsumingStats() {
return new TopicMessageConsumingDTO()
.bytesConsumed(this.bytes)
.elapsedMs(this.elapsed)
.isCancelled(sink.isCancelled())
.bytesConsumed(bytes)
.elapsedMs(elapsed)
.isCancelled(false)
.filterApplyErrors(filterApplyErrors)
.messagesConsumed(this.records);
.messagesConsumed(records);
}
}

@@ -1,5 +1,7 @@
package com.provectus.kafka.ui.emitter;

import com.google.common.collect.HashBasedTable;
import com.google.common.collect.Table;
import com.provectus.kafka.ui.model.ConsumerPosition;
import com.provectus.kafka.ui.model.PollingModeDTO;
import com.provectus.kafka.ui.model.TopicMessageDTO;

@@ -20,32 +22,41 @@ public record Cursor(ConsumerRecordDeserializer deserializer,
private final ConsumerPosition originalPosition;
private final Predicate<TopicMessageDTO> filter;
private final int limit;
private final Function<Cursor, String> cursorRegistry;
private final Function<Cursor, String> registerAction;

private final Map<TopicPartition, Long> trackingOffsets = new HashMap<>();
//topic -> partition -> offset
private final Table<String, Integer, Long> trackingOffsets = HashBasedTable.create();

public Tracking(ConsumerRecordDeserializer deserializer,
ConsumerPosition originalPosition,
Predicate<TopicMessageDTO> filter,
int limit,
Function<Cursor, String> cursorRegistry) {
Function<Cursor, String> registerAction) {
this.deserializer = deserializer;
this.originalPosition = originalPosition;
this.filter = filter;
this.limit = limit;
this.cursorRegistry = cursorRegistry;
this.registerAction = registerAction;
}

void trackOffset(TopicPartition tp, long offset) {
trackingOffsets.put(tp, offset);
void trackOffset(String topic, int partition, long offset) {
trackingOffsets.put(topic, partition, offset);
}

void trackOffsets(Map<TopicPartition, Long> offsets) {
this.trackingOffsets.putAll(offsets);
void initOffsets(Map<TopicPartition, Long> initialSeekOffsets) {
initialSeekOffsets.forEach((tp, off) -> trackOffset(tp.topic(), tp.partition(), off));
}

private Map<TopicPartition, Long> getOffsetsMap(int offsetToAdd) {
Map<TopicPartition, Long> result = new HashMap<>();
trackingOffsets.rowMap()
.forEach((topic, partsMap) ->
partsMap.forEach((p, off) -> result.put(new TopicPartition(topic, p), off + offsetToAdd)));
return result;
}

String registerCursor() {
return cursorRegistry.apply(
return registerAction.apply(
new Cursor(
deserializer,
new ConsumerPosition(

@@ -57,7 +68,17 @@ public record Cursor(ConsumerRecordDeserializer deserializer,
originalPosition.topic(),
originalPosition.partitions(),
null,
new ConsumerPosition.Offsets(null, trackingOffsets)
new ConsumerPosition.Offsets(
null,
getOffsetsMap(
switch (originalPosition.pollingMode()) {
case TO_OFFSET, TO_TIMESTAMP, LATEST -> 0;
// when doing forward polling we need to start from latest msg's offset + 1
case FROM_OFFSET, FROM_TIMESTAMP, EARLIEST -> 1;
case TAILING -> throw new IllegalStateException();
}
)
)
),
filter,
limit

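Not part of the diff: a minimal sketch of the offset arithmetic that Cursor.Tracking applies when the next-page cursor is registered. Backward modes reuse the tracked offsets as-is (+0), while forward modes add +1 so the next page starts after the last record already delivered to the client. The topic name and offsets below are made up; TopicPartition comes from kafka-clients.

import java.util.HashMap;
import java.util.Map;
import org.apache.kafka.common.TopicPartition;

class CursorOffsetShiftSketch {
  public static void main(String[] args) {
    Map<TopicPartition, Long> tracked = new HashMap<>();
    tracked.put(new TopicPartition("orders", 0), 41L); // last offset sent from partition 0

    int shift = 1; // FROM_OFFSET / FROM_TIMESTAMP / EARLIEST; the backward modes use 0
    Map<TopicPartition, Long> nextPageSeek = new HashMap<>();
    tracked.forEach((tp, off) -> nextPageSeek.put(tp, off + shift));

    System.out.println(nextPageSeek); // {orders-0=42}: the next page begins at offset 42
  }
}
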
@@ -18,18 +18,15 @@ public class ForwardEmitter extends RangePollingEmitter {
int messagesPerPage,
ConsumerRecordDeserializer deserializer,
Predicate<TopicMessageDTO> filter,
PollingSettings pollingSettings) {
PollingSettings pollingSettings,
Cursor.Tracking cursor) {
super(
consumerSupplier,
consumerPosition,
messagesPerPage,
new MessagesProcessing(
deserializer,
filter,
true,
messagesPerPage
),
pollingSettings
new MessagesProcessing(deserializer, filter, true, messagesPerPage),
pollingSettings,
cursor
);
}

@@ -1,71 +1,80 @@
package com.provectus.kafka.ui.emitter;

import static java.util.stream.Collectors.collectingAndThen;
import static java.util.stream.Collectors.groupingBy;
import static java.util.stream.Collectors.toList;

import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.Iterables;
import com.google.common.collect.Streams;
import com.provectus.kafka.ui.model.TopicMessageDTO;
import com.provectus.kafka.ui.model.TopicMessageEventDTO;
import com.provectus.kafka.ui.model.TopicMessagePhaseDTO;
import com.provectus.kafka.ui.serdes.ConsumerRecordDeserializer;
import java.util.Comparator;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;
import java.util.function.Predicate;
import javax.annotation.Nullable;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.common.utils.Bytes;
import reactor.core.publisher.FluxSink;

@Slf4j
public class MessagesProcessing {
@RequiredArgsConstructor
class MessagesProcessing {

private final ConsumingStats consumingStats = new ConsumingStats();
private long sentMessages = 0;
private int filterApplyErrors = 0;

private final ConsumerRecordDeserializer deserializer;
private final Predicate<TopicMessageDTO> filter;
private final boolean ascendingSortBeforeSend;
private final @Nullable Integer limit;

public MessagesProcessing(ConsumerRecordDeserializer deserializer,
Predicate<TopicMessageDTO> filter,
@Nullable Integer limit) {
this.deserializer = deserializer;
this.filter = filter;
this.limit = limit;
}

boolean limitReached() {
return limit != null && sentMessages >= limit;
}

void sendMsg(FluxSink<TopicMessageEventDTO> sink, ConsumerRecord<Bytes, Bytes> rec) {
if (!sink.isCancelled() && !limitReached()) {
TopicMessageDTO topicMessage = deserializer.deserialize(rec);
try {
if (filter.test(topicMessage)) {
sink.next(
new TopicMessageEventDTO()
.type(TopicMessageEventDTO.TypeEnum.MESSAGE)
.message(topicMessage)
);
sentMessages++;
}
} catch (Exception e) {
filterApplyErrors++;
log.trace("Error applying filter for message {}", topicMessage);
}
}
void send(FluxSink<TopicMessageEventDTO> sink,
Iterable<ConsumerRecord<Bytes, Bytes>> polled,
@Nullable Cursor.Tracking cursor) {
sortForSending(polled, ascendingSortBeforeSend)
.forEach(rec -> {
if (!limitReached() && !sink.isCancelled()) {
TopicMessageDTO topicMessage = deserializer.deserialize(rec);
try {
if (filter.test(topicMessage)) {
sink.next(
new TopicMessageEventDTO()
.type(TopicMessageEventDTO.TypeEnum.MESSAGE)
.message(topicMessage)
);
sentMessages++;
}
if (cursor != null) {
cursor.trackOffset(rec.topic(), rec.partition(), rec.offset());
}
} catch (Exception e) {
consumingStats.incFilterApplyError();
log.trace("Error applying filter for message {}", topicMessage);
}
}
});
}

int sentConsumingInfo(FluxSink<TopicMessageEventDTO> sink,
ConsumerRecords<Bytes, Bytes> polledRecords,
long elapsed) {
void sentConsumingInfo(FluxSink<TopicMessageEventDTO> sink, PolledRecords polledRecords) {
if (!sink.isCancelled()) {
return consumingStats.sendConsumingEvt(sink, polledRecords, elapsed, filterApplyErrors);
consumingStats.sendConsumingEvt(sink, polledRecords);
}
return 0;
}

void sendFinishEvents(FluxSink<TopicMessageEventDTO> sink, @Nullable Cursor.Tracking cursor) {
if (!sink.isCancelled()) {
consumingStats.sendFinishEvent(sink, filterApplyErrors, cursor);
consumingStats.sendFinishEvent(sink, cursor);
}
}

@@ -79,4 +88,30 @@ public class MessagesProcessing {
}
}

/*
* Sorting by timestamps, BUT requesting that records within same partitions should be ordered by offsets.
*/
@VisibleForTesting
static Iterable<ConsumerRecord<Bytes, Bytes>> sortForSending(Iterable<ConsumerRecord<Bytes, Bytes>> records,
boolean asc) {
Comparator<ConsumerRecord> offsetComparator = asc
? Comparator.comparingLong(ConsumerRecord::offset)
: Comparator.<ConsumerRecord>comparingLong(ConsumerRecord::offset).reversed();

// partition -> sorted by offsets records
Map<Integer, List<ConsumerRecord<Bytes, Bytes>>> perPartition = Streams.stream(records)
.collect(
groupingBy(
ConsumerRecord::partition,
TreeMap::new,
collectingAndThen(toList(), lst -> lst.stream().sorted(offsetComparator).toList())));

Comparator<ConsumerRecord> tsComparator = asc
? Comparator.comparing(ConsumerRecord::timestamp)
: Comparator.<ConsumerRecord>comparingLong(ConsumerRecord::timestamp).reversed();

// merge-sorting records from partitions one by one using timestamp comparator
return Iterables.mergeSorted(perPartition.values(), tsComparator);
}

}

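Not part of the diff: a standalone sketch of the idea behind sortForSending, using a made-up Rec record instead of ConsumerRecord. Each partition's records stay in offset order and the partitions are merged by timestamp with Guava's Iterables.mergeSorted, which is exactly the call the real method ends with.

import com.google.common.collect.Iterables;
import java.util.Comparator;
import java.util.List;

class MergeSortedSketch {
  record Rec(int partition, long offset, long timestamp) { }

  public static void main(String[] args) {
    List<Rec> p0 = List.of(new Rec(0, 10, 100), new Rec(0, 11, 300)); // offset-ordered
    List<Rec> p1 = List.of(new Rec(1, 5, 200), new Rec(1, 6, 250));   // offset-ordered

    // merge per-partition lists into one timestamp-ordered sequence
    Iterable<Rec> merged =
        Iterables.mergeSorted(List.of(p0, p1), Comparator.comparingLong(Rec::timestamp));

    merged.forEach(System.out::println); // timestamps come out as 100, 200, 250, 300
  }
}
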
@@ -17,6 +17,7 @@ import reactor.core.publisher.FluxSink;
abstract class RangePollingEmitter extends AbstractEmitter {

private final Supplier<EnhancedConsumer> consumerSupplier;
private final Cursor.Tracking cursor;
protected final ConsumerPosition consumerPosition;
protected final int messagesPerPage;

@@ -24,11 +25,13 @@ abstract class RangePollingEmitter extends AbstractEmitter {
ConsumerPosition consumerPosition,
int messagesPerPage,
MessagesProcessing messagesProcessing,
PollingSettings pollingSettings) {
PollingSettings pollingSettings,
Cursor.Tracking cursor) {
super(messagesProcessing, pollingSettings);
this.consumerPosition = consumerPosition;
this.messagesPerPage = messagesPerPage;
this.consumerSupplier = consumerSupplier;
this.cursor = cursor;
}

protected record FromToOffset(/*inclusive*/ long from, /*exclusive*/ long to) {

@@ -46,18 +49,20 @@ abstract class RangePollingEmitter extends AbstractEmitter {
try (EnhancedConsumer consumer = consumerSupplier.get()) {
sendPhase(sink, "Consumer created");
var seekOperations = SeekOperations.create(consumer, consumerPosition);
cursor.initOffsets(seekOperations.getOffsetsForSeek());

TreeMap<TopicPartition, FromToOffset> pollRange = nextPollingRange(new TreeMap<>(), seekOperations);
log.debug("Starting from offsets {}", pollRange);

while (!sink.isCancelled() && !pollRange.isEmpty() && !sendLimitReached()) {
while (!sink.isCancelled() && !pollRange.isEmpty() && !isSendLimitReached()) {
var polled = poll(consumer, sink, pollRange);
send(sink, polled);
send(sink, polled, cursor);
pollRange = nextPollingRange(pollRange, seekOperations);
}
if (sink.isCancelled()) {
log.debug("Polling finished due to sink cancellation");
}
sendFinishStatsAndCompleteSink(sink);
sendFinishStatsAndCompleteSink(sink, pollRange.isEmpty() ? null : cursor);
log.debug("Polling finished");
} catch (InterruptException kafkaInterruptException) {
log.debug("Polling finished due to thread interruption");

@@ -20,7 +20,7 @@ class SeekOperations {
private final OffsetsInfo offsetsInfo;
private final Map<TopicPartition, Long> offsetsForSeek; //only contains non-empty partitions!

static SeekOperations create(Consumer<?, ?> consumer, ConsumerPosition consumerPosition) {
public static SeekOperations create(Consumer<?, ?> consumer, ConsumerPosition consumerPosition) {
OffsetsInfo offsetsInfo = consumerPosition.partitions().isEmpty()
? new OffsetsInfo(consumer, consumerPosition.topic())
: new OffsetsInfo(consumer, consumerPosition.partitions());

@@ -28,7 +28,7 @@ class SeekOperations {
return new SeekOperations(consumer, offsetsInfo, offsetsToSeek);
}

void assignAndSeek() {
public void assignAndSeekNonEmptyPartitions() {
consumer.assign(offsetsForSeek.keySet());
offsetsForSeek.forEach(consumer::seek);
}

@@ -69,8 +69,7 @@ class SeekOperations {
if (positionOffset.offset() != null) {
offsetsInfo.getNonEmptyPartitions().forEach(tp -> offsets.put(tp, positionOffset.offset()));
} else {
requireNonNull(positionOffset.tpOffsets());
offsets.putAll(positionOffset.tpOffsets());
offsets.putAll(requireNonNull(positionOffset.tpOffsets()));
offsets.keySet().retainAll(offsetsInfo.getNonEmptyPartitions());
}

@@ -1,24 +1,28 @@
package com.provectus.kafka.ui.emitter;

import com.provectus.kafka.ui.model.ConsumerPosition;
import com.provectus.kafka.ui.model.TopicMessageDTO;
import com.provectus.kafka.ui.model.TopicMessageEventDTO;
import com.provectus.kafka.ui.serdes.ConsumerRecordDeserializer;
import java.util.HashMap;
import java.util.function.Predicate;
import java.util.function.Supplier;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.common.errors.InterruptException;
import reactor.core.publisher.FluxSink;

@Slf4j
public class TailingEmitter extends AbstractEmitter
implements java.util.function.Consumer<FluxSink<TopicMessageEventDTO>> {
public class TailingEmitter extends AbstractEmitter {

private final Supplier<EnhancedConsumer> consumerSupplier;
private final ConsumerPosition consumerPosition;

public TailingEmitter(Supplier<EnhancedConsumer> consumerSupplier,
ConsumerPosition consumerPosition,
MessagesProcessing messagesProcessing,
ConsumerRecordDeserializer deserializer,
Predicate<TopicMessageDTO> filter,
PollingSettings pollingSettings) {
super(messagesProcessing, pollingSettings);
super(new MessagesProcessing(deserializer, filter, false, null), pollingSettings);
this.consumerSupplier = consumerSupplier;
this.consumerPosition = consumerPosition;
}

@@ -27,12 +31,11 @@ public class TailingEmitter extends AbstractEmitter
public void accept(FluxSink<TopicMessageEventDTO> sink) {
log.debug("Starting tailing polling for {}", consumerPosition);
try (EnhancedConsumer consumer = consumerSupplier.get()) {
SeekOperations.create(consumer, consumerPosition)
.assignAndSeek();
assignAndSeek(consumer);
while (!sink.isCancelled()) {
sendPhase(sink, "Polling");
var polled = poll(sink, consumer);
polled.forEach(r -> sendMessage(sink, r));
send(sink, polled, null);
}
sink.complete();
log.debug("Tailing finished");

@@ -45,4 +48,11 @@ public class TailingEmitter extends AbstractEmitter
}
}

private void assignAndSeek(EnhancedConsumer consumer) {
var seekOperations = SeekOperations.create(consumer, consumerPosition);
var seekOffsets = new HashMap<>(seekOperations.getEndOffsets()); // defaulting offsets to topic end
seekOffsets.putAll(seekOperations.getOffsetsForSeek()); // this will only set non-empty partitions
consumer.assign(seekOffsets.keySet());
seekOffsets.forEach(consumer::seek);
}
}

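Not part of the diff: a tiny sketch of the "default to end offsets, then overlay the computed seek offsets" pattern used by TailingEmitter.assignAndSeek. Empty partitions tail from their end, partitions with data start at the seek position. Partition names and offsets below are invented.

import java.util.HashMap;
import java.util.Map;

class SeekDefaultingSketch {
  public static void main(String[] args) {
    Map<String, Long> endOffsets = Map.of("t-0", 0L, "t-1", 50L); // every assigned partition
    Map<String, Long> offsetsForSeek = Map.of("t-1", 42L);        // only non-empty partitions

    Map<String, Long> effective = new HashMap<>(endOffsets); // default: topic end
    effective.putAll(offsetsForSeek);                         // override where data exists

    System.out.println(effective); // t-0 -> 0, t-1 -> 42
  }
}
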
@@ -6,25 +6,21 @@ import com.google.common.cache.CacheBuilder;
import com.google.common.hash.Hashing;
import com.google.common.util.concurrent.RateLimiter;
import com.provectus.kafka.ui.config.ClustersProperties;
import com.provectus.kafka.ui.emitter.BackwardRecordEmitter;
import com.provectus.kafka.ui.emitter.BackwardEmitter;
import com.provectus.kafka.ui.emitter.Cursor;
import com.provectus.kafka.ui.emitter.ForwardRecordEmitter;
import com.provectus.kafka.ui.emitter.ForwardEmitter;
import com.provectus.kafka.ui.emitter.MessageFilters;
import com.provectus.kafka.ui.emitter.MessagesProcessing;
import com.provectus.kafka.ui.emitter.TailingEmitter;
import com.provectus.kafka.ui.exception.TopicNotFoundException;
import com.provectus.kafka.ui.exception.ValidationException;
import com.provectus.kafka.ui.model.ConsumerPosition;
import com.provectus.kafka.ui.model.CreateTopicMessageDTO;
import com.provectus.kafka.ui.model.KafkaCluster;
import com.provectus.kafka.ui.model.MessageFilterTypeDTO;
import com.provectus.kafka.ui.model.PollingModeDTO;
import com.provectus.kafka.ui.model.SeekDirectionDTO;
import com.provectus.kafka.ui.model.SmartFilterTestExecutionDTO;
import com.provectus.kafka.ui.model.SmartFilterTestExecutionResultDTO;
import com.provectus.kafka.ui.model.TopicMessageDTO;
import com.provectus.kafka.ui.model.TopicMessageEventDTO;
import com.provectus.kafka.ui.serde.api.Serde;
import com.provectus.kafka.ui.serdes.ConsumerRecordDeserializer;
import com.provectus.kafka.ui.serdes.ProducerRecordCreator;
import com.provectus.kafka.ui.util.SslPropertiesUtil;

@@ -252,81 +248,45 @@ public class MessagesService {
return withExistingTopic(cluster, topic)
.flux()
.publishOn(Schedulers.boundedElastic())
.flatMap(td -> loadMessagesImpl(cluster, topic, deserializer, consumerPosition, filter, limit));
.flatMap(td -> loadMessagesImpl(cluster, deserializer, consumerPosition, filter, limit));
}

private Flux<TopicMessageEventDTO> loadMessagesImpl(KafkaCluster cluster,
String topic,
ConsumerRecordDeserializer deserializer,
ConsumerPosition consumerPosition,
Predicate<TopicMessageDTO> filter,
int limit) {
var processing = new MessagesProcessing(
deserializer,
filter,
consumerPosition.pollingMode() == PollingModeDTO.TAILING ? null : limit
);

var emitter = switch (consumerPosition.pollingMode()) {
case TO_OFFSET, TO_TIMESTAMP, LATEST -> new BackwardRecordEmitter(
case TO_OFFSET, TO_TIMESTAMP, LATEST -> new BackwardEmitter(
() -> consumerGroupService.createConsumer(cluster),
consumerPosition,
limit,
processing,
deserializer,
filter,
cluster.getPollingSettings(),
new Cursor.Tracking(deserializer, consumerPosition, filter, limit, cursorsStorage::register)
cursorsStorage.createNewCursor(deserializer, consumerPosition, filter, limit)
);
case FROM_OFFSET, FROM_TIMESTAMP, EARLIEST -> new ForwardRecordEmitter(
case FROM_OFFSET, FROM_TIMESTAMP, EARLIEST -> new ForwardEmitter(
() -> consumerGroupService.createConsumer(cluster),
consumerPosition,
processing,
limit,
deserializer,
filter,
cluster.getPollingSettings(),
new Cursor.Tracking(deserializer, consumerPosition, filter, limit, cursorsStorage::register)
cursorsStorage.createNewCursor(deserializer, consumerPosition, filter, limit)
);
case TAILING -> new TailingEmitter(
() -> consumerGroupService.createConsumer(cluster),
consumerPosition,
processing,
deserializer,
filter,
cluster.getPollingSettings()
);
};
return Flux.create(emitter)
.map(getDataMasker(cluster, topic))
.map(throttleUiPublish(consumerPosition.pollingMode()));
}

private int fixPageSize(@Nullable Integer pageSize) {
return Optional.ofNullable(pageSize)
.filter(ps -> ps > 0 && ps <= maxPageSize)
.orElse(defaultPageSize);
}

public String registerMessageFilter(String groovyCode) {
String saltedCode = groovyCode + SALT_FOR_HASHING;
String filterId = Hashing.sha256()
.hashString(saltedCode, Charsets.UTF_8)
.toString()
.substring(0, 8);
if (registeredFilters.getIfPresent(filterId) == null) {
registeredFilters.put(filterId, MessageFilters.groovyScriptFilter(groovyCode));
}
return filterId;
}

private UnaryOperator<TopicMessageEventDTO> getDataMasker(KafkaCluster cluster, String topicName) {
var keyMasker = cluster.getMasking().getMaskingFunction(topicName, Serde.Target.KEY);
var valMasker = cluster.getMasking().getMaskingFunction(topicName, Serde.Target.VALUE);
return evt -> {
if (evt.getType() != TopicMessageEventDTO.TypeEnum.MESSAGE) {
return evt;
}
return evt.message(
evt.getMessage()
.key(keyMasker.apply(evt.getMessage().getKey()))
.content(valMasker.apply(evt.getMessage().getContent())));
};
}

private Predicate<TopicMessageDTO> getMsgFilter(@Nullable String containsStrFilter,
@Nullable String smartFilterId) {
Predicate<TopicMessageDTO> messageFilter = MessageFilters.noop();

@@ -356,4 +316,22 @@ public class MessagesService {
return UnaryOperator.identity();
}

private int fixPageSize(@Nullable Integer pageSize) {
return Optional.ofNullable(pageSize)
.filter(ps -> ps > 0 && ps <= maxPageSize)
.orElse(defaultPageSize);
}

public String registerMessageFilter(String groovyCode) {
String saltedCode = groovyCode + SALT_FOR_HASHING;
String filterId = Hashing.sha256()
.hashString(saltedCode, Charsets.UTF_8)
.toString()
.substring(0, 8);
if (registeredFilters.getIfPresent(filterId) == null) {
registeredFilters.put(filterId, MessageFilters.groovyScriptFilter(groovyCode));
}
return filterId;
}

}

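Not part of the diff: a minimal sketch of the filter-id scheme in registerMessageFilter. The id is the first 8 hex characters of sha256(code + salt), so registering identical Groovy code again yields the same id and reuses the cached compiled filter. The salt value below is a placeholder, not the project's real constant.

import com.google.common.base.Charsets;
import com.google.common.hash.Hashing;

class FilterIdSketch {
  private static final String SALT_FOR_HASHING = "someSalt"; // placeholder value

  public static void main(String[] args) {
    String groovyCode = "value != null";
    String filterId = Hashing.sha256()
        .hashString(groovyCode + SALT_FOR_HASHING, Charsets.UTF_8)
        .toString()
        .substring(0, 8);
    System.out.println(filterId); // stable 8-character id for identical filter code
  }
}
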
@@ -4,8 +4,12 @@ import com.google.common.annotations.VisibleForTesting;
import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import com.provectus.kafka.ui.emitter.Cursor;
import com.provectus.kafka.ui.model.ConsumerPosition;
import com.provectus.kafka.ui.model.TopicMessageDTO;
import com.provectus.kafka.ui.serdes.ConsumerRecordDeserializer;
import java.util.Map;
import java.util.Optional;
import java.util.function.Predicate;
import org.apache.commons.lang3.RandomStringUtils;

public class PollingCursorsStorage {

@@ -16,6 +20,14 @@ public class PollingCursorsStorage {
.maximumSize(MAX_SIZE)
.build();

public Cursor.Tracking createNewCursor(ConsumerRecordDeserializer deserializer,
ConsumerPosition originalPosition,
Predicate<TopicMessageDTO> filter,
int limit) {
return new Cursor.Tracking(deserializer, originalPosition, filter, limit, this::register);
}

public Optional<Cursor> getCursor(String id) {
return Optional.ofNullable(cursorsCache.getIfPresent(id));
}

@@ -1,6 +1,6 @@
package com.provectus.kafka.ui.service.analyze;

import static com.provectus.kafka.ui.model.SeekTypeDTO.BEGINNING;
import static com.provectus.kafka.ui.model.PollingModeDTO.EARLIEST;

import com.provectus.kafka.ui.emitter.EnhancedConsumer;
import com.provectus.kafka.ui.emitter.SeekOperations;

@@ -14,6 +14,7 @@ import java.io.Closeable;
import java.time.Duration;
import java.time.Instant;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import lombok.RequiredArgsConstructor;

@@ -104,7 +105,8 @@ public class TopicAnalysisService {
consumer.partitionsFor(topicId.topicName)
.forEach(tp -> partitionStats.put(tp.partition(), new TopicAnalysisStats()));

var seekOperations = SeekOperations.create(consumer, new ConsumerPosition(BEGINNING, topicId.topicName, null));
var seekOperations =
SeekOperations.create(consumer, new ConsumerPosition(EARLIEST, topicId.topicName, List.of(), null, null));
long summaryOffsetsRange = seekOperations.summaryOffsetsRange();
seekOperations.assignAndSeekNonEmptyPartitions();

@@ -117,56 +117,56 @@ class CursorTest extends AbstractIntegrationTest {
.verifyComplete();
}

private BackwardRecordEmitter createBackwardEmitter(ConsumerPosition position) {
return new BackwardRecordEmitter(
private BackwardEmitter createBackwardEmitter(ConsumerPosition position) {
return new BackwardEmitter(
this::createConsumer,
position,
PAGE_SIZE,
new MessagesProcessing(createRecordsDeserializer(), m -> true, PAGE_SIZE),
createRecordsDeserializer(),
m -> true,
PollingSettings.createDefault(),
createCursor(position)
);
}

private BackwardRecordEmitter createBackwardEmitterWithCursor(Cursor cursor) {
return new BackwardRecordEmitter(
private BackwardEmitter createBackwardEmitterWithCursor(Cursor cursor) {
return new BackwardEmitter(
this::createConsumer,
cursor.consumerPosition(),
cursor.limit(),
new MessagesProcessing(cursor.deserializer(), cursor.filter(), PAGE_SIZE),
cursor.deserializer(),
cursor.filter(),
PollingSettings.createDefault(),
createCursor(cursor.consumerPosition())
);
}

private ForwardRecordEmitter createForwardEmitterWithCursor(Cursor cursor) {
return new ForwardRecordEmitter(
private ForwardEmitter createForwardEmitterWithCursor(Cursor cursor) {
return new ForwardEmitter(
this::createConsumer,
cursor.consumerPosition(),
new MessagesProcessing(cursor.deserializer(), cursor.filter(), PAGE_SIZE),
cursor.limit(),
cursor.deserializer(),
cursor.filter(),
PollingSettings.createDefault(),
createCursor(cursor.consumerPosition())
);
}

private ForwardRecordEmitter createForwardEmitter(ConsumerPosition position) {
return new ForwardRecordEmitter(
private ForwardEmitter createForwardEmitter(ConsumerPosition position) {
return new ForwardEmitter(
this::createConsumer,
position,
new MessagesProcessing(createRecordsDeserializer(), m -> true, PAGE_SIZE),
PAGE_SIZE,
createRecordsDeserializer(),
m -> true,
PollingSettings.createDefault(),
createCursor(position)
);
}

private Cursor.Tracking createCursor(ConsumerPosition position) {
return new Cursor.Tracking(
createRecordsDeserializer(),
position,
m -> true,
PAGE_SIZE,
cursorsStorage::register
);
return cursorsStorage.createNewCursor(createRecordsDeserializer(), position, m -> true, PAGE_SIZE);
}

private EnhancedConsumer createConsumer() {

@@ -187,7 +187,8 @@ class CursorTest extends AbstractIntegrationTest {
s.deserializer(null, Serde.Target.VALUE),
StringSerde.name(),
s.deserializer(null, Serde.Target.KEY),
s.deserializer(null, Serde.Target.VALUE)
s.deserializer(null, Serde.Target.VALUE),
msg -> msg
);
}

@@ -9,19 +9,21 @@ import static com.provectus.kafka.ui.model.PollingModeDTO.TO_TIMESTAMP;
import static org.assertj.core.api.Assertions.assertThat;

import com.provectus.kafka.ui.AbstractIntegrationTest;
import com.provectus.kafka.ui.emitter.BackwardRecordEmitter;
import com.provectus.kafka.ui.emitter.BackwardEmitter;
import com.provectus.kafka.ui.emitter.Cursor;
import com.provectus.kafka.ui.emitter.ForwardRecordEmitter;
import com.provectus.kafka.ui.emitter.MessagesProcessing;
import com.provectus.kafka.ui.emitter.EnhancedConsumer;
import com.provectus.kafka.ui.emitter.ForwardEmitter;
import com.provectus.kafka.ui.emitter.PollingSettings;
import com.provectus.kafka.ui.emitter.PollingThrottler;
import com.provectus.kafka.ui.model.ConsumerPosition;
import com.provectus.kafka.ui.model.ConsumerPosition.Offsets;
import com.provectus.kafka.ui.model.TopicMessageDTO;
import com.provectus.kafka.ui.model.TopicMessageEventDTO;
import com.provectus.kafka.ui.producer.KafkaTestProducer;
import com.provectus.kafka.ui.serde.api.Serde;
import com.provectus.kafka.ui.serdes.ConsumerRecordDeserializer;
import com.provectus.kafka.ui.serdes.PropertyResolverImpl;
import com.provectus.kafka.ui.serdes.builtin.StringSerde;
import com.provectus.kafka.ui.util.ApplicationMetrics;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.HashMap;

@@ -32,17 +34,15 @@ import java.util.UUID;
import java.util.concurrent.ThreadLocalRandom;
import java.util.function.Consumer;
import java.util.function.Function;
import java.util.function.Predicate;
import java.util.stream.Collectors;
import lombok.Value;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.admin.NewTopic;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.header.internals.RecordHeader;
import org.apache.kafka.common.serialization.BytesDeserializer;
import org.apache.kafka.common.utils.Bytes;
import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;

@@ -62,6 +62,7 @@ class RecordEmitterTest extends AbstractIntegrationTest {
static final List<Record> SENT_RECORDS = new ArrayList<>();
static final ConsumerRecordDeserializer RECORD_DESERIALIZER = createRecordsDeserializer();
static final Cursor.Tracking CURSOR_MOCK = Mockito.mock(Cursor.Tracking.class);
static final Predicate<TopicMessageDTO> NOOP_FILTER = m -> true;

@BeforeAll
static void generateMsgs() throws Exception {

@@ -98,6 +99,7 @@ class RecordEmitterTest extends AbstractIntegrationTest {
static void cleanup() {
deleteTopic(TOPIC);
deleteTopic(EMPTY_TOPIC);
SENT_RECORDS.clear();
}

private static ConsumerRecordDeserializer createRecordsDeserializer() {

@@ -110,29 +112,29 @@ class RecordEmitterTest extends AbstractIntegrationTest {
s.deserializer(null, Serde.Target.VALUE),
StringSerde.name(),
s.deserializer(null, Serde.Target.KEY),
s.deserializer(null, Serde.Target.VALUE)
s.deserializer(null, Serde.Target.VALUE),
msg -> msg
);
}

private MessagesProcessing createMessagesProcessing() {
return new MessagesProcessing(RECORD_DESERIALIZER, msg -> true, null);
}

@Test
void pollNothingOnEmptyTopic() {
var forwardEmitter = new ForwardRecordEmitter(
var forwardEmitter = new ForwardEmitter(
this::createConsumer,
new ConsumerPosition(EARLIEST, EMPTY_TOPIC, List.of(), null, null),
createMessagesProcessing(),
100,
RECORD_DESERIALIZER,
NOOP_FILTER,
PollingSettings.createDefault(),
CURSOR_MOCK
);

var backwardEmitter = new BackwardRecordEmitter(
var backwardEmitter = new BackwardEmitter(
this::createConsumer,
new ConsumerPosition(EARLIEST, EMPTY_TOPIC, List.of(), null, null),
100,
createMessagesProcessing(),
RECORD_DESERIALIZER,
NOOP_FILTER,
PollingSettings.createDefault(),
CURSOR_MOCK
);

@@ -152,19 +154,22 @@ class RecordEmitterTest extends AbstractIntegrationTest {

@Test
void pollFullTopicFromBeginning() {
var forwardEmitter = new ForwardRecordEmitter(
var forwardEmitter = new ForwardEmitter(
this::createConsumer,
new ConsumerPosition(EARLIEST, TOPIC, List.of(), null, null),
createMessagesProcessing(),
PARTITIONS * MSGS_PER_PARTITION,
RECORD_DESERIALIZER,
NOOP_FILTER,
PollingSettings.createDefault(),
CURSOR_MOCK
);

var backwardEmitter = new BackwardRecordEmitter(
var backwardEmitter = new BackwardEmitter(
this::createConsumer,
new ConsumerPosition(LATEST, TOPIC, List.of(), null, null),
PARTITIONS * MSGS_PER_PARTITION,
createMessagesProcessing(),
RECORD_DESERIALIZER,
NOOP_FILTER,
PollingSettings.createDefault(),
CURSOR_MOCK
);

@@ -183,21 +188,24 @@ class RecordEmitterTest extends AbstractIntegrationTest {
targetOffsets.put(new TopicPartition(TOPIC, i), offset);
}

var forwardEmitter = new ForwardRecordEmitter(
var forwardEmitter = new ForwardEmitter(
this::createConsumer,
new ConsumerPosition(FROM_OFFSET, TOPIC, List.copyOf(targetOffsets.keySet()), null,
new Offsets(null, targetOffsets)),
createMessagesProcessing(),
new ConsumerPosition.Offsets(null, targetOffsets)),
PARTITIONS * MSGS_PER_PARTITION,
RECORD_DESERIALIZER,
NOOP_FILTER,
PollingSettings.createDefault(),
CURSOR_MOCK
);

var backwardEmitter = new BackwardRecordEmitter(
var backwardEmitter = new BackwardEmitter(
this::createConsumer,
new ConsumerPosition(TO_OFFSET, TOPIC, List.copyOf(targetOffsets.keySet()), null,
new Offsets(null, targetOffsets)),
new ConsumerPosition.Offsets(null, targetOffsets)),
PARTITIONS * MSGS_PER_PARTITION,
createMessagesProcessing(),
RECORD_DESERIALIZER,
NOOP_FILTER,
PollingSettings.createDefault(),
CURSOR_MOCK
);

@@ -223,10 +231,12 @@ class RecordEmitterTest extends AbstractIntegrationTest {
//choosing ts in the middle
long targetTimestamp = tsStats.getMin() + ((tsStats.getMax() - tsStats.getMin()) / 2);

var forwardEmitter = new ForwardRecordEmitter(
var forwardEmitter = new ForwardEmitter(
this::createConsumer,
new ConsumerPosition(FROM_TIMESTAMP, TOPIC, List.of(), targetTimestamp, null),
createMessagesProcessing(),
PARTITIONS * MSGS_PER_PARTITION,
RECORD_DESERIALIZER,
NOOP_FILTER,
PollingSettings.createDefault(),
CURSOR_MOCK
);

@@ -239,11 +249,12 @@ class RecordEmitterTest extends AbstractIntegrationTest {
.collect(Collectors.toList())
);

var backwardEmitter = new BackwardRecordEmitter(
var backwardEmitter = new BackwardEmitter(
this::createConsumer,
new ConsumerPosition(TO_TIMESTAMP, TOPIC, List.of(), targetTimestamp, null),
PARTITIONS * MSGS_PER_PARTITION,
createMessagesProcessing(),
RECORD_DESERIALIZER,
NOOP_FILTER,
PollingSettings.createDefault(),
CURSOR_MOCK
);

@@ -265,12 +276,13 @@ class RecordEmitterTest extends AbstractIntegrationTest {
targetOffsets.put(new TopicPartition(TOPIC, i), (long) MSGS_PER_PARTITION);
}

var backwardEmitter = new BackwardRecordEmitter(
var backwardEmitter = new BackwardEmitter(
this::createConsumer,
new ConsumerPosition(TO_OFFSET, TOPIC, List.copyOf(targetOffsets.keySet()), null,
new Offsets(null, targetOffsets)),
new ConsumerPosition.Offsets(null, targetOffsets)),
numMessages,
createMessagesProcessing(),
RECORD_DESERIALIZER,
NOOP_FILTER,
PollingSettings.createDefault(),
CURSOR_MOCK
);

@@ -293,11 +305,13 @@ class RecordEmitterTest extends AbstractIntegrationTest {
offsets.put(new TopicPartition(TOPIC, i), 0L);
}

var backwardEmitter = new BackwardRecordEmitter(
var backwardEmitter = new BackwardEmitter(
this::createConsumer,
new ConsumerPosition(TO_OFFSET, TOPIC, List.copyOf(offsets.keySet()), null, new Offsets(null, offsets)),
new ConsumerPosition(TO_OFFSET, TOPIC, List.copyOf(offsets.keySet()), null,
new ConsumerPosition.Offsets(null, offsets)),
100,
createMessagesProcessing(),
RECORD_DESERIALIZER,
NOOP_FILTER,
PollingSettings.createDefault(),
CURSOR_MOCK
);

@@ -338,22 +352,20 @@ class RecordEmitterTest extends AbstractIntegrationTest {
assertionsConsumer.accept(step.expectComplete().verifyThenAssertThat());
}

private KafkaConsumer<Bytes, Bytes> createConsumer() {
private EnhancedConsumer createConsumer() {
return createConsumer(Map.of());
}

private KafkaConsumer<Bytes, Bytes> createConsumer(Map<String, Object> properties) {
private EnhancedConsumer createConsumer(Map<String, Object> properties) {
final Map<String, ? extends Serializable> map = Map.of(
ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, kafka.getBootstrapServers(),
ConsumerConfig.GROUP_ID_CONFIG, UUID.randomUUID().toString(),
ConsumerConfig.MAX_POLL_RECORDS_CONFIG, 19, // to check multiple polls
ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, BytesDeserializer.class,
ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, BytesDeserializer.class
ConsumerConfig.MAX_POLL_RECORDS_CONFIG, 19 // to check multiple polls
);
Properties props = new Properties();
props.putAll(map);
props.putAll(properties);
return new KafkaConsumer<>(props);
return new EnhancedConsumer(props, PollingThrottler.noop(), ApplicationMetrics.noop());
}

@Value