@@ -1,9 +1,9 @@
 package com.provectus.kafka.ui.service;
 
 import com.google.common.util.concurrent.RateLimiter;
-import com.provectus.kafka.ui.config.ClustersProperties;
-import com.provectus.kafka.ui.emitter.BackwardEmitter;
-import com.provectus.kafka.ui.emitter.ForwardEmitter;
+import com.provectus.kafka.ui.emitter.BackwardRecordEmitter;
+import com.provectus.kafka.ui.emitter.ForwardRecordEmitter;
+import com.provectus.kafka.ui.emitter.MessageFilterStats;
 import com.provectus.kafka.ui.emitter.MessageFilters;
 import com.provectus.kafka.ui.emitter.TailingEmitter;
 import com.provectus.kafka.ui.exception.TopicNotFoundException;
@@ -12,25 +12,23 @@ import com.provectus.kafka.ui.model.ConsumerPosition;
 import com.provectus.kafka.ui.model.CreateTopicMessageDTO;
 import com.provectus.kafka.ui.model.KafkaCluster;
 import com.provectus.kafka.ui.model.MessageFilterTypeDTO;
+import com.provectus.kafka.ui.model.PollingModeDTO;
 import com.provectus.kafka.ui.model.SeekDirectionDTO;
-import com.provectus.kafka.ui.model.SmartFilterTestExecutionDTO;
-import com.provectus.kafka.ui.model.SmartFilterTestExecutionResultDTO;
-import com.provectus.kafka.ui.model.TopicMessageDTO;
 import com.provectus.kafka.ui.model.TopicMessageEventDTO;
+import com.provectus.kafka.ui.serde.api.Serde;
+import com.provectus.kafka.ui.serdes.ConsumerRecordDeserializer;
 import com.provectus.kafka.ui.serdes.ProducerRecordCreator;
+import com.provectus.kafka.ui.util.ResultSizeLimiter;
 import com.provectus.kafka.ui.util.SslPropertiesUtil;
-import java.time.Instant;
-import java.time.OffsetDateTime;
-import java.time.ZoneOffset;
 import java.util.List;
 import java.util.Map;
-import java.util.Optional;
 import java.util.Properties;
 import java.util.concurrent.CompletableFuture;
 import java.util.function.Predicate;
 import java.util.function.UnaryOperator;
 import java.util.stream.Collectors;
 import javax.annotation.Nullable;
+import lombok.RequiredArgsConstructor;
 import lombok.extern.slf4j.Slf4j;
 import org.apache.commons.lang3.StringUtils;
 import org.apache.kafka.clients.admin.OffsetSpec;
@@ -43,39 +41,21 @@ import org.apache.kafka.common.TopicPartition;
 import org.apache.kafka.common.serialization.ByteArraySerializer;
 import org.springframework.stereotype.Service;
 import reactor.core.publisher.Flux;
+import reactor.core.publisher.FluxSink;
 import reactor.core.publisher.Mono;
 import reactor.core.scheduler.Schedulers;
 
 @Service
+@RequiredArgsConstructor
 @Slf4j
 public class MessagesService {
 
-  private static final int DEFAULT_MAX_PAGE_SIZE = 500;
-  private static final int DEFAULT_PAGE_SIZE = 100;
   // limiting UI messages rate to 20/sec in tailing mode
-  private static final int TAILING_UI_MESSAGE_THROTTLE_RATE = 20;
+  public static final int TAILING_UI_MESSAGE_THROTTLE_RATE = 20;
 
   private final AdminClientService adminClientService;
   private final DeserializationService deserializationService;
   private final ConsumerGroupService consumerGroupService;
-  private final int maxPageSize;
-  private final int defaultPageSize;
-
-  public MessagesService(AdminClientService adminClientService,
-                         DeserializationService deserializationService,
-                         ConsumerGroupService consumerGroupService,
-                         ClustersProperties properties) {
-    this.adminClientService = adminClientService;
-    this.deserializationService = deserializationService;
-    this.consumerGroupService = consumerGroupService;
-
-    var pollingProps = Optional.ofNullable(properties.getPolling())
-        .orElseGet(ClustersProperties.PollingProperties::new);
-    this.maxPageSize = Optional.ofNullable(pollingProps.getMaxPageSize())
-        .orElse(DEFAULT_MAX_PAGE_SIZE);
-    this.defaultPageSize = Optional.ofNullable(pollingProps.getDefaultPageSize())
-        .orElse(DEFAULT_PAGE_SIZE);
-  }
 
   private Mono<TopicDescription> withExistingTopic(KafkaCluster cluster, String topicName) {
     return adminClientService.get(cluster)
@@ -83,40 +63,6 @@ public class MessagesService {
         .switchIfEmpty(Mono.error(new TopicNotFoundException()));
   }
 
-  public static SmartFilterTestExecutionResultDTO execSmartFilterTest(SmartFilterTestExecutionDTO execData) {
-    Predicate<TopicMessageDTO> predicate;
-    try {
-      predicate = MessageFilters.createMsgFilter(
-          execData.getFilterCode(),
-          MessageFilterTypeDTO.GROOVY_SCRIPT
-      );
-    } catch (Exception e) {
-      log.info("Smart filter '{}' compilation error", execData.getFilterCode(), e);
-      return new SmartFilterTestExecutionResultDTO()
-          .error("Compilation error : " + e.getMessage());
-    }
-    try {
-      var result = predicate.test(
-          new TopicMessageDTO()
-              .key(execData.getKey())
-              .content(execData.getValue())
-              .headers(execData.getHeaders())
-              .offset(execData.getOffset())
-              .partition(execData.getPartition())
-              .timestamp(
-                  Optional.ofNullable(execData.getTimestampMs())
-                      .map(ts -> OffsetDateTime.ofInstant(Instant.ofEpochMilli(ts), ZoneOffset.UTC))
-                      .orElse(null))
-      );
-      return new SmartFilterTestExecutionResultDTO()
-          .result(result);
-    } catch (Exception e) {
-      log.info("Smart filter {} execution error", execData, e);
-      return new SmartFilterTestExecutionResultDTO()
-          .error("Execution error : " + e.getMessage());
-    }
-  }
-
   public Mono<Void> deleteTopicMessages(KafkaCluster cluster, String topicName,
                                         List<Integer> partitionsToInclude) {
     return withExistingTopic(cluster, topicName)
@@ -163,7 +109,13 @@ public class MessagesService {
             msg.getValueSerde().get()
     );
 
-    try (KafkaProducer<byte[], byte[]> producer = createProducer(cluster, Map.of())) {
+    Properties properties = new Properties();
+    SslPropertiesUtil.addKafkaSslProperties(cluster.getOriginalProperties().getSsl(), properties);
+    properties.putAll(cluster.getProperties());
+    properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, cluster.getBootstrapServers());
+    properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class);
+    properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class);
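+    // note: the producer sends pre-serialized byte arrays; the key/value bytes
+    // were already produced by the selected serdes via ProducerRecordCreator above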
     try (KafkaProducer<byte[], byte[]> producer = new KafkaProducer<>(properties)) {
       ProducerRecord<byte[], byte[]> producerRecord = producerRecordCreator.create(
           topicDescription.name(),
           msg.getPartition(),
@@ -185,23 +137,11 @@ public class MessagesService {
     }
   }
 
-  public static KafkaProducer<byte[], byte[]> createProducer(KafkaCluster cluster,
-                                                             Map<String, Object> additionalProps) {
-    Properties properties = new Properties();
-    SslPropertiesUtil.addKafkaSslProperties(cluster.getOriginalProperties().getSsl(), properties);
-    properties.putAll(cluster.getProperties());
-    properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, cluster.getBootstrapServers());
-    properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class);
-    properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class);
-    properties.putAll(additionalProps);
-    return new KafkaProducer<>(properties);
-  }
-
   public Flux<TopicMessageEventDTO> loadMessages(KafkaCluster cluster, String topic,
                                                  ConsumerPosition consumerPosition,
                                                  @Nullable String query,
                                                  MessageFilterTypeDTO filterQueryType,
-                                                 @Nullable Integer pageSize,
+                                                 int limit,
                                                  SeekDirectionDTO seekDirection,
                                                  @Nullable String keySerde,
                                                  @Nullable String valueSerde) {
@@ -209,13 +149,7 @@ public class MessagesService {
         .flux()
         .publishOn(Schedulers.boundedElastic())
         .flatMap(td -> loadMessagesImpl(cluster, topic, consumerPosition, query,
-            filterQueryType, fixPageSize(pageSize), seekDirection, keySerde, valueSerde));
-  }
-
-  private int fixPageSize(@Nullable Integer pageSize) {
-    return Optional.ofNullable(pageSize)
-        .filter(ps -> ps > 0 && ps <= maxPageSize)
-        .orElse(defaultPageSize);
+            filterQueryType, limit, seekDirection, keySerde, valueSerde));
   }
 
   private Flux<TopicMessageEventDTO> loadMessagesImpl(KafkaCluster cluster,
@@ -228,32 +162,142 @@ public class MessagesService {
                                                       @Nullable String keySerde,
                                                       @Nullable String valueSerde) {
 
-    var deserializer = deserializationService.deserializerFor(cluster, topic, keySerde, valueSerde);
-    var filter = getMsgFilter(query, filterQueryType);
-    var emitter = switch (seekDirection) {
-      case FORWARD -> new ForwardEmitter(
+    java.util.function.Consumer<? super FluxSink<TopicMessageEventDTO>> emitter;
+    ConsumerRecordDeserializer recordDeserializer =
+        deserializationService.deserializerFor(cluster, topic, keySerde, valueSerde);
+    if (seekDirection.equals(SeekDirectionDTO.FORWARD)) {
+      emitter = new ForwardRecordEmitter(
           () -> consumerGroupService.createConsumer(cluster),
-          consumerPosition, limit, deserializer, filter, cluster.getPollingSettings()
+          consumerPosition,
+          recordDeserializer,
+          cluster.getThrottler().get()
       );
-      case BACKWARD -> new BackwardEmitter(
+    } else if (seekDirection.equals(SeekDirectionDTO.BACKWARD)) {
+      emitter = new BackwardRecordEmitter(
           () -> consumerGroupService.createConsumer(cluster),
-          consumerPosition, limit, deserializer, filter, cluster.getPollingSettings()
+          consumerPosition,
+          limit,
+          recordDeserializer,
+          cluster.getThrottler().get()
       );
-      case TAILING -> new TailingEmitter(
+    } else {
+      emitter = new TailingEmitter(
           () -> consumerGroupService.createConsumer(cluster),
-          consumerPosition, deserializer, filter, cluster.getPollingSettings()
+          consumerPosition,
+          recordDeserializer,
+          cluster.getThrottler().get()
       );
-    };
+    }
+    MessageFilterStats filterStats = new MessageFilterStats();
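+    // filterStats is published to the subscriber context below, so the filtering
+    // predicate built in getMsgFilter can record apply errors for this poll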
     return Flux.create(emitter)
+        .contextWrite(ctx -> ctx.put(MessageFilterStats.class, filterStats))
+        .filter(getMsgFilter(query, filterQueryType, filterStats))
+        .map(getDataMasker(cluster, topic))
+        .takeWhile(createTakeWhilePredicate(seekDirection, limit))
         .map(throttleUiPublish(seekDirection));
   }
 
-  private Predicate<TopicMessageDTO> getMsgFilter(String query,
-                                                  MessageFilterTypeDTO filterQueryType) {
+  public Flux<TopicMessageEventDTO> loadMessagesV2(KafkaCluster cluster,
+                                                   String topic,
+                                                   ConsumerPosition consumerPosition,
+                                                   PollingModeDTO pollingMode,
+                                                   @Nullable String query,
+                                                   @Nullable String filterId,
+                                                   int limit,
+                                                   @Nullable String keySerde,
+                                                   @Nullable String valueSerde) {
+    // NOTE (assumption): this sketch still takes a resolved ConsumerPosition from
+    // the controller, maps the TAILING polling mode onto the tailing seek direction
+    // (all other modes poll forward), and applies 'query' as a string-contains
+    // filter; resolving 'filterId' to a registered smart filter is not implemented here.
+    SeekDirectionDTO seekDirection = pollingMode == PollingModeDTO.TAILING
+        ? SeekDirectionDTO.TAILING
+        : SeekDirectionDTO.FORWARD;
+    return withExistingTopic(cluster, topic)
+        .flux()
+        .publishOn(Schedulers.boundedElastic())
+        .flatMap(td -> loadMessagesImplV2(cluster, topic, consumerPosition, query,
+            MessageFilterTypeDTO.STRING_CONTAINS, limit, seekDirection, keySerde, valueSerde));
+  }
+
+  private Flux<TopicMessageEventDTO> loadMessagesImplV2(KafkaCluster cluster,
+                                                        String topic,
+                                                        ConsumerPosition consumerPosition,
+                                                        @Nullable String query,
+                                                        MessageFilterTypeDTO filterQueryType,
+                                                        int limit,
+                                                        SeekDirectionDTO seekDirection,
+                                                        @Nullable String keySerde,
+                                                        @Nullable String valueSerde) {
+
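+    // this currently mirrors loadMessagesImpl above; duplicated, presumably so
+    // the V2 pipeline can evolve independently of the legacy endpoint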
+    java.util.function.Consumer<? super FluxSink<TopicMessageEventDTO>> emitter;
+    ConsumerRecordDeserializer recordDeserializer =
+        deserializationService.deserializerFor(cluster, topic, keySerde, valueSerde);
+    if (seekDirection.equals(SeekDirectionDTO.FORWARD)) {
+      emitter = new ForwardRecordEmitter(
+          () -> consumerGroupService.createConsumer(cluster),
+          consumerPosition,
+          recordDeserializer,
+          cluster.getThrottler().get()
+      );
+    } else if (seekDirection.equals(SeekDirectionDTO.BACKWARD)) {
+      emitter = new BackwardRecordEmitter(
+          () -> consumerGroupService.createConsumer(cluster),
+          consumerPosition,
+          limit,
+          recordDeserializer,
+          cluster.getThrottler().get()
+      );
+    } else {
+      emitter = new TailingEmitter(
+          () -> consumerGroupService.createConsumer(cluster),
+          consumerPosition,
+          recordDeserializer,
+          cluster.getThrottler().get()
+      );
+    }
+    MessageFilterStats filterStats = new MessageFilterStats();
+    return Flux.create(emitter)
+        .contextWrite(ctx -> ctx.put(MessageFilterStats.class, filterStats))
+        .filter(getMsgFilter(query, filterQueryType, filterStats))
+        .map(getDataMasker(cluster, topic))
+        .takeWhile(createTakeWhilePredicate(seekDirection, limit))
+        .map(throttleUiPublish(seekDirection));
+  }
+
+  private Predicate<TopicMessageEventDTO> createTakeWhilePredicate(
+      SeekDirectionDTO seekDirection, int limit) {
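+    // ResultSizeLimiter is a stateful predicate used with takeWhile: it keeps
+    // the stream open until 'limit' messages have been emitted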
+    return seekDirection == SeekDirectionDTO.TAILING
+        ? evt -> true // no limit for tailing
+        : new ResultSizeLimiter(limit);
+  }
+
+  private UnaryOperator<TopicMessageEventDTO> getDataMasker(KafkaCluster cluster, String topicName) {
+    var keyMasker = cluster.getMasking().getMaskingFunction(topicName, Serde.Target.KEY);
+    var valMasker = cluster.getMasking().getMaskingFunction(topicName, Serde.Target.VALUE);
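+    // masking applies only to MESSAGE events; all other event types pass through unchanged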
+    return evt -> {
+      if (evt.getType() != TopicMessageEventDTO.TypeEnum.MESSAGE) {
+        return evt;
+      }
+      return evt.message(
+          evt.getMessage()
+              .key(keyMasker.apply(evt.getMessage().getKey()))
+              .content(valMasker.apply(evt.getMessage().getContent())));
+    };
+  }
+
+  private Predicate<TopicMessageEventDTO> getMsgFilter(String query,
+                                                       MessageFilterTypeDTO filterQueryType,
+                                                       MessageFilterStats filterStats) {
     if (StringUtils.isEmpty(query)) {
       return evt -> true;
     }
-    return MessageFilters.createMsgFilter(query, filterQueryType);
+    var messageFilter = MessageFilters.createMsgFilter(query, filterQueryType);
+    return evt -> {
+      // we only apply the filter to message events
+      if (evt.getType() == TopicMessageEventDTO.TypeEnum.MESSAGE) {
+        try {
+          return messageFilter.test(evt.getMessage());
+        } catch (Exception e) {
+          filterStats.incrementApplyErrors();
+          log.trace("Error applying filter '{}' for message {}", query, evt.getMessage(), e);
+          return false;
+        }
+      }
+      return true;
+    };
   }
 
   private <T> UnaryOperator<T> throttleUiPublish(SeekDirectionDTO seekDirection) {
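+    // tailing only: a Guava RateLimiter caps UI-bound emissions at
+    // TAILING_UI_MESSAGE_THROTTLE_RATE (20/sec, see the constant above)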