@@ -1,11 +1,16 @@
 package com.provectus.kafka.ui.service;
 
+import com.google.common.base.Charsets;
+import com.google.common.cache.Cache;
+import com.google.common.cache.CacheBuilder;
+import com.google.common.hash.Hashing;
 import com.google.common.util.concurrent.RateLimiter;
+import com.provectus.kafka.ui.config.ClustersProperties;
 import com.provectus.kafka.ui.emitter.BackwardRecordEmitter;
+import com.provectus.kafka.ui.emitter.Cursor;
 import com.provectus.kafka.ui.emitter.ForwardRecordEmitter;
-import com.provectus.kafka.ui.emitter.MessageFilterStats;
 import com.provectus.kafka.ui.emitter.MessageFilters;
-import com.provectus.kafka.ui.emitter.ResultSizeLimiter;
+import com.provectus.kafka.ui.emitter.MessagesProcessing;
 import com.provectus.kafka.ui.emitter.TailingEmitter;
 import com.provectus.kafka.ui.exception.TopicNotFoundException;
 import com.provectus.kafka.ui.exception.ValidationException;
@@ -21,16 +26,15 @@ import com.provectus.kafka.ui.serdes.ProducerRecordCreator;
 import com.provectus.kafka.ui.util.SslPropertiesUtil;
 import java.util.List;
 import java.util.Map;
+import java.util.Optional;
 import java.util.Properties;
 import java.util.concurrent.CompletableFuture;
-import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ThreadLocalRandom;
 import java.util.function.Predicate;
 import java.util.function.UnaryOperator;
 import java.util.stream.Collectors;
 import javax.annotation.Nullable;
-import lombok.RequiredArgsConstructor;
 import lombok.extern.slf4j.Slf4j;
-import org.apache.commons.lang3.RandomStringUtils;
 import org.apache.kafka.clients.admin.OffsetSpec;
 import org.apache.kafka.clients.admin.TopicDescription;
 import org.apache.kafka.clients.producer.KafkaProducer;
@@ -45,18 +49,43 @@ import reactor.core.publisher.Mono;
 import reactor.core.scheduler.Schedulers;
 
 @Service
-@RequiredArgsConstructor
 @Slf4j
 public class MessagesService {
 
+  private static final long SALT_FOR_HASHING = ThreadLocalRandom.current().nextLong();
+
+  private static final int DEFAULT_MAX_PAGE_SIZE = 500;
+  private static final int DEFAULT_PAGE_SIZE = 100;
   // limiting UI messages rate to 20/sec in tailing mode
-  public static final int TAILING_UI_MESSAGE_THROTTLE_RATE = 20;
+  private static final int TAILING_UI_MESSAGE_THROTTLE_RATE = 20;
 
   private final AdminClientService adminClientService;
   private final DeserializationService deserializationService;
   private final ConsumerGroupService consumerGroupService;
+  private final int maxPageSize;
+  private final int defaultPageSize;
+
+  private final Cache<String, Predicate<TopicMessageDTO>> registeredFilters = CacheBuilder.newBuilder()
+      .maximumSize(5_000)
+      .build();
+
+  private final PollingCursorsStorage cursorsStorage = new PollingCursorsStorage();
 
-  private final Map<String, Predicate<TopicMessageDTO>> registeredFilters = new ConcurrentHashMap<>();
+  public MessagesService(AdminClientService adminClientService,
+                         DeserializationService deserializationService,
+                         ConsumerGroupService consumerGroupService,
+                         ClustersProperties properties) {
+    this.adminClientService = adminClientService;
+    this.deserializationService = deserializationService;
+    this.consumerGroupService = consumerGroupService;
+
+    var pollingProps = Optional.ofNullable(properties.getPolling())
+        .orElseGet(ClustersProperties.PollingProperties::new);
+    this.maxPageSize = Optional.ofNullable(pollingProps.getMaxPageSize())
+        .orElse(DEFAULT_MAX_PAGE_SIZE);
+    this.defaultPageSize = Optional.ofNullable(pollingProps.getDefaultPageSize())
+        .orElse(DEFAULT_PAGE_SIZE);
+  }
 
   private Mono<TopicDescription> withExistingTopic(KafkaCluster cluster, String topicName) {
     return adminClientService.get(cluster)
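Note (annotation, not part of the patch): the constructor above wires the page-size limits and the cursor storage; the next hunk adds the loadMessages overloads that use them. A minimal caller-side sketch of the resulting API follows; the names messagesService, cluster and position, the topic, the Groovy snippet and the serde ids are placeholders, not taken from this change:

// Illustrative only; assumes messagesService, cluster and position exist in the calling code.
String filterId = messagesService.registerMessageFilter("msg.value.contains('error')");  // placeholder Groovy filter
Flux<TopicMessageEventDTO> firstPage = messagesService.loadMessages(
    cluster,
    "orders",     // topic (placeholder)
    position,     // ConsumerPosition for the first page
    null,         // no contains-string filter
    filterId,     // id returned by registerMessageFilter
    100,          // requested page size; out-of-range values fall back to defaultPageSize
    "String",     // key serde (placeholder)
    "String");    // value serde (placeholder)
// A later page could be requested via loadMessages(cluster, "orders", cursorId); how the cursor id
// reaches the client is outside this class.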
@@ -138,118 +167,135 @@ public class MessagesService {
     }
   }
 
-  public Flux<TopicMessageEventDTO> loadMessagesV2(KafkaCluster cluster,
-                                                   String topic,
-                                                   ConsumerPosition position,
-                                                   @Nullable String query,
-                                                   @Nullable String filterId,
-                                                   int limit,
-                                                   @Nullable String keySerde,
-                                                   @Nullable String valueSerde) {
+  private int fixPageSize(@Nullable Integer pageSize) {
+    return Optional.ofNullable(pageSize)
+        .filter(ps -> ps > 0 && ps <= maxPageSize)
+        .orElse(defaultPageSize);
+  }
+
+  private UnaryOperator<TopicMessageEventDTO> getDataMasker(KafkaCluster cluster, String topicName) {
+    var keyMasker = cluster.getMasking().getMaskingFunction(topicName, Serde.Target.KEY);
+    var valMasker = cluster.getMasking().getMaskingFunction(topicName, Serde.Target.VALUE);
+    return evt -> {
+      if (evt.getType() != TopicMessageEventDTO.TypeEnum.MESSAGE) {
+        return evt;
+      }
+      return evt.message(
+          evt.getMessage()
+              .key(keyMasker.apply(evt.getMessage().getKey()))
+              .content(valMasker.apply(evt.getMessage().getContent())));
+    };
+  }
+
+  public Flux<TopicMessageEventDTO> loadMessages(KafkaCluster cluster,
+                                                 String topic,
+                                                 ConsumerPosition consumerPosition,
+                                                 @Nullable String containsStringFilter,
+                                                 @Nullable String filterId,
+                                                 @Nullable Integer limit,
+                                                 @Nullable String keySerde,
+                                                 @Nullable String valueSerde) {
+    return loadMessages(
+        cluster,
+        topic,
+        deserializationService.deserializerFor(cluster, topic, keySerde, valueSerde),
+        consumerPosition,
+        getMsgFilter(containsStringFilter, filterId),
+        fixPageSize(limit)
+    );
+  }
+
+  public Flux<TopicMessageEventDTO> loadMessages(KafkaCluster cluster, String topic, String cursorId) {
+    Cursor cursor = cursorsStorage.getCursor(cursorId)
+        .orElseThrow(() -> new ValidationException("Next page cursor not found. Maybe it was evicted from cache."));
+    return loadMessages(
+        cluster,
+        topic,
+        cursor.deserializer(),
+        cursor.consumerPosition(),
+        cursor.filter(),
+        cursor.limit()
+    );
+  }
+
+  private Flux<TopicMessageEventDTO> loadMessages(KafkaCluster cluster,
+                                                  String topic,
+                                                  ConsumerRecordDeserializer deserializer,
+                                                  ConsumerPosition consumerPosition,
+                                                  Predicate<TopicMessageDTO> filter,
+                                                  int limit) {
     return withExistingTopic(cluster, topic)
         .flux()
         .publishOn(Schedulers.boundedElastic())
-        .flatMap(td -> loadMessagesImplV2(cluster, topic, position, query, filterId, limit, keySerde, valueSerde));
+        .flatMap(td -> loadMessagesImpl(cluster, topic, deserializer, consumerPosition, filter, fixPageSize(limit)));
   }
 
-  private Flux<TopicMessageEventDTO> loadMessagesImplV2(KafkaCluster cluster,
-                                                        String topic,
-                                                        ConsumerPosition consumerPosition,
-                                                        @Nullable String query,
-                                                        @Nullable String filterId,
-                                                        int limit,
-                                                        @Nullable String keySerde,
-                                                        @Nullable String valueSerde) {
-
-    ConsumerRecordDeserializer recordDeserializer =
-        deserializationService.deserializerFor(cluster, topic, keySerde, valueSerde);
+  private Flux<TopicMessageEventDTO> loadMessagesImpl(KafkaCluster cluster,
+                                                      String topic,
+                                                      ConsumerRecordDeserializer deserializer,
+                                                      ConsumerPosition consumerPosition,
+                                                      Predicate<TopicMessageDTO> filter,
+                                                      int limit) {
+    var processing = new MessagesProcessing(
+        deserializer,
+        filter,
+        consumerPosition.pollingMode() == PollingModeDTO.TAILING ? null : limit
+    );
 
     var emitter = switch (consumerPosition.pollingMode()) {
       case TO_OFFSET, TO_TIMESTAMP, LATEST -> new BackwardRecordEmitter(
           () -> consumerGroupService.createConsumer(cluster),
           consumerPosition,
           limit,
-          recordDeserializer,
-          cluster.getPollingSettings()
+          processing,
+          cluster.getPollingSettings(),
+          new Cursor.Tracking(deserializer, consumerPosition, filter, limit, cursorsStorage::register)
       );
       case FROM_OFFSET, FROM_TIMESTAMP, EARLIEST -> new ForwardRecordEmitter(
           () -> consumerGroupService.createConsumer(cluster),
           consumerPosition,
-          recordDeserializer,
-          cluster.getPollingSettings()
+          processing,
+          cluster.getPollingSettings(),
+          new Cursor.Tracking(deserializer, consumerPosition, filter, limit, cursorsStorage::register)
      );
       case TAILING -> new TailingEmitter(
           () -> consumerGroupService.createConsumer(cluster),
           consumerPosition,
-          recordDeserializer,
+          processing,
           cluster.getPollingSettings()
      );
    };
-
-    MessageFilterStats filterStats = new MessageFilterStats();
     return Flux.create(emitter)
-        .contextWrite(ctx -> ctx.put(MessageFilterStats.class, filterStats))
-        .filter(getMsgFilter(query, filterId, filterStats))
        .map(getDataMasker(cluster, topic))
-        .takeWhile(createTakeWhilePredicate(consumerPosition.pollingMode(), limit))
        .map(throttleUiPublish(consumerPosition.pollingMode()));
  }
 
-  private Predicate<TopicMessageEventDTO> createTakeWhilePredicate(
-      PollingModeDTO pollingMode, int limit) {
-    return pollingMode == PollingModeDTO.TAILING
-        ? evt -> true // no limit for tailing
-        : new ResultSizeLimiter(limit);
-  }
-
-  private UnaryOperator<TopicMessageEventDTO> getDataMasker(KafkaCluster cluster, String topicName) {
-    var keyMasker = cluster.getMasking().getMaskingFunction(topicName, Serde.Target.KEY);
-    var valMasker = cluster.getMasking().getMaskingFunction(topicName, Serde.Target.VALUE);
-    return evt -> {
-      if (evt.getType() != TopicMessageEventDTO.TypeEnum.MESSAGE) {
-        return evt;
-      }
-      return evt.message(
-          evt.getMessage()
-              .key(keyMasker.apply(evt.getMessage().getKey()))
-              .content(valMasker.apply(evt.getMessage().getContent())));
-    };
-  }
-
   public String registerMessageFilter(String groovyCode) {
-    var filter = MessageFilters.groovyScriptFilter(groovyCode);
-    var id = RandomStringUtils.random(10, true, true);
-    registeredFilters.put(id, filter);
-    return id;
+    String saltedCode = groovyCode + SALT_FOR_HASHING;
+    String filterId = Hashing.sha256()
+        .hashString(saltedCode, Charsets.UTF_8)
+        .toString()
+        .substring(0, 8);
+    if (registeredFilters.getIfPresent(filterId) == null) {
+      registeredFilters.put(filterId, MessageFilters.groovyScriptFilter(groovyCode));
+    }
+    return filterId;
   }
 
-  private Predicate<TopicMessageEventDTO> getMsgFilter(@Nullable String containsStrFilter,
-                                                       @Nullable String filterId,
-                                                       MessageFilterStats filterStats) {
+  private Predicate<TopicMessageDTO> getMsgFilter(@Nullable String containsStrFilter,
+                                                  @Nullable String smartFilterId) {
     Predicate<TopicMessageDTO> messageFilter = MessageFilters.noop();
     if (containsStrFilter != null) {
-      messageFilter = MessageFilters.containsStringFilter(containsStrFilter);
+      messageFilter = messageFilter.and(MessageFilters.containsStringFilter(containsStrFilter));
     }
-    if (filterId != null) {
-      messageFilter = registeredFilters.get(filterId);
-      if (messageFilter == null) {
-        throw new ValidationException("No filter was registered with id " + filterId);
+    if (smartFilterId != null) {
+      var registered = registeredFilters.getIfPresent(smartFilterId);
+      if (registered == null) {
+        throw new ValidationException("No filter was registered with id " + smartFilterId);
       }
+      messageFilter = messageFilter.and(registered);
     }
-    Predicate<TopicMessageDTO> finalMessageFilter = messageFilter;
-    return evt -> {
-      // we only apply filter for message events
-      if (evt.getType() == TopicMessageEventDTO.TypeEnum.MESSAGE) {
-        try {
-          return finalMessageFilter.test(evt.getMessage());
-        } catch (Exception e) {
-          filterStats.incrementApplyErrors();
-          log.trace("Error applying filter for message {}", evt.getMessage());
-          return false;
-        }
-      }
-      return true;
-    };
+    return messageFilter;
   }
 
   private <T> UnaryOperator<T> throttleUiPublish(PollingModeDTO pollingMode) {