MessagesService.java

package com.provectus.kafka.ui.service;

import com.google.common.util.concurrent.RateLimiter;
import com.provectus.kafka.ui.config.ClustersProperties;
import com.provectus.kafka.ui.emitter.AbstractEmitter;
import com.provectus.kafka.ui.emitter.BackwardEmitter;
import com.provectus.kafka.ui.emitter.ForwardEmitter;
import com.provectus.kafka.ui.emitter.MessageFilters;
import com.provectus.kafka.ui.emitter.TailingEmitter;
import com.provectus.kafka.ui.exception.TopicNotFoundException;
import com.provectus.kafka.ui.exception.ValidationException;
import com.provectus.kafka.ui.model.ConsumerPosition;
import com.provectus.kafka.ui.model.CreateTopicMessageDTO;
import com.provectus.kafka.ui.model.KafkaCluster;
import com.provectus.kafka.ui.model.MessageFilterTypeDTO;
import com.provectus.kafka.ui.model.SeekDirectionDTO;
import com.provectus.kafka.ui.model.SmartFilterTestExecutionDTO;
import com.provectus.kafka.ui.model.SmartFilterTestExecutionResultDTO;
import com.provectus.kafka.ui.model.TopicMessageDTO;
import com.provectus.kafka.ui.model.TopicMessageEventDTO;
import com.provectus.kafka.ui.serde.api.Serde;
import com.provectus.kafka.ui.serdes.ProducerRecordCreator;
import com.provectus.kafka.ui.util.SslPropertiesUtil;
import java.time.Instant;
import java.time.OffsetDateTime;
import java.time.ZoneOffset;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Properties;
import java.util.concurrent.CompletableFuture;
import java.util.function.Predicate;
import java.util.function.UnaryOperator;
import java.util.stream.Collectors;
import javax.annotation.Nullable;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang3.StringUtils;
import org.apache.kafka.clients.admin.OffsetSpec;
import org.apache.kafka.clients.admin.TopicDescription;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.ByteArraySerializer;
import org.springframework.stereotype.Service;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;
import reactor.core.scheduler.Schedulers;
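
/**
 * Handles message-level operations for a cluster: streaming messages into the UI,
 * producing new messages, deleting records, and test-running "smart filter" scripts.
 * Page-size limits are taken from {@link ClustersProperties} polling settings,
 * falling back to the defaults declared below.
 */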
@Service
@Slf4j
public class MessagesService {

  private static final int DEFAULT_MAX_PAGE_SIZE = 500;
  private static final int DEFAULT_PAGE_SIZE = 100;

  // limiting UI message rate to 20/sec in tailing mode
  private static final int TAILING_UI_MESSAGE_THROTTLE_RATE = 20;

  private final AdminClientService adminClientService;
  private final DeserializationService deserializationService;
  private final ConsumerGroupService consumerGroupService;
  private final int maxPageSize;
  private final int defaultPageSize;

  public MessagesService(AdminClientService adminClientService,
                         DeserializationService deserializationService,
                         ConsumerGroupService consumerGroupService,
                         ClustersProperties properties) {
    this.adminClientService = adminClientService;
    this.deserializationService = deserializationService;
    this.consumerGroupService = consumerGroupService;

    var pollingProps = Optional.ofNullable(properties.getPolling())
        .orElseGet(ClustersProperties.PollingProperties::new);
    this.maxPageSize = Optional.ofNullable(pollingProps.getMaxPageSize())
        .orElse(DEFAULT_MAX_PAGE_SIZE);
    this.defaultPageSize = Optional.ofNullable(pollingProps.getDefaultPageSize())
        .orElse(DEFAULT_PAGE_SIZE);
  }
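
  /**
   * Verifies that the topic exists before an operation runs against it.
   * Emits the topic's {@link TopicDescription} on success and errors with
   * {@link TopicNotFoundException} when the describe call returns empty.
   */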
  private Mono<TopicDescription> withExistingTopic(KafkaCluster cluster, String topicName) {
    return adminClientService.get(cluster)
        .flatMap(client -> client.describeTopic(topicName))
        .switchIfEmpty(Mono.error(new TopicNotFoundException()));
  }
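
  /**
   * Compiles a user-supplied Groovy "smart filter" and evaluates it once against a
   * synthetic {@link TopicMessageDTO} built from the test-execution request, so the
   * UI can verify a filter before it is applied to a live poll. Compilation and
   * execution failures are returned inside the result DTO instead of being thrown.
   */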
  public static SmartFilterTestExecutionResultDTO execSmartFilterTest(SmartFilterTestExecutionDTO execData) {
    Predicate<TopicMessageDTO> predicate;
    try {
      predicate = MessageFilters.createMsgFilter(
          execData.getFilterCode(),
          MessageFilterTypeDTO.GROOVY_SCRIPT
      );
    } catch (Exception e) {
      log.info("Smart filter '{}' compilation error", execData.getFilterCode(), e);
      return new SmartFilterTestExecutionResultDTO()
          .error("Compilation error: " + e.getMessage());
    }
    try {
      var result = predicate.test(
          new TopicMessageDTO()
              .key(execData.getKey())
              .content(execData.getValue())
              .headers(execData.getHeaders())
              .offset(execData.getOffset())
              .partition(execData.getPartition())
              .timestamp(
                  Optional.ofNullable(execData.getTimestampMs())
                      .map(ts -> OffsetDateTime.ofInstant(Instant.ofEpochMilli(ts), ZoneOffset.UTC))
                      .orElse(null))
      );
      return new SmartFilterTestExecutionResultDTO()
          .result(result);
    } catch (Exception e) {
      log.info("Smart filter {} execution error", execData, e);
      return new SmartFilterTestExecutionResultDTO()
          .error("Execution error: " + e.getMessage());
    }
  }
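
  /**
   * Deletes records from the given partitions (or from all partitions when the
   * list is empty) up to each partition's current end offset.
   */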
  public Mono<Void> deleteTopicMessages(KafkaCluster cluster, String topicName,
                                        List<Integer> partitionsToInclude) {
    return withExistingTopic(cluster, topicName)
        .flatMap(td ->
            offsetsForDeletion(cluster, topicName, partitionsToInclude)
                .flatMap(offsets ->
                    adminClientService.get(cluster).flatMap(ac -> ac.deleteRecords(offsets))));
  }
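
  /**
   * Resolves, per requested partition, the end offset to delete up to. Empty
   * partitions (earliest offset equals latest offset) are skipped, since there
   * is nothing to delete in them.
   */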
  private Mono<Map<TopicPartition, Long>> offsetsForDeletion(KafkaCluster cluster, String topicName,
                                                             List<Integer> partitionsToInclude) {
    return adminClientService.get(cluster).flatMap(ac ->
        ac.listTopicOffsets(topicName, OffsetSpec.earliest(), true)
            .zipWith(ac.listTopicOffsets(topicName, OffsetSpec.latest(), true),
                (start, end) ->
                    end.entrySet().stream()
                        .filter(e -> partitionsToInclude.isEmpty()
                            || partitionsToInclude.contains(e.getKey().partition()))
                        // we only need non-empty partitions (where start offset != end offset)
                        .filter(entry -> !entry.getValue().equals(start.get(entry.getKey())))
                        .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)))
    );
  }
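
  /**
   * Produces a single message to the topic. The work is shifted onto the
   * bounded-elastic scheduler, since creating and closing a producer can block.
   */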
  public Mono<RecordMetadata> sendMessage(KafkaCluster cluster, String topic,
                                          CreateTopicMessageDTO msg) {
    return withExistingTopic(cluster, topic)
        .publishOn(Schedulers.boundedElastic())
        .flatMap(desc -> sendMessageImpl(cluster, desc, msg));
  }
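
  /**
   * Validates the target partition, serializes the key and value with the requested
   * serdes, and bridges the producer callback into a {@link Mono} through a
   * {@link CompletableFuture}. Closing the producer via try-with-resources is safe
   * here because {@link KafkaProducer#close()} waits for in-flight sends to complete.
   */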
  private Mono<RecordMetadata> sendMessageImpl(KafkaCluster cluster,
                                               TopicDescription topicDescription,
                                               CreateTopicMessageDTO msg) {
    if (msg.getPartition() != null
        && msg.getPartition() > topicDescription.partitions().size() - 1) {
      return Mono.error(new ValidationException("Invalid partition"));
    }
    ProducerRecordCreator producerRecordCreator =
        deserializationService.producerRecordCreator(
            cluster,
            topicDescription.name(),
            msg.getKeySerde().get(),
            msg.getValueSerde().get()
        );
    try (KafkaProducer<byte[], byte[]> producer = createProducer(cluster, Map.of())) {
      ProducerRecord<byte[], byte[]> producerRecord = producerRecordCreator.create(
          topicDescription.name(),
          msg.getPartition(),
          msg.getKey().orElse(null),
          msg.getContent().orElse(null),
          msg.getHeaders()
      );
      CompletableFuture<RecordMetadata> cf = new CompletableFuture<>();
      producer.send(producerRecord, (metadata, exception) -> {
        if (exception != null) {
          cf.completeExceptionally(exception);
        } else {
          cf.complete(metadata);
        }
      });
      return Mono.fromFuture(cf);
    } catch (Throwable e) {
      return Mono.error(e);
    }
  }
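
  /**
   * Builds a raw byte-array producer for the cluster. Configuration is layered:
   * cluster SSL settings first, then cluster-level properties, then the mandatory
   * bootstrap and serializer settings, and finally {@code additionalProps}, which
   * can override anything set before it. For example (illustrative only):
   * {@code createProducer(cluster, Map.of(ProducerConfig.ACKS_CONFIG, "all"))}.
   */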
  public static KafkaProducer<byte[], byte[]> createProducer(KafkaCluster cluster,
                                                             Map<String, Object> additionalProps) {
    Properties properties = new Properties();
    SslPropertiesUtil.addKafkaSslProperties(cluster.getOriginalProperties().getSsl(), properties);
    properties.putAll(cluster.getProperties());
    properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, cluster.getBootstrapServers());
    properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class);
    properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class);
    properties.putAll(additionalProps);
    return new KafkaProducer<>(properties);
  }
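
  /**
   * Streams message events to the UI. The topic is validated first, and the
   * requested page size is clamped to the configured maximum before polling starts.
   */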
  public Flux<TopicMessageEventDTO> loadMessages(KafkaCluster cluster, String topic,
                                                 ConsumerPosition consumerPosition,
                                                 @Nullable String query,
                                                 MessageFilterTypeDTO filterQueryType,
                                                 @Nullable Integer pageSize,
                                                 SeekDirectionDTO seekDirection,
                                                 @Nullable String keySerde,
                                                 @Nullable String valueSerde) {
    return withExistingTopic(cluster, topic)
        .flux()
        .publishOn(Schedulers.boundedElastic())
        .flatMap(td -> loadMessagesImpl(cluster, topic, consumerPosition, query,
            filterQueryType, fixPageSize(pageSize), seekDirection, keySerde, valueSerde));
  }
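
  /**
   * Falls back to the default page size when the requested size is missing,
   * non-positive, or above the configured maximum.
   */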
  private int fixPageSize(@Nullable Integer pageSize) {
    return Optional.ofNullable(pageSize)
        .filter(ps -> ps > 0 && ps <= maxPageSize)
        .orElse(defaultPageSize);
  }
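
  /**
   * Picks the emitter matching the seek direction (forward, backward, or tailing),
   * then masks sensitive fields and, in tailing mode, throttles the event rate
   * before the stream reaches the UI. Note that tailing takes no limit, since it
   * streams until the client disconnects.
   */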
  private Flux<TopicMessageEventDTO> loadMessagesImpl(KafkaCluster cluster,
                                                      String topic,
                                                      ConsumerPosition consumerPosition,
                                                      @Nullable String query,
                                                      MessageFilterTypeDTO filterQueryType,
                                                      int limit,
                                                      SeekDirectionDTO seekDirection,
                                                      @Nullable String keySerde,
                                                      @Nullable String valueSerde) {
    var deserializer = deserializationService.deserializerFor(cluster, topic, keySerde, valueSerde);
    var filter = getMsgFilter(query, filterQueryType);
    AbstractEmitter emitter = switch (seekDirection) {
      case FORWARD -> new ForwardEmitter(
          () -> consumerGroupService.createConsumer(cluster),
          consumerPosition, limit, deserializer, filter, cluster.getPollingSettings()
      );
      case BACKWARD -> new BackwardEmitter(
          () -> consumerGroupService.createConsumer(cluster),
          consumerPosition, limit, deserializer, filter, cluster.getPollingSettings()
      );
      case TAILING -> new TailingEmitter(
          () -> consumerGroupService.createConsumer(cluster),
          consumerPosition, deserializer, filter, cluster.getPollingSettings()
      );
    };
    return Flux.create(emitter)
        .map(getDataMasker(cluster, topic))
        .map(throttleUiPublish(seekDirection));
  }
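
  /**
   * Applies the cluster's configured masking functions to the key and content of
   * each MESSAGE event; non-message events (phases, stats) pass through unchanged.
   */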
  private UnaryOperator<TopicMessageEventDTO> getDataMasker(KafkaCluster cluster, String topicName) {
    var keyMasker = cluster.getMasking().getMaskingFunction(topicName, Serde.Target.KEY);
    var valMasker = cluster.getMasking().getMaskingFunction(topicName, Serde.Target.VALUE);
    return evt -> {
      if (evt.getType() != TopicMessageEventDTO.TypeEnum.MESSAGE) {
        return evt;
      }
      return evt.message(
          evt.getMessage()
              .key(keyMasker.apply(evt.getMessage().getKey()))
              .content(valMasker.apply(evt.getMessage().getContent())));
    };
  }
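
  /**
   * Builds the message predicate for the given query; an empty or missing query
   * matches everything.
   */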
  private Predicate<TopicMessageDTO> getMsgFilter(@Nullable String query,
                                                  MessageFilterTypeDTO filterQueryType) {
    if (StringUtils.isEmpty(query)) {
      return evt -> true;
    }
    return MessageFilters.createMsgFilter(query, filterQueryType);
  }
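
  /**
   * In tailing mode the stream is unbounded, so a Guava {@link RateLimiter} caps
   * UI publishes at {@value #TAILING_UI_MESSAGE_THROTTLE_RATE} events/sec
   * ({@code acquire} blocks the emitting thread until a permit is available).
   */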
  private <T> UnaryOperator<T> throttleUiPublish(SeekDirectionDTO seekDirection) {
    if (seekDirection == SeekDirectionDTO.TAILING) {
      RateLimiter rateLimiter = RateLimiter.create(TAILING_UI_MESSAGE_THROTTLE_RATE);
      return m -> {
        rateLimiter.acquire(1);
        return m;
      };
    }
    // there is no need to throttle the UI publish rate for non-tailing modes,
    // since the number of produced messages is already limited by the page size
    return UnaryOperator.identity();
  }
}