MessagesService.java

package com.provectus.kafka.ui.service;

import com.google.common.util.concurrent.RateLimiter;
import com.provectus.kafka.ui.emitter.BackwardRecordEmitter;
import com.provectus.kafka.ui.emitter.ForwardRecordEmitter;
import com.provectus.kafka.ui.emitter.MessageFilters;
import com.provectus.kafka.ui.emitter.MessagesProcessing;
import com.provectus.kafka.ui.emitter.TailingEmitter;
import com.provectus.kafka.ui.exception.TopicNotFoundException;
import com.provectus.kafka.ui.exception.ValidationException;
import com.provectus.kafka.ui.model.ConsumerPosition;
import com.provectus.kafka.ui.model.CreateTopicMessageDTO;
import com.provectus.kafka.ui.model.KafkaCluster;
import com.provectus.kafka.ui.model.MessageFilterTypeDTO;
import com.provectus.kafka.ui.model.SeekDirectionDTO;
import com.provectus.kafka.ui.model.SmartFilterTestExecutionDTO;
import com.provectus.kafka.ui.model.SmartFilterTestExecutionResultDTO;
import com.provectus.kafka.ui.model.TopicMessageDTO;
import com.provectus.kafka.ui.model.TopicMessageEventDTO;
import com.provectus.kafka.ui.serde.api.Serde;
import com.provectus.kafka.ui.serdes.ProducerRecordCreator;
import com.provectus.kafka.ui.util.SslPropertiesUtil;
import java.time.Instant;
import java.time.OffsetDateTime;
import java.time.ZoneOffset;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Properties;
import java.util.concurrent.CompletableFuture;
import java.util.function.Predicate;
import java.util.function.UnaryOperator;
import java.util.stream.Collectors;
import javax.annotation.Nullable;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang3.StringUtils;
import org.apache.kafka.clients.admin.OffsetSpec;
import org.apache.kafka.clients.admin.TopicDescription;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.ByteArraySerializer;
import org.springframework.stereotype.Service;
import reactor.core.publisher.Flux;
import reactor.core.publisher.FluxSink;
import reactor.core.publisher.Mono;
import reactor.core.scheduler.Schedulers;
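
/**
 * Message-level operations for a cluster's topics: producing messages,
 * streaming them to the UI, deleting records and test-running "smart filter"
 * predicates.
 */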
@Service
@RequiredArgsConstructor
@Slf4j
public class MessagesService {

  // limits the UI message rate to 20/sec in tailing mode
  public static final int TAILING_UI_MESSAGE_THROTTLE_RATE = 20;

  private final AdminClientService adminClientService;
  private final DeserializationService deserializationService;
  private final ConsumerGroupService consumerGroupService;
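
  // Resolves the topic's description, failing the reactive chain with
  // TopicNotFoundException if the topic does not exist.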
  private Mono<TopicDescription> withExistingTopic(KafkaCluster cluster, String topicName) {
    return adminClientService.get(cluster)
        .flatMap(client -> client.describeTopic(topicName))
        .switchIfEmpty(Mono.error(new TopicNotFoundException()));
  }
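
  /**
   * Compiles the user-supplied code as a Groovy filter and evaluates it once
   * against a message assembled from the execution request. Compilation and
   * runtime failures are returned in the result DTO instead of being thrown.
   * As a purely illustrative example (the exact script bindings are defined by
   * MessageFilters), a filter might look like {@code value.contains("error")}.
   */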
  public static SmartFilterTestExecutionResultDTO execSmartFilterTest(SmartFilterTestExecutionDTO execData) {
    Predicate<TopicMessageDTO> predicate;
    try {
      predicate = MessageFilters.createMsgFilter(
          execData.getFilterCode(),
          MessageFilterTypeDTO.GROOVY_SCRIPT
      );
    } catch (Exception e) {
      log.info("Smart filter '{}' compilation error", execData.getFilterCode(), e);
      return new SmartFilterTestExecutionResultDTO()
          .error("Compilation error: " + e.getMessage());
    }
    try {
      var result = predicate.test(
          new TopicMessageDTO()
              .key(execData.getKey())
              .content(execData.getValue())
              .headers(execData.getHeaders())
              .offset(execData.getOffset())
              .timestamp(
                  Optional.ofNullable(execData.getTimestampMs())
                      .map(ts -> OffsetDateTime.ofInstant(Instant.ofEpochMilli(ts), ZoneOffset.UTC))
                      .orElse(null))
      );
      return new SmartFilterTestExecutionResultDTO()
          .result(result);
    } catch (Exception e) {
      log.info("Smart filter {} execution error", execData, e);
      return new SmartFilterTestExecutionResultDTO()
          .error("Execution error: " + e.getMessage());
    }
  }
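
  /**
   * Deletes records from the selected partitions (all partitions when the
   * list is empty) up to each partition's current end offset.
   */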
  public Mono<Void> deleteTopicMessages(KafkaCluster cluster, String topicName,
                                        List<Integer> partitionsToInclude) {
    return withExistingTopic(cluster, topicName)
        .flatMap(td ->
            offsetsForDeletion(cluster, topicName, partitionsToInclude)
                .flatMap(offsets ->
                    adminClientService.get(cluster).flatMap(ac -> ac.deleteRecords(offsets))));
  }
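
  // Builds the per-partition offsets to delete up to, skipping partitions that
  // are already empty (start offset == end offset).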
  private Mono<Map<TopicPartition, Long>> offsetsForDeletion(KafkaCluster cluster, String topicName,
                                                             List<Integer> partitionsToInclude) {
    return adminClientService.get(cluster).flatMap(ac ->
        ac.listTopicOffsets(topicName, OffsetSpec.earliest(), true)
            .zipWith(ac.listTopicOffsets(topicName, OffsetSpec.latest(), true),
                (start, end) ->
                    end.entrySet().stream()
                        .filter(e -> partitionsToInclude.isEmpty()
                            || partitionsToInclude.contains(e.getKey().partition()))
                        // we only need non-empty partitions (where start offset != end offset)
                        .filter(entry -> !entry.getValue().equals(start.get(entry.getKey())))
                        .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)))
    );
  }
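
  /**
   * Produces a single message to the topic. The send is shifted to the
   * bounded-elastic scheduler since creating and closing a KafkaProducer
   * are blocking operations.
   */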
  public Mono<RecordMetadata> sendMessage(KafkaCluster cluster, String topic,
                                          CreateTopicMessageDTO msg) {
    return withExistingTopic(cluster, topic)
        .publishOn(Schedulers.boundedElastic())
        .flatMap(desc -> sendMessageImpl(cluster, desc, msg));
  }
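
  // Key/value serialization is done upfront by the configured serdes (via
  // ProducerRecordCreator), so the producer itself is configured with plain
  // ByteArraySerializers. A short-lived producer is created per send.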
  private Mono<RecordMetadata> sendMessageImpl(KafkaCluster cluster,
                                               TopicDescription topicDescription,
                                               CreateTopicMessageDTO msg) {
    if (msg.getPartition() != null
        && msg.getPartition() > topicDescription.partitions().size() - 1) {
      return Mono.error(new ValidationException("Invalid partition"));
    }
    ProducerRecordCreator producerRecordCreator =
        deserializationService.producerRecordCreator(
            cluster,
            topicDescription.name(),
            msg.getKeySerde().get(),
            msg.getValueSerde().get()
        );
    Properties properties = new Properties();
    SslPropertiesUtil.addKafkaSslProperties(cluster.getOriginalProperties().getSsl(), properties);
    properties.putAll(cluster.getProperties());
    properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, cluster.getBootstrapServers());
    properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class);
    properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class);
    try (KafkaProducer<byte[], byte[]> producer = new KafkaProducer<>(properties)) {
      ProducerRecord<byte[], byte[]> producerRecord = producerRecordCreator.create(
          topicDescription.name(),
          msg.getPartition(),
          msg.getKey().orElse(null),
          msg.getContent().orElse(null),
          msg.getHeaders()
      );
      CompletableFuture<RecordMetadata> cf = new CompletableFuture<>();
      producer.send(producerRecord, (metadata, exception) -> {
        if (exception != null) {
          cf.completeExceptionally(exception);
        } else {
          cf.complete(metadata);
        }
      });
      return Mono.fromFuture(cf);
    } catch (Throwable e) {
      return Mono.error(e);
    }
  }
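
  /**
   * Streams topic messages to the UI as a flux of {@link TopicMessageEventDTO},
   * applying the optional filter query, per-topic data masking and, in tailing
   * mode, rate throttling.
   */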
  public Flux<TopicMessageEventDTO> loadMessages(KafkaCluster cluster, String topic,
                                                 ConsumerPosition consumerPosition,
                                                 @Nullable String query,
                                                 MessageFilterTypeDTO filterQueryType,
                                                 int limit,
                                                 SeekDirectionDTO seekDirection,
                                                 @Nullable String keySerde,
                                                 @Nullable String valueSerde) {
    return withExistingTopic(cluster, topic)
        .flux()
        .publishOn(Schedulers.boundedElastic())
        .flatMap(td -> loadMessagesImpl(cluster, topic, consumerPosition, query,
            filterQueryType, limit, seekDirection, keySerde, valueSerde));
  }
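
  // Chooses an emitter for the requested seek direction: FORWARD and BACKWARD
  // produce a bounded page of messages, while TAILING follows the log
  // indefinitely (hence the null message limit passed to MessagesProcessing).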
  private Flux<TopicMessageEventDTO> loadMessagesImpl(KafkaCluster cluster,
                                                      String topic,
                                                      ConsumerPosition consumerPosition,
                                                      @Nullable String query,
                                                      MessageFilterTypeDTO filterQueryType,
                                                      int limit,
                                                      SeekDirectionDTO seekDirection,
                                                      @Nullable String keySerde,
                                                      @Nullable String valueSerde) {
    java.util.function.Consumer<? super FluxSink<TopicMessageEventDTO>> emitter;
    var processing = new MessagesProcessing(
        deserializationService.deserializerFor(cluster, topic, keySerde, valueSerde),
        getMsgFilter(query, filterQueryType),
        seekDirection == SeekDirectionDTO.TAILING ? null : limit
    );
    if (seekDirection.equals(SeekDirectionDTO.FORWARD)) {
      emitter = new ForwardRecordEmitter(
          () -> consumerGroupService.createConsumer(cluster),
          consumerPosition,
          processing,
          cluster.getPollingSettings()
      );
    } else if (seekDirection.equals(SeekDirectionDTO.BACKWARD)) {
      emitter = new BackwardRecordEmitter(
          () -> consumerGroupService.createConsumer(cluster),
          consumerPosition,
          limit,
          processing,
          cluster.getPollingSettings()
      );
    } else {
      emitter = new TailingEmitter(
          () -> consumerGroupService.createConsumer(cluster),
          consumerPosition,
          processing,
          cluster.getPollingSettings()
      );
    }
    return Flux.create(emitter)
        .map(getDataMasker(cluster, topic))
        .map(throttleUiPublish(seekDirection));
  }
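
  // Applies the cluster's masking rules to the key and content of MESSAGE
  // events; all other event types pass through unchanged.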
  private UnaryOperator<TopicMessageEventDTO> getDataMasker(KafkaCluster cluster, String topicName) {
    var keyMasker = cluster.getMasking().getMaskingFunction(topicName, Serde.Target.KEY);
    var valMasker = cluster.getMasking().getMaskingFunction(topicName, Serde.Target.VALUE);
    return evt -> {
      if (evt.getType() != TopicMessageEventDTO.TypeEnum.MESSAGE) {
        return evt;
      }
      return evt.message(
          evt.getMessage()
              .key(keyMasker.apply(evt.getMessage().getKey()))
              .content(valMasker.apply(evt.getMessage().getContent())));
    };
  }
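
  // An empty query matches everything; otherwise the query is compiled into a
  // predicate of the requested filter type.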
  private Predicate<TopicMessageDTO> getMsgFilter(String query,
                                                  MessageFilterTypeDTO filterQueryType) {
    if (StringUtils.isEmpty(query)) {
      return evt -> true;
    }
    return MessageFilters.createMsgFilter(query, filterQueryType);
  }
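
  // In tailing mode every emitted element must acquire a permit from a Guava
  // RateLimiter, capping UI updates at TAILING_UI_MESSAGE_THROTTLE_RATE per second.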
  private <T> UnaryOperator<T> throttleUiPublish(SeekDirectionDTO seekDirection) {
    if (seekDirection == SeekDirectionDTO.TAILING) {
      RateLimiter rateLimiter = RateLimiter.create(TAILING_UI_MESSAGE_THROTTLE_RATE);
      return m -> {
        rateLimiter.acquire(1);
        return m;
      };
    }
    // no need to throttle non-tailing modes: the number of produced messages
    // is already capped by the page size
    return UnaryOperator.identity();
  }
}