MessagesService.java

package com.provectus.kafka.ui.service;

import com.google.common.util.concurrent.RateLimiter;
import com.provectus.kafka.ui.config.ClustersProperties;
import com.provectus.kafka.ui.emitter.BackwardRecordEmitter;
import com.provectus.kafka.ui.emitter.ForwardRecordEmitter;
import com.provectus.kafka.ui.emitter.MessageFilters;
import com.provectus.kafka.ui.emitter.MessagesProcessing;
import com.provectus.kafka.ui.emitter.TailingEmitter;
import com.provectus.kafka.ui.exception.TopicNotFoundException;
import com.provectus.kafka.ui.exception.ValidationException;
import com.provectus.kafka.ui.model.ConsumerPosition;
import com.provectus.kafka.ui.model.CreateTopicMessageDTO;
import com.provectus.kafka.ui.model.KafkaCluster;
import com.provectus.kafka.ui.model.MessageFilterTypeDTO;
import com.provectus.kafka.ui.model.SeekDirectionDTO;
import com.provectus.kafka.ui.model.SmartFilterTestExecutionDTO;
import com.provectus.kafka.ui.model.SmartFilterTestExecutionResultDTO;
import com.provectus.kafka.ui.model.TopicMessageDTO;
import com.provectus.kafka.ui.model.TopicMessageEventDTO;
import com.provectus.kafka.ui.serde.api.Serde;
import com.provectus.kafka.ui.serdes.ProducerRecordCreator;
import com.provectus.kafka.ui.util.SslPropertiesUtil;
import java.time.Instant;
import java.time.OffsetDateTime;
import java.time.ZoneOffset;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Properties;
import java.util.concurrent.CompletableFuture;
import java.util.function.Predicate;
import java.util.function.UnaryOperator;
import java.util.stream.Collectors;
import javax.annotation.Nullable;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang3.StringUtils;
import org.apache.kafka.clients.admin.OffsetSpec;
import org.apache.kafka.clients.admin.TopicDescription;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.ByteArraySerializer;
import org.springframework.stereotype.Service;
import reactor.core.publisher.Flux;
import reactor.core.publisher.FluxSink;
import reactor.core.publisher.Mono;
import reactor.core.scheduler.Schedulers;
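
/**
 * Service for working with topic messages: sending individual messages, deleting records,
 * running smart-filter tests, and streaming messages to clients in forward, backward
 * and tailing modes.
 */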
@Service
@Slf4j
public class MessagesService {

  private static final int DEFAULT_MAX_PAGE_SIZE = 500;
  private static final int DEFAULT_PAGE_SIZE = 100;

  // limiting UI messages rate to 20/sec in tailing mode
  private static final int TAILING_UI_MESSAGE_THROTTLE_RATE = 20;

  private final AdminClientService adminClientService;
  private final DeserializationService deserializationService;
  private final ConsumerGroupService consumerGroupService;
  private final int maxPageSize;
  private final int defaultPageSize;

  public MessagesService(AdminClientService adminClientService,
                         DeserializationService deserializationService,
                         ConsumerGroupService consumerGroupService,
                         ClustersProperties properties) {
    this.adminClientService = adminClientService;
    this.deserializationService = deserializationService;
    this.consumerGroupService = consumerGroupService;
    var pollingProps = Optional.ofNullable(properties.getPolling())
        .orElseGet(ClustersProperties.PollingProperties::new);
    this.maxPageSize = Optional.ofNullable(pollingProps.getMaxPageSize())
        .orElse(DEFAULT_MAX_PAGE_SIZE);
    this.defaultPageSize = Optional.ofNullable(pollingProps.getDefaultPageSize())
        .orElse(DEFAULT_PAGE_SIZE);
  }
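
  // Resolves the topic's description, failing with TopicNotFoundException if the topic
  // does not exist. Used as a guard before every operation that targets a concrete topic.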
  private Mono<TopicDescription> withExistingTopic(KafkaCluster cluster, String topicName) {
    return adminClientService.get(cluster)
        .flatMap(client -> client.describeTopic(topicName))
        .switchIfEmpty(Mono.error(new TopicNotFoundException()));
  }
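
  /**
   * Compiles the given Groovy smart-filter script and evaluates it against a single synthetic
   * message built from the supplied test data; compilation and execution errors are returned
   * in the result DTO rather than thrown.
   */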
  public static SmartFilterTestExecutionResultDTO execSmartFilterTest(SmartFilterTestExecutionDTO execData) {
    Predicate<TopicMessageDTO> predicate;
    try {
      predicate = MessageFilters.createMsgFilter(
          execData.getFilterCode(),
          MessageFilterTypeDTO.GROOVY_SCRIPT
      );
    } catch (Exception e) {
      log.info("Smart filter '{}' compilation error", execData.getFilterCode(), e);
      return new SmartFilterTestExecutionResultDTO()
          .error("Compilation error: " + e.getMessage());
    }
    try {
      var result = predicate.test(
          new TopicMessageDTO()
              .key(execData.getKey())
              .content(execData.getValue())
              .headers(execData.getHeaders())
              .offset(execData.getOffset())
              .partition(execData.getPartition())
              .timestamp(
                  Optional.ofNullable(execData.getTimestampMs())
                      .map(ts -> OffsetDateTime.ofInstant(Instant.ofEpochMilli(ts), ZoneOffset.UTC))
                      .orElse(null))
      );
      return new SmartFilterTestExecutionResultDTO()
          .result(result);
    } catch (Exception e) {
      log.info("Smart filter {} execution error", execData, e);
      return new SmartFilterTestExecutionResultDTO()
          .error("Execution error: " + e.getMessage());
    }
  }
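
  /**
   * Purges records from the selected partitions (all partitions if the list is empty)
   * by deleting everything up to each partition's current end offset.
   */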
  public Mono<Void> deleteTopicMessages(KafkaCluster cluster, String topicName,
                                        List<Integer> partitionsToInclude) {
    return withExistingTopic(cluster, topicName)
        .flatMap(td ->
            offsetsForDeletion(cluster, topicName, partitionsToInclude)
                .flatMap(offsets ->
                    adminClientService.get(cluster).flatMap(ac -> ac.deleteRecords(offsets))));
  }
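
  // Builds the partition -> end-offset map passed to deleteRecords, skipping partitions
  // that are already empty or not in the requested partition list.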
  private Mono<Map<TopicPartition, Long>> offsetsForDeletion(KafkaCluster cluster, String topicName,
                                                             List<Integer> partitionsToInclude) {
    return adminClientService.get(cluster).flatMap(ac ->
        ac.listTopicOffsets(topicName, OffsetSpec.earliest(), true)
            .zipWith(ac.listTopicOffsets(topicName, OffsetSpec.latest(), true),
                (start, end) ->
                    end.entrySet().stream()
                        .filter(e -> partitionsToInclude.isEmpty()
                            || partitionsToInclude.contains(e.getKey().partition()))
                        // we only need non-empty partitions (where start offset != end offset)
                        .filter(entry -> !entry.getValue().equals(start.get(entry.getKey())))
                        .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)))
    );
  }
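
  /**
   * Sends a single message to the topic, serializing the key and value with the serdes
   * selected in the request. Runs on the bounded-elastic scheduler since producing blocks.
   */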
  public Mono<RecordMetadata> sendMessage(KafkaCluster cluster, String topic,
                                          CreateTopicMessageDTO msg) {
    return withExistingTopic(cluster, topic)
        .publishOn(Schedulers.boundedElastic())
        .flatMap(desc -> sendMessageImpl(cluster, desc, msg));
  }
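
  // Validates the target partition, serializes key/value via the configured serdes,
  // and bridges the producer send callback into a Mono through a CompletableFuture.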
  private Mono<RecordMetadata> sendMessageImpl(KafkaCluster cluster,
                                               TopicDescription topicDescription,
                                               CreateTopicMessageDTO msg) {
    if (msg.getPartition() != null
        && msg.getPartition() > topicDescription.partitions().size() - 1) {
      return Mono.error(new ValidationException("Invalid partition"));
    }
    ProducerRecordCreator producerRecordCreator =
        deserializationService.producerRecordCreator(
            cluster,
            topicDescription.name(),
            msg.getKeySerde().get(),
            msg.getValueSerde().get()
        );
    try (KafkaProducer<byte[], byte[]> producer = createProducer(cluster, Map.of())) {
      ProducerRecord<byte[], byte[]> producerRecord = producerRecordCreator.create(
          topicDescription.name(),
          msg.getPartition(),
          msg.getKey().orElse(null),
          msg.getContent().orElse(null),
          msg.getHeaders()
      );
      CompletableFuture<RecordMetadata> cf = new CompletableFuture<>();
      producer.send(producerRecord, (metadata, exception) -> {
        if (exception != null) {
          cf.completeExceptionally(exception);
        } else {
          cf.complete(metadata);
        }
      });
      return Mono.fromFuture(cf);
    } catch (Throwable e) {
      return Mono.error(e);
    }
  }
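
  /**
   * Builds a raw byte[]/byte[] producer for the cluster, layering SSL settings, cluster-level
   * properties, serializer config, and caller-supplied overrides (later entries win).
   */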
  public static KafkaProducer<byte[], byte[]> createProducer(KafkaCluster cluster,
                                                             Map<String, Object> additionalProps) {
    Properties properties = new Properties();
    SslPropertiesUtil.addKafkaSslProperties(cluster.getOriginalProperties().getSsl(), properties);
    properties.putAll(cluster.getProperties());
    properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, cluster.getBootstrapServers());
    properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class);
    properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class);
    properties.putAll(additionalProps);
    return new KafkaProducer<>(properties);
  }
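
  /**
   * Streams topic messages as a Flux of events. The page size is clamped to the configured
   * maximum, and polling is moved to the bounded-elastic scheduler to keep blocking
   * consumer I/O off the reactive event loop.
   */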
  public Flux<TopicMessageEventDTO> loadMessages(KafkaCluster cluster, String topic,
                                                 ConsumerPosition consumerPosition,
                                                 @Nullable String query,
                                                 MessageFilterTypeDTO filterQueryType,
                                                 @Nullable Integer pageSize,
                                                 SeekDirectionDTO seekDirection,
                                                 @Nullable String keySerde,
                                                 @Nullable String valueSerde) {
    return withExistingTopic(cluster, topic)
        .flux()
        .publishOn(Schedulers.boundedElastic())
        .flatMap(td -> loadMessagesImpl(cluster, topic, consumerPosition, query,
            filterQueryType, fixPageSize(pageSize), seekDirection, keySerde, valueSerde));
  }
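
  // Falls back to the default page size when the requested size is missing, non-positive,
  // or larger than the configured maximum.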
  private int fixPageSize(@Nullable Integer pageSize) {
    return Optional.ofNullable(pageSize)
        .filter(ps -> ps > 0 && ps <= maxPageSize)
        .orElse(defaultPageSize);
  }
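
  // Chooses the emitter matching the seek direction: forward and backward emitters stop
  // after `limit` messages, while the tailing emitter runs unbounded until cancelled
  // (hence the null limit passed to MessagesProcessing in tailing mode).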
  private Flux<TopicMessageEventDTO> loadMessagesImpl(KafkaCluster cluster,
                                                      String topic,
                                                      ConsumerPosition consumerPosition,
                                                      @Nullable String query,
                                                      MessageFilterTypeDTO filterQueryType,
                                                      int limit,
                                                      SeekDirectionDTO seekDirection,
                                                      @Nullable String keySerde,
                                                      @Nullable String valueSerde) {
    java.util.function.Consumer<? super FluxSink<TopicMessageEventDTO>> emitter;
    var processing = new MessagesProcessing(
        deserializationService.deserializerFor(cluster, topic, keySerde, valueSerde),
        getMsgFilter(query, filterQueryType),
        seekDirection == SeekDirectionDTO.TAILING ? null : limit
    );
    if (seekDirection.equals(SeekDirectionDTO.FORWARD)) {
      emitter = new ForwardRecordEmitter(
          () -> consumerGroupService.createConsumer(cluster),
          consumerPosition,
          processing,
          cluster.getPollingSettings()
      );
    } else if (seekDirection.equals(SeekDirectionDTO.BACKWARD)) {
      emitter = new BackwardRecordEmitter(
          () -> consumerGroupService.createConsumer(cluster),
          consumerPosition,
          limit,
          processing,
          cluster.getPollingSettings()
      );
    } else {
      emitter = new TailingEmitter(
          () -> consumerGroupService.createConsumer(cluster),
          consumerPosition,
          processing,
          cluster.getPollingSettings()
      );
    }
    return Flux.create(emitter)
        .map(getDataMasker(cluster, topic))
        .map(throttleUiPublish(seekDirection));
  }
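
  // Wraps each MESSAGE event with the cluster's configured data-masking functions for
  // key and content; non-message events pass through unchanged.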
  private UnaryOperator<TopicMessageEventDTO> getDataMasker(KafkaCluster cluster, String topicName) {
    var keyMasker = cluster.getMasking().getMaskingFunction(topicName, Serde.Target.KEY);
    var valMasker = cluster.getMasking().getMaskingFunction(topicName, Serde.Target.VALUE);
    return evt -> {
      if (evt.getType() != TopicMessageEventDTO.TypeEnum.MESSAGE) {
        return evt;
      }
      return evt.message(
          evt.getMessage()
              .key(keyMasker.apply(evt.getMessage().getKey()))
              .content(valMasker.apply(evt.getMessage().getContent())));
    };
  }
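
  // An empty query matches everything; otherwise the query is compiled into a message predicate.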
  private Predicate<TopicMessageDTO> getMsgFilter(String query,
                                                  MessageFilterTypeDTO filterQueryType) {
    if (StringUtils.isEmpty(query)) {
      return evt -> true;
    }
    return MessageFilters.createMsgFilter(query, filterQueryType);
  }
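
  // In tailing mode the stream is unbounded, so emissions to the UI are throttled with a
  // Guava RateLimiter to TAILING_UI_MESSAGE_THROTTLE_RATE messages per second.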
  private <T> UnaryOperator<T> throttleUiPublish(SeekDirectionDTO seekDirection) {
    if (seekDirection == SeekDirectionDTO.TAILING) {
      RateLimiter rateLimiter = RateLimiter.create(TAILING_UI_MESSAGE_THROTTLE_RATE);
      return m -> {
        rateLimiter.acquire(1);
        return m;
      };
    }
    // there is no need to throttle the UI publish rate for non-tailing modes, since the number
    // of produced messages is already capped by the page size
    return UnaryOperator.identity();
  }
}