MessagesService.java

package com.provectus.kafka.ui.service;

import com.google.common.util.concurrent.RateLimiter;
import com.provectus.kafka.ui.config.ClustersProperties;
import com.provectus.kafka.ui.emitter.BackwardRecordEmitter;
import com.provectus.kafka.ui.emitter.ForwardRecordEmitter;
import com.provectus.kafka.ui.emitter.MessageFilters;
import com.provectus.kafka.ui.emitter.MessagesProcessing;
import com.provectus.kafka.ui.emitter.TailingEmitter;
import com.provectus.kafka.ui.exception.TopicNotFoundException;
import com.provectus.kafka.ui.exception.ValidationException;
import com.provectus.kafka.ui.model.ConsumerPosition;
import com.provectus.kafka.ui.model.CreateTopicMessageDTO;
import com.provectus.kafka.ui.model.KafkaCluster;
import com.provectus.kafka.ui.model.MessageFilterTypeDTO;
import com.provectus.kafka.ui.model.SeekDirectionDTO;
import com.provectus.kafka.ui.model.TopicMessageDTO;
import com.provectus.kafka.ui.model.TopicMessageEventDTO;
import com.provectus.kafka.ui.serde.api.Serde;
import com.provectus.kafka.ui.serdes.ProducerRecordCreator;
import com.provectus.kafka.ui.util.SslPropertiesUtil;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Properties;
import java.util.concurrent.CompletableFuture;
import java.util.function.Predicate;
import java.util.function.UnaryOperator;
import java.util.stream.Collectors;
import javax.annotation.Nullable;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang3.StringUtils;
import org.apache.kafka.clients.admin.OffsetSpec;
import org.apache.kafka.clients.admin.TopicDescription;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.ByteArraySerializer;
import org.springframework.stereotype.Service;
import reactor.core.publisher.Flux;
import reactor.core.publisher.FluxSink;
import reactor.core.publisher.Mono;
import reactor.core.scheduler.Schedulers;

@Service
@Slf4j
public class MessagesService {

  private static final int DEFAULT_MAX_PAGE_SIZE = 500;
  private static final int DEFAULT_PAGE_SIZE = 100;

  // limits the UI message rate to 20/sec in tailing mode
  private static final int TAILING_UI_MESSAGE_THROTTLE_RATE = 20;

  private final AdminClientService adminClientService;
  private final DeserializationService deserializationService;
  private final ConsumerGroupService consumerGroupService;
  private final int maxPageSize;
  private final int defaultPageSize;

  public MessagesService(AdminClientService adminClientService,
                         DeserializationService deserializationService,
                         ConsumerGroupService consumerGroupService,
                         ClustersProperties properties) {
    this.adminClientService = adminClientService;
    this.deserializationService = deserializationService;
    this.consumerGroupService = consumerGroupService;
    var pollingProps = Optional.ofNullable(properties.getPolling())
        .orElseGet(ClustersProperties.PollingProperties::new);
    this.maxPageSize = Optional.ofNullable(pollingProps.getMaxPageSize())
        .orElse(DEFAULT_MAX_PAGE_SIZE);
    this.defaultPageSize = Optional.ofNullable(pollingProps.getDefaultPageSize())
        .orElse(DEFAULT_PAGE_SIZE);
  }
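
  /**
   * Resolves the topic's description via the cluster's AdminClient, failing the pipeline
   * with {@link TopicNotFoundException} when the topic does not exist.
   */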
  private Mono<TopicDescription> withExistingTopic(KafkaCluster cluster, String topicName) {
    return adminClientService.get(cluster)
        .flatMap(client -> client.describeTopic(topicName))
        .switchIfEmpty(Mono.error(new TopicNotFoundException()));
  }
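
  /**
   * Deletes records from the topic's partitions (all partitions when
   * {@code partitionsToInclude} is empty) up to their current end offsets.
   */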
  public Mono<Void> deleteTopicMessages(KafkaCluster cluster, String topicName,
                                        List<Integer> partitionsToInclude) {
    return withExistingTopic(cluster, topicName)
        .flatMap(td ->
            offsetsForDeletion(cluster, topicName, partitionsToInclude)
                .flatMap(offsets ->
                    adminClientService.get(cluster).flatMap(ac -> ac.deleteRecords(offsets))));
  }
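
  /**
   * Builds the per-partition end offsets to delete up to, keeping only the requested
   * partitions and skipping partitions that are already empty.
   */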
  private Mono<Map<TopicPartition, Long>> offsetsForDeletion(KafkaCluster cluster, String topicName,
                                                             List<Integer> partitionsToInclude) {
    return adminClientService.get(cluster).flatMap(ac ->
        ac.listTopicOffsets(topicName, OffsetSpec.earliest(), true)
            .zipWith(ac.listTopicOffsets(topicName, OffsetSpec.latest(), true),
                (start, end) ->
                    end.entrySet().stream()
                        .filter(e -> partitionsToInclude.isEmpty()
                            || partitionsToInclude.contains(e.getKey().partition()))
                        // we only need non-empty partitions (where start offset != end offset)
                        .filter(entry -> !entry.getValue().equals(start.get(entry.getKey())))
                        .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)))
    );
  }
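
  /**
   * Publishes a single message to the topic, after verifying that the topic exists.
   */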
  public Mono<RecordMetadata> sendMessage(KafkaCluster cluster, String topic,
                                          CreateTopicMessageDTO msg) {
    return withExistingTopic(cluster, topic)
        .publishOn(Schedulers.boundedElastic())
        .flatMap(desc -> sendMessageImpl(cluster, desc, msg));
  }
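
  /**
   * Creates a short-lived byte-array producer and sends one record built by
   * {@link ProducerRecordCreator}, which applies the key/value serdes chosen in {@code msg}.
   */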
  private Mono<RecordMetadata> sendMessageImpl(KafkaCluster cluster,
                                               TopicDescription topicDescription,
                                               CreateTopicMessageDTO msg) {
    // reject an explicitly requested partition that the topic does not have
    if (msg.getPartition() != null
        && msg.getPartition() > topicDescription.partitions().size() - 1) {
      return Mono.error(new ValidationException("Invalid partition"));
    }
    ProducerRecordCreator producerRecordCreator =
        deserializationService.producerRecordCreator(
            cluster,
            topicDescription.name(),
            msg.getKeySerde().get(),
            msg.getValueSerde().get()
        );
    Properties properties = new Properties();
    SslPropertiesUtil.addKafkaSslProperties(cluster.getOriginalProperties().getSsl(), properties);
    properties.putAll(cluster.getProperties());
    properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, cluster.getBootstrapServers());
    // key/value are serialized by the record creator, so the producer itself sends raw bytes
    properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class);
    properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class);
    try (KafkaProducer<byte[], byte[]> producer = new KafkaProducer<>(properties)) {
      ProducerRecord<byte[], byte[]> producerRecord = producerRecordCreator.create(
          topicDescription.name(),
          msg.getPartition(),
          msg.getKey().orElse(null),
          msg.getContent().orElse(null),
          msg.getHeaders()
      );
      // bridge the producer's send callback into a CompletableFuture-backed Mono
      CompletableFuture<RecordMetadata> cf = new CompletableFuture<>();
      producer.send(producerRecord, (metadata, exception) -> {
        if (exception != null) {
          cf.completeExceptionally(exception);
        } else {
          cf.complete(metadata);
        }
      });
      return Mono.fromFuture(cf);
    } catch (Throwable e) {
      return Mono.error(e);
    }
  }
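
  /**
   * Streams topic messages to the UI as a {@link Flux} of {@link TopicMessageEventDTO}.
   * The requested page size is validated via {@link #fixPageSize} before polling starts.
   *
   * <p>Illustrative call (the topic name, page size and filter type below are placeholder
   * values, not anything this class prescribes):
   * <pre>
   *   messagesService.loadMessages(cluster, "orders", position, null,
   *           MessageFilterTypeDTO.STRING_CONTAINS, 100, SeekDirectionDTO.FORWARD, null, null)
   *       .subscribe(event -> log.info("{}", event));
   * </pre>
   */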
  public Flux<TopicMessageEventDTO> loadMessages(KafkaCluster cluster, String topic,
                                                 ConsumerPosition consumerPosition,
                                                 @Nullable String query,
                                                 MessageFilterTypeDTO filterQueryType,
                                                 @Nullable Integer pageSize,
                                                 SeekDirectionDTO seekDirection,
                                                 @Nullable String keySerde,
                                                 @Nullable String valueSerde) {
    return withExistingTopic(cluster, topic)
        .flux()
        .publishOn(Schedulers.boundedElastic())
        .flatMap(td -> loadMessagesImpl(cluster, topic, consumerPosition, query,
            filterQueryType, fixPageSize(pageSize), seekDirection, keySerde, valueSerde));
  }
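
  /**
   * Clamps the requested page size: missing, non-positive, or above-maximum values
   * fall back to the configured default.
   */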
  private int fixPageSize(@Nullable Integer pageSize) {
    return Optional.ofNullable(pageSize)
        .filter(ps -> ps > 0 && ps <= maxPageSize)
        .orElse(defaultPageSize);
  }
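
  /**
   * Selects the record emitter for the requested seek direction (forward, backward,
   * or tailing), then applies data masking and UI throttling to the resulting stream.
   */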
  private Flux<TopicMessageEventDTO> loadMessagesImpl(KafkaCluster cluster,
                                                      String topic,
                                                      ConsumerPosition consumerPosition,
                                                      @Nullable String query,
                                                      MessageFilterTypeDTO filterQueryType,
                                                      int limit,
                                                      SeekDirectionDTO seekDirection,
                                                      @Nullable String keySerde,
                                                      @Nullable String valueSerde) {
    java.util.function.Consumer<? super FluxSink<TopicMessageEventDTO>> emitter;
    // tailing mode has no message limit - it streams until the client disconnects
    var processing = new MessagesProcessing(
        deserializationService.deserializerFor(cluster, topic, keySerde, valueSerde),
        getMsgFilter(query, filterQueryType),
        seekDirection == SeekDirectionDTO.TAILING ? null : limit
    );
    if (seekDirection.equals(SeekDirectionDTO.FORWARD)) {
      emitter = new ForwardRecordEmitter(
          () -> consumerGroupService.createConsumer(cluster),
          consumerPosition,
          processing,
          cluster.getPollingSettings()
      );
    } else if (seekDirection.equals(SeekDirectionDTO.BACKWARD)) {
      emitter = new BackwardRecordEmitter(
          () -> consumerGroupService.createConsumer(cluster),
          consumerPosition,
          limit,
          processing,
          cluster.getPollingSettings()
      );
    } else {
      emitter = new TailingEmitter(
          () -> consumerGroupService.createConsumer(cluster),
          consumerPosition,
          processing,
          cluster.getPollingSettings()
      );
    }
    // mask sensitive data and (for tailing) throttle before events reach the UI
    return Flux.create(emitter)
        .map(getDataMasker(cluster, topic))
        .map(throttleUiPublish(seekDirection));
  }
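
  /**
   * Applies the cluster's masking rules to the key and content of each MESSAGE event;
   * other event types pass through unchanged.
   */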
  private UnaryOperator<TopicMessageEventDTO> getDataMasker(KafkaCluster cluster, String topicName) {
    var keyMasker = cluster.getMasking().getMaskingFunction(topicName, Serde.Target.KEY);
    var valMasker = cluster.getMasking().getMaskingFunction(topicName, Serde.Target.VALUE);
    return evt -> {
      if (evt.getType() != TopicMessageEventDTO.TypeEnum.MESSAGE) {
        return evt;
      }
      return evt.message(
          evt.getMessage()
              .key(keyMasker.apply(evt.getMessage().getKey()))
              .content(valMasker.apply(evt.getMessage().getContent())));
    };
  }
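
  /**
   * Builds a message predicate from the query string; an empty query matches everything.
   */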
  private Predicate<TopicMessageDTO> getMsgFilter(@Nullable String query,
                                                  MessageFilterTypeDTO filterQueryType) {
    if (StringUtils.isEmpty(query)) {
      return evt -> true;
    }
    return MessageFilters.createMsgFilter(query, filterQueryType);
  }
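
  /**
   * In tailing mode, rate-limits events pushed to the UI to
   * {@code TAILING_UI_MESSAGE_THROTTLE_RATE} per second.
   */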
  private <T> UnaryOperator<T> throttleUiPublish(SeekDirectionDTO seekDirection) {
    if (seekDirection == SeekDirectionDTO.TAILING) {
      RateLimiter rateLimiter = RateLimiter.create(TAILING_UI_MESSAGE_THROTTLE_RATE);
      return m -> {
        rateLimiter.acquire(1);
        return m;
      };
    }
    // no need to throttle the UI production rate for non-tailing modes, since the number of
    // produced messages is already bounded by the page size
    return UnaryOperator.identity();
  }
}