// TopicsService.java
  1. package com.provectus.kafka.ui.service;
  2. import static java.util.stream.Collectors.toList;
  3. import static java.util.stream.Collectors.toMap;
  4. import com.google.common.annotations.VisibleForTesting;
  5. import com.provectus.kafka.ui.exception.TopicMetadataException;
  6. import com.provectus.kafka.ui.exception.TopicNotFoundException;
  7. import com.provectus.kafka.ui.exception.ValidationException;
  8. import com.provectus.kafka.ui.mapper.ClusterMapper;
  9. import com.provectus.kafka.ui.model.Feature;
  10. import com.provectus.kafka.ui.model.InternalLogDirStats;
  11. import com.provectus.kafka.ui.model.InternalPartition;
  12. import com.provectus.kafka.ui.model.InternalPartitionsOffsets;
  13. import com.provectus.kafka.ui.model.InternalReplica;
  14. import com.provectus.kafka.ui.model.InternalTopic;
  15. import com.provectus.kafka.ui.model.InternalTopicConfig;
  16. import com.provectus.kafka.ui.model.KafkaCluster;
  17. import com.provectus.kafka.ui.model.PartitionsIncreaseDTO;
  18. import com.provectus.kafka.ui.model.PartitionsIncreaseResponseDTO;
  19. import com.provectus.kafka.ui.model.ReplicationFactorChangeDTO;
  20. import com.provectus.kafka.ui.model.ReplicationFactorChangeResponseDTO;
  21. import com.provectus.kafka.ui.model.SortOrderDTO;
  22. import com.provectus.kafka.ui.model.TopicColumnsToSortDTO;
  23. import com.provectus.kafka.ui.model.TopicConfigDTO;
  24. import com.provectus.kafka.ui.model.TopicCreationDTO;
  25. import com.provectus.kafka.ui.model.TopicDTO;
  26. import com.provectus.kafka.ui.model.TopicDetailsDTO;
  27. import com.provectus.kafka.ui.model.TopicMessageSchemaDTO;
  28. import com.provectus.kafka.ui.model.TopicUpdateDTO;
  29. import com.provectus.kafka.ui.model.TopicsResponseDTO;
  30. import com.provectus.kafka.ui.serde.DeserializationService;
  31. import com.provectus.kafka.ui.util.JmxClusterUtil;
  32. import java.util.Collection;
  33. import java.util.Collections;
  34. import java.util.Comparator;
  35. import java.util.List;
  36. import java.util.Map;
  37. import java.util.Optional;
  38. import java.util.function.Function;
  39. import java.util.function.Predicate;
  40. import lombok.RequiredArgsConstructor;
  41. import lombok.Value;
  42. import org.apache.commons.lang3.StringUtils;
  43. import org.apache.kafka.clients.admin.ConfigEntry;
  44. import org.apache.kafka.clients.admin.NewPartitionReassignment;
  45. import org.apache.kafka.clients.admin.NewPartitions;
  46. import org.apache.kafka.clients.admin.OffsetSpec;
  47. import org.apache.kafka.clients.admin.TopicDescription;
  48. import org.apache.kafka.common.Node;
  49. import org.apache.kafka.common.TopicPartition;
  50. import org.springframework.stereotype.Service;
  51. import reactor.core.publisher.Mono;
  52. @Service
  53. @RequiredArgsConstructor
  54. public class TopicsService {
  55. private static final Integer DEFAULT_PAGE_SIZE = 25;
  56. private final AdminClientService adminClientService;
  57. private final ClusterMapper clusterMapper;
  58. private final DeserializationService deserializationService;
  59. private final MetricsCache metricsCache;
  60. public Mono<TopicsResponseDTO> getTopics(KafkaCluster cluster,
  61. Optional<Integer> pageNum,
  62. Optional<Integer> nullablePerPage,
  63. Optional<Boolean> showInternal,
  64. Optional<String> search,
  65. Optional<TopicColumnsToSortDTO> sortBy,
  66. Optional<SortOrderDTO> sortOrder) {
  67. return adminClientService.get(cluster).flatMap(ac ->
  68. new Pagination(ac, metricsCache.get(cluster))
  69. .getPage(pageNum, nullablePerPage, showInternal, search, sortBy, sortOrder)
  70. .flatMap(page ->
  71. loadTopics(cluster, page.getTopics())
  72. .map(topics ->
  73. new TopicsResponseDTO()
  74. .topics(topics.stream().map(clusterMapper::toTopic).collect(toList()))
  75. .pageCount(page.getTotalPages()))));
  76. }
  /**
   * Loads fresh state for the given topic names: describes them and fetches configs via
   * the admin client, pushes the results into the metrics cache, then combines them with
   * partition offsets and cached JMX / log-dir metrics into InternalTopic models.
   * Topic names missing from the describe result are skipped (see createList).
   */
  private Mono<List<InternalTopic>> loadTopics(KafkaCluster c, List<String> topics) {
    if (topics.isEmpty()) {
      return Mono.just(List.of());
    }
    return adminClientService.get(c)
        .flatMap(ac ->
            ac.describeTopics(topics).zipWith(ac.getTopicsConfig(topics),
                (descriptions, configs) -> {
                  // Side effect: refresh the cache first, so the metrics read back
                  // below reflect the descriptions/configs we just fetched.
                  metricsCache.update(c, descriptions, configs);
                  return getPartitionOffsets(descriptions, ac).map(offsets -> {
                    var metrics = metricsCache.get(c);
                    return createList(
                        topics,
                        descriptions,
                        configs,
                        offsets,
                        metrics.getJmxMetrics(),
                        metrics.getLogDirInfo()
                    );
                  });
                  // The zip combinator itself returns a Mono, so the zip yields
                  // Mono<Mono<List<...>>>; flatMap(identity) flattens it.
                })).flatMap(Function.identity());
  }
  99. private Mono<InternalTopic> loadTopic(KafkaCluster c, String topicName) {
  100. return loadTopics(c, List.of(topicName))
  101. .map(lst -> lst.stream().findFirst().orElseThrow(TopicNotFoundException::new));
  102. }
  /**
   * Builds InternalTopic models preserving the order of {@code orderedNames}.
   * Names without a description (e.g. a topic deleted between calls) are skipped
   * rather than failing the whole batch.
   */
  private List<InternalTopic> createList(List<String> orderedNames,
                                         Map<String, TopicDescription> descriptions,
                                         Map<String, List<ConfigEntry>> configs,
                                         InternalPartitionsOffsets partitionsOffsets,
                                         JmxClusterUtil.JmxMetrics jmxMetrics,
                                         InternalLogDirStats logDirInfo) {
    return orderedNames.stream()
        .filter(descriptions::containsKey)
        .map(t -> InternalTopic.from(
            descriptions.get(t),
            configs.getOrDefault(t, List.of()),
            partitionsOffsets,
            jmxMetrics,
            logDirInfo
        ))
        .collect(toList());
  }
  120. private Mono<InternalPartitionsOffsets> getPartitionOffsets(Map<String, TopicDescription>
  121. descriptions,
  122. ReactiveAdminClient ac) {
  123. var topicPartitions = descriptions.values().stream()
  124. .flatMap(desc ->
  125. desc.partitions().stream().map(p -> new TopicPartition(desc.name(), p.partition())))
  126. .collect(toList());
  127. return ac.listOffsets(topicPartitions, OffsetSpec.earliest())
  128. .zipWith(ac.listOffsets(topicPartitions, OffsetSpec.latest()),
  129. (earliest, latest) ->
  130. topicPartitions.stream()
  131. .filter(tp -> earliest.containsKey(tp) && latest.containsKey(tp))
  132. .map(tp ->
  133. Map.entry(tp,
  134. new InternalPartitionsOffsets.Offsets(
  135. earliest.get(tp), latest.get(tp))))
  136. .collect(toMap(Map.Entry::getKey, Map.Entry::getValue)))
  137. .map(InternalPartitionsOffsets::new);
  138. }
  139. public Mono<TopicDetailsDTO> getTopicDetails(KafkaCluster cluster, String topicName) {
  140. return loadTopic(cluster, topicName).map(clusterMapper::toTopicDetails);
  141. }
  142. public Mono<List<TopicConfigDTO>> getTopicConfigs(KafkaCluster cluster, String topicName) {
  143. return adminClientService.get(cluster)
  144. .flatMap(ac -> ac.getTopicsConfig(List.of(topicName)))
  145. .map(m -> m.values().stream().findFirst().orElseThrow(TopicNotFoundException::new))
  146. .map(lst -> lst.stream()
  147. .map(InternalTopicConfig::from)
  148. .map(clusterMapper::toTopicConfig)
  149. .collect(toList()));
  150. }
  151. private Mono<InternalTopic> createTopic(KafkaCluster c, ReactiveAdminClient adminClient,
  152. Mono<TopicCreationDTO> topicCreation) {
  153. return topicCreation.flatMap(topicData ->
  154. adminClient.createTopic(
  155. topicData.getName(),
  156. topicData.getPartitions(),
  157. topicData.getReplicationFactor().shortValue(),
  158. topicData.getConfigs()
  159. ).thenReturn(topicData)
  160. )
  161. .onErrorResume(t -> Mono.error(new TopicMetadataException(t.getMessage())))
  162. .flatMap(topicData -> loadTopic(c, topicData.getName()));
  163. }
  164. public Mono<TopicDTO> createTopic(KafkaCluster cluster, Mono<TopicCreationDTO> topicCreation) {
  165. return adminClientService.get(cluster)
  166. .flatMap(ac -> createTopic(cluster, ac, topicCreation))
  167. .map(clusterMapper::toTopic);
  168. }
  169. private Mono<InternalTopic> updateTopic(KafkaCluster cluster,
  170. String topicName,
  171. TopicUpdateDTO topicUpdate) {
  172. return adminClientService.get(cluster)
  173. .flatMap(ac ->
  174. ac.updateTopicConfig(topicName, topicUpdate.getConfigs())
  175. .then(loadTopic(cluster, topicName)));
  176. }
  177. public Mono<TopicDTO> updateTopic(KafkaCluster cl, String topicName,
  178. Mono<TopicUpdateDTO> topicUpdate) {
  179. return topicUpdate
  180. .flatMap(t -> updateTopic(cl, topicName, t))
  181. .map(clusterMapper::toTopic);
  182. }
  183. private Mono<InternalTopic> changeReplicationFactor(
  184. KafkaCluster cluster,
  185. ReactiveAdminClient adminClient,
  186. String topicName,
  187. Map<TopicPartition, Optional<NewPartitionReassignment>> reassignments
  188. ) {
  189. return adminClient.alterPartitionReassignments(reassignments)
  190. .then(loadTopic(cluster, topicName));
  191. }
  192. /**
  193. * Change topic replication factor, works on brokers versions 5.4.x and higher
  194. */
  195. public Mono<ReplicationFactorChangeResponseDTO> changeReplicationFactor(
  196. KafkaCluster cluster,
  197. String topicName,
  198. ReplicationFactorChangeDTO replicationFactorChange) {
  199. return loadTopic(cluster, topicName).flatMap(topic -> adminClientService.get(cluster)
  200. .flatMap(ac -> {
  201. Integer actual = topic.getReplicationFactor();
  202. Integer requested = replicationFactorChange.getTotalReplicationFactor();
  203. Integer brokersCount = metricsCache.get(cluster).getClusterDescription()
  204. .getNodes().size();
  205. if (requested.equals(actual)) {
  206. return Mono.error(
  207. new ValidationException(
  208. String.format("Topic already has replicationFactor %s.", actual)));
  209. }
  210. if (requested > brokersCount) {
  211. return Mono.error(
  212. new ValidationException(
  213. String.format("Requested replication factor %s more than brokers count %s.",
  214. requested, brokersCount)));
  215. }
  216. return changeReplicationFactor(cluster, ac, topicName,
  217. getPartitionsReassignments(cluster, topic,
  218. replicationFactorChange));
  219. })
  220. .map(t -> new ReplicationFactorChangeResponseDTO()
  221. .topicName(t.getName())
  222. .totalReplicationFactor(t.getReplicationFactor())));
  223. }
  /**
   * Builds the reassignment map that brings every partition of {@code topic} to the
   * requested replication factor. New replicas are placed on the least-used brokers;
   * removals target the most-used brokers, never the partition leader — keeping broker
   * load roughly balanced.
   *
   * @throws ValidationException if the requested factor equals the current one, or a
   *     partition cannot reach the requested replica count with the available brokers
   */
  private Map<TopicPartition, Optional<NewPartitionReassignment>> getPartitionsReassignments(
      KafkaCluster cluster,
      InternalTopic topic,
      ReplicationFactorChangeDTO replicationFactorChange) {
    // Current assignment map (partition number -> list of broker ids), mutated in place below.
    Map<Integer, List<Integer>> currentAssignment = getCurrentAssignment(topic);
    // Usage map (broker id -> number of replicas it currently hosts), kept in sync as we go.
    Map<Integer, Integer> brokersUsage = getBrokersMap(cluster, currentAssignment);
    int currentReplicationFactor = topic.getReplicationFactor();
    // Increasing the replication factor
    if (replicationFactorChange.getTotalReplicationFactor() > currentReplicationFactor) {
      // For each partition
      for (var assignmentList : currentAssignment.values()) {
        // Brokers sorted by current usage, least-used first (re-sorted per partition,
        // since usage changes as replicas are added)
        var brokers = brokersUsage.entrySet().stream()
            .sorted(Map.Entry.comparingByValue())
            .map(Map.Entry::getKey)
            .collect(toList());
        // Add brokers to the assignment until the partition reaches the requested factor
        for (Integer broker : brokers) {
          if (!assignmentList.contains(broker)) {
            assignmentList.add(broker);
            brokersUsage.merge(broker, 1, Integer::sum);
          }
          if (assignmentList.size() == replicationFactorChange.getTotalReplicationFactor()) {
            break;
          }
        }
        if (assignmentList.size() != replicationFactorChange.getTotalReplicationFactor()) {
          throw new ValidationException("Something went wrong during adding replicas");
        }
      }
    // Decreasing the replication factor
    } else if (replicationFactorChange.getTotalReplicationFactor() < currentReplicationFactor) {
      for (Map.Entry<Integer, List<Integer>> assignmentEntry : currentAssignment.entrySet()) {
        var partition = assignmentEntry.getKey();
        var brokers = assignmentEntry.getValue();
        // Brokers sorted by current usage, most-used first
        var brokersUsageList = brokersUsage.entrySet().stream()
            .sorted(Map.Entry.comparingByValue(Comparator.reverseOrder()))
            .map(Map.Entry::getKey)
            .collect(toList());
        // Remove replicas until the partition reaches the requested factor,
        // never removing the partition leader.
        for (Integer broker : brokersUsageList) {
          // NOTE(review): getLeader() is assumed non-null here; a leaderless
          // partition would NPE — TODO confirm upstream always provides a leader.
          if (!topic.getPartitions().get(partition).getLeader()
              .equals(broker)) {
            brokers.remove(broker);
            brokersUsage.merge(broker, -1, Integer::sum);
          }
          if (brokers.size() == replicationFactorChange.getTotalReplicationFactor()) {
            break;
          }
        }
        if (brokers.size() != replicationFactorChange.getTotalReplicationFactor()) {
          throw new ValidationException("Something went wrong during removing replicas");
        }
      }
    } else {
      throw new ValidationException("Replication factor already equals requested");
    }
    // Wrap into the structure expected by alterPartitionReassignments
    return currentAssignment.entrySet().stream().collect(toMap(
        e -> new TopicPartition(topic.getName(), e.getKey()),
        e -> Optional.of(new NewPartitionReassignment(e.getValue()))
    ));
  }
  293. private Map<Integer, List<Integer>> getCurrentAssignment(InternalTopic topic) {
  294. return topic.getPartitions().values().stream()
  295. .collect(toMap(
  296. InternalPartition::getPartition,
  297. p -> p.getReplicas().stream()
  298. .map(InternalReplica::getBroker)
  299. .collect(toList())
  300. ));
  301. }
  302. private Map<Integer, Integer> getBrokersMap(KafkaCluster cluster,
  303. Map<Integer, List<Integer>> currentAssignment) {
  304. Map<Integer, Integer> result = metricsCache.get(cluster).getClusterDescription().getNodes()
  305. .stream()
  306. .map(Node::id)
  307. .collect(toMap(
  308. c -> c,
  309. c -> 0
  310. ));
  311. currentAssignment.values().forEach(brokers -> brokers
  312. .forEach(broker -> result.put(broker, result.get(broker) + 1)));
  313. return result;
  314. }
  315. public Mono<PartitionsIncreaseResponseDTO> increaseTopicPartitions(
  316. KafkaCluster cluster,
  317. String topicName,
  318. PartitionsIncreaseDTO partitionsIncrease) {
  319. return loadTopic(cluster, topicName).flatMap(topic ->
  320. adminClientService.get(cluster).flatMap(ac -> {
  321. Integer actualCount = topic.getPartitionCount();
  322. Integer requestedCount = partitionsIncrease.getTotalPartitionsCount();
  323. if (requestedCount < actualCount) {
  324. return Mono.error(
  325. new ValidationException(String.format(
  326. "Topic currently has %s partitions, which is higher than the requested %s.",
  327. actualCount, requestedCount)));
  328. }
  329. if (requestedCount.equals(actualCount)) {
  330. return Mono.error(
  331. new ValidationException(
  332. String.format("Topic already has %s partitions.", actualCount)));
  333. }
  334. Map<String, NewPartitions> newPartitionsMap = Collections.singletonMap(
  335. topicName,
  336. NewPartitions.increaseTo(partitionsIncrease.getTotalPartitionsCount())
  337. );
  338. return ac.createPartitions(newPartitionsMap)
  339. .then(loadTopic(cluster, topicName));
  340. })
  341. .map(t -> new PartitionsIncreaseResponseDTO()
  342. .topicName(t.getName())
  343. .totalPartitionsCount(t.getPartitionCount())));
  344. }
  345. public Mono<Void> deleteTopic(KafkaCluster cluster, String topicName) {
  346. if (metricsCache.get(cluster).getFeatures().contains(Feature.TOPIC_DELETION)) {
  347. return adminClientService.get(cluster).flatMap(c -> c.deleteTopic(topicName))
  348. .doOnSuccess(t -> metricsCache.onTopicDelete(cluster, topicName));
  349. } else {
  350. return Mono.error(new ValidationException("Topic deletion restricted"));
  351. }
  352. }
  353. public TopicMessageSchemaDTO getTopicSchema(KafkaCluster cluster, String topicName) {
  354. if (!metricsCache.get(cluster).getTopicDescriptions().containsKey(topicName)) {
  355. throw new TopicNotFoundException();
  356. }
  357. return deserializationService
  358. .getRecordDeserializerForCluster(cluster)
  359. .getTopicSchema(topicName);
  360. }
  361. @VisibleForTesting
  362. @Value
  363. static class Pagination {
  364. ReactiveAdminClient adminClient;
  365. MetricsCache.Metrics metrics;
  366. @Value
  367. static class Page {
  368. List<String> topics;
  369. int totalPages;
  370. }
  371. Mono<Page> getPage(
  372. Optional<Integer> pageNum,
  373. Optional<Integer> nullablePerPage,
  374. Optional<Boolean> showInternal,
  375. Optional<String> search,
  376. Optional<TopicColumnsToSortDTO> sortBy,
  377. Optional<SortOrderDTO> sortOrder) {
  378. return geTopicsForPagination()
  379. .map(paginatingTopics -> {
  380. Predicate<Integer> positiveInt = i -> i > 0;
  381. int perPage = nullablePerPage.filter(positiveInt).orElse(DEFAULT_PAGE_SIZE);
  382. var topicsToSkip = (pageNum.filter(positiveInt).orElse(1) - 1) * perPage;
  383. var comparator = sortOrder.isEmpty() || !sortOrder.get().equals(SortOrderDTO.DESC)
  384. ? getComparatorForTopic(sortBy) : getComparatorForTopic(sortBy).reversed();
  385. List<InternalTopic> topics = paginatingTopics.stream()
  386. .filter(topic -> !topic.isInternal()
  387. || showInternal.map(i -> topic.isInternal() == i).orElse(true))
  388. .filter(topic ->
  389. search
  390. .map(s -> StringUtils.containsIgnoreCase(topic.getName(), s))
  391. .orElse(true))
  392. .sorted(comparator)
  393. .collect(toList());
  394. var totalPages = (topics.size() / perPage)
  395. + (topics.size() % perPage == 0 ? 0 : 1);
  396. List<String> topicsToRender = topics.stream()
  397. .skip(topicsToSkip)
  398. .limit(perPage)
  399. .map(InternalTopic::getName)
  400. .collect(toList());
  401. return new Page(topicsToRender, totalPages);
  402. });
  403. }
  404. private Comparator<InternalTopic> getComparatorForTopic(
  405. Optional<TopicColumnsToSortDTO> sortBy) {
  406. var defaultComparator = Comparator.comparing(InternalTopic::getName);
  407. if (sortBy.isEmpty()) {
  408. return defaultComparator;
  409. }
  410. switch (sortBy.get()) {
  411. case TOTAL_PARTITIONS:
  412. return Comparator.comparing(InternalTopic::getPartitionCount);
  413. case OUT_OF_SYNC_REPLICAS:
  414. return Comparator.comparing(t -> t.getReplicas() - t.getInSyncReplicas());
  415. case REPLICATION_FACTOR:
  416. return Comparator.comparing(InternalTopic::getReplicationFactor);
  417. case NAME:
  418. default:
  419. return defaultComparator;
  420. }
  421. }
  422. private Mono<List<String>> filterExisting(Collection<String> topics) {
  423. return adminClient.listTopics(true)
  424. .map(existing -> existing.stream().filter(topics::contains).collect(toList()));
  425. }
  426. private Mono<List<InternalTopic>> geTopicsForPagination() {
  427. return filterExisting(metrics.getTopicDescriptions().keySet())
  428. .map(lst -> lst.stream()
  429. .map(topicName ->
  430. InternalTopic.from(
  431. metrics.getTopicDescriptions().get(topicName),
  432. metrics.getTopicConfigs().getOrDefault(topicName, List.of()),
  433. InternalPartitionsOffsets.empty(),
  434. metrics.getJmxMetrics(),
  435. metrics.getLogDirInfo()))
  436. .collect(toList())
  437. );
  438. }
  439. }
  440. }