package com.provectus.kafka.ui.service;

import static java.util.stream.Collectors.toList;
import static java.util.stream.Collectors.toMap;

import com.google.common.annotations.VisibleForTesting;
import com.provectus.kafka.ui.exception.TopicMetadataException;
import com.provectus.kafka.ui.exception.TopicNotFoundException;
import com.provectus.kafka.ui.exception.TopicRecreationException;
import com.provectus.kafka.ui.exception.ValidationException;
import com.provectus.kafka.ui.mapper.ClusterMapper;
import com.provectus.kafka.ui.model.Feature;
import com.provectus.kafka.ui.model.InternalLogDirStats;
import com.provectus.kafka.ui.model.InternalPartition;
import com.provectus.kafka.ui.model.InternalPartitionsOffsets;
import com.provectus.kafka.ui.model.InternalReplica;
import com.provectus.kafka.ui.model.InternalTopic;
import com.provectus.kafka.ui.model.InternalTopicConfig;
import com.provectus.kafka.ui.model.KafkaCluster;
import com.provectus.kafka.ui.model.PartitionsIncreaseDTO;
import com.provectus.kafka.ui.model.PartitionsIncreaseResponseDTO;
import com.provectus.kafka.ui.model.ReplicationFactorChangeDTO;
import com.provectus.kafka.ui.model.ReplicationFactorChangeResponseDTO;
import com.provectus.kafka.ui.model.SortOrderDTO;
import com.provectus.kafka.ui.model.TopicColumnsToSortDTO;
import com.provectus.kafka.ui.model.TopicConfigDTO;
import com.provectus.kafka.ui.model.TopicCreationDTO;
import com.provectus.kafka.ui.model.TopicDTO;
import com.provectus.kafka.ui.model.TopicDetailsDTO;
import com.provectus.kafka.ui.model.TopicMessageSchemaDTO;
import com.provectus.kafka.ui.model.TopicUpdateDTO;
import com.provectus.kafka.ui.model.TopicsResponseDTO;
import com.provectus.kafka.ui.serde.DeserializationService;
import com.provectus.kafka.ui.util.JmxClusterUtil;
import java.time.Duration;
import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.function.Function;
import java.util.function.Predicate;
import java.util.stream.Collectors;
import lombok.RequiredArgsConstructor;
import org.apache.commons.lang3.StringUtils;
import org.apache.kafka.clients.admin.ConfigEntry;
import org.apache.kafka.clients.admin.NewPartitionReassignment;
import org.apache.kafka.clients.admin.NewPartitions;
import org.apache.kafka.clients.admin.OffsetSpec;
import org.apache.kafka.clients.admin.TopicDescription;
import org.apache.kafka.common.Node;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.errors.TopicExistsException;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Service;
import reactor.core.publisher.Mono;
import reactor.util.retry.Retry;
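
/**
 * Service layer for topic operations: paginated listing, creation, recreation,
 * cloning, config updates, partition-count and replication-factor changes,
 * deletion, and message-schema lookup. Cluster metadata is read from and kept
 * in sync with the {@code MetricsCache}.
 */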
@Service
@RequiredArgsConstructor
public class TopicsService {

  private static final Integer DEFAULT_PAGE_SIZE = 25;

  private final AdminClientService adminClientService;
  private final ClusterMapper clusterMapper;
  private final DeserializationService deserializationService;
  private final MetricsCache metricsCache;

  @Value("${topic.recreate.maxRetries:15}")
  private int recreateMaxRetries;

  @Value("${topic.recreate.delay.seconds:1}")
  private int recreateDelayInSeconds;
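
  /**
   * Returns a single page of topics. Visibility filtering, name search, and
   * sorting are applied to the cached topic list before pagination; full topic
   * details are then loaded only for the topics on the requested page.
   */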
  public Mono<TopicsResponseDTO> getTopics(KafkaCluster cluster,
                                           Optional<Integer> pageNum,
                                           Optional<Integer> nullablePerPage,
                                           Optional<Boolean> showInternal,
                                           Optional<String> search,
                                           Optional<TopicColumnsToSortDTO> sortBy,
                                           Optional<SortOrderDTO> sortOrder) {
    return adminClientService.get(cluster).flatMap(ac ->
        new Pagination(ac, metricsCache.get(cluster))
            .getPage(pageNum, nullablePerPage, showInternal, search, sortBy, sortOrder)
            .flatMap(page ->
                loadTopics(cluster, page.getTopics())
                    .map(topics ->
                        new TopicsResponseDTO()
                            .topics(topics.stream().map(clusterMapper::toTopic).collect(toList()))
                            .pageCount(page.getTotalPages()))));
  }
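
  /**
   * Describes the given topics, refreshes the metrics cache with the fresh
   * descriptions and configs, and combines them with partition offsets, JMX
   * metrics, and log-dir stats into {@code InternalTopic} models.
   */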
  private Mono<List<InternalTopic>> loadTopics(KafkaCluster c, List<String> topics) {
    if (topics.isEmpty()) {
      return Mono.just(List.of());
    }
    return adminClientService.get(c)
        .flatMap(ac ->
            ac.describeTopics(topics).zipWith(ac.getTopicsConfig(topics),
                (descriptions, configs) -> {
                  metricsCache.update(c, descriptions, configs);
                  return getPartitionOffsets(descriptions, ac).map(offsets -> {
                    var metrics = metricsCache.get(c);
                    return createList(
                        topics,
                        descriptions,
                        configs,
                        offsets,
                        metrics.getJmxMetrics(),
                        metrics.getLogDirInfo()
                    );
                  });
                }))
        .flatMap(Function.identity());
  }

  private Mono<InternalTopic> loadTopic(KafkaCluster c, String topicName) {
    return loadTopics(c, List.of(topicName))
        .map(lst -> lst.stream().findFirst().orElseThrow(TopicNotFoundException::new));
  }
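
  /**
   * Builds {@code InternalTopic} models in the order of {@code orderedNames},
   * silently skipping names for which no description was returned.
   */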
  private List<InternalTopic> createList(List<String> orderedNames,
                                         Map<String, TopicDescription> descriptions,
                                         Map<String, List<ConfigEntry>> configs,
                                         InternalPartitionsOffsets partitionsOffsets,
                                         JmxClusterUtil.JmxMetrics jmxMetrics,
                                         InternalLogDirStats logDirInfo) {
    return orderedNames.stream()
        .filter(descriptions::containsKey)
        .map(t -> InternalTopic.from(
            descriptions.get(t),
            configs.getOrDefault(t, List.of()),
            partitionsOffsets,
            jmxMetrics,
            logDirInfo
        ))
        .collect(toList());
  }
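
  /**
   * Fetches the earliest and latest offsets for every partition of the
   * described topics; partitions missing from either listing are omitted.
   */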
  private Mono<InternalPartitionsOffsets> getPartitionOffsets(
      Map<String, TopicDescription> descriptions, ReactiveAdminClient ac) {
    var topicPartitions = descriptions.values().stream()
        .flatMap(desc ->
            desc.partitions().stream().map(p -> new TopicPartition(desc.name(), p.partition())))
        .collect(toList());

    return ac.listOffsets(topicPartitions, OffsetSpec.earliest())
        .zipWith(ac.listOffsets(topicPartitions, OffsetSpec.latest()),
            (earliest, latest) ->
                topicPartitions.stream()
                    .filter(tp -> earliest.containsKey(tp) && latest.containsKey(tp))
                    .map(tp ->
                        Map.entry(tp,
                            new InternalPartitionsOffsets.Offsets(
                                earliest.get(tp), latest.get(tp))))
                    .collect(toMap(Map.Entry::getKey, Map.Entry::getValue)))
        .map(InternalPartitionsOffsets::new);
  }

  public Mono<TopicDetailsDTO> getTopicDetails(KafkaCluster cluster, String topicName) {
    return loadTopic(cluster, topicName).map(clusterMapper::toTopicDetails);
  }

  public Mono<List<TopicConfigDTO>> getTopicConfigs(KafkaCluster cluster, String topicName) {
    return adminClientService.get(cluster)
        .flatMap(ac -> ac.getTopicsConfig(List.of(topicName)))
        .map(m -> m.values().stream().findFirst().orElseThrow(TopicNotFoundException::new))
        .map(lst -> lst.stream()
            .map(InternalTopicConfig::from)
            .map(clusterMapper::toTopicConfig)
            .collect(toList()));
  }
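
  /**
   * Creates the topic and then reloads it so the returned model reflects the
   * state actually registered by the brokers. Creation failures are rethrown
   * as {@code TopicMetadataException}.
   */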
  private Mono<InternalTopic> createTopic(KafkaCluster c, ReactiveAdminClient adminClient,
                                          Mono<TopicCreationDTO> topicCreation) {
    return topicCreation.flatMap(topicData ->
            adminClient.createTopic(
                topicData.getName(),
                topicData.getPartitions(),
                topicData.getReplicationFactor().shortValue(),
                topicData.getConfigs()
            ).thenReturn(topicData)
        )
        .onErrorResume(t -> Mono.error(new TopicMetadataException(t.getMessage())))
        .flatMap(topicData -> loadTopic(c, topicData.getName()));
  }

  public Mono<TopicDTO> createTopic(KafkaCluster cluster, Mono<TopicCreationDTO> topicCreation) {
    return adminClientService.get(cluster)
        .flatMap(ac -> createTopic(cluster, ac, topicCreation))
        .map(clusterMapper::toTopic);
  }
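
  /**
   * Recreates a topic with its current settings: deletes it, waits
   * {@code recreateDelayInSeconds}, then re-creates it with the same partition
   * count, replication factor, and configs. Because deletion completes
   * asynchronously, creation is retried up to {@code recreateMaxRetries} times
   * while the broker still reports {@code TopicExistsException}.
   */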
  public Mono<TopicDTO> recreateTopic(KafkaCluster cluster, String topicName) {
    return loadTopic(cluster, topicName)
        .flatMap(t -> deleteTopic(cluster, topicName)
            .thenReturn(t)
            .delayElement(Duration.ofSeconds(recreateDelayInSeconds))
            .flatMap(topic ->
                adminClientService.get(cluster)
                    .flatMap(ac ->
                        ac.createTopic(topic.getName(),
                                topic.getPartitionCount(),
                                (short) topic.getReplicationFactor(),
                                topic.getTopicConfigs()
                                    .stream()
                                    .collect(Collectors.toMap(InternalTopicConfig::getName,
                                        InternalTopicConfig::getValue)))
                            .thenReturn(topicName))
                    .retryWhen(Retry.fixedDelay(recreateMaxRetries,
                            Duration.ofSeconds(recreateDelayInSeconds))
                        .filter(TopicExistsException.class::isInstance)
                        .onRetryExhaustedThrow((a, b) ->
                            new TopicRecreationException(topicName,
                                recreateMaxRetries * recreateDelayInSeconds)))
                    .flatMap(a -> loadTopic(cluster, topicName))
                    .map(clusterMapper::toTopic)));
  }

  private Mono<InternalTopic> updateTopic(KafkaCluster cluster,
                                          String topicName,
                                          TopicUpdateDTO topicUpdate) {
    return adminClientService.get(cluster)
        .flatMap(ac ->
            ac.updateTopicConfig(topicName, topicUpdate.getConfigs())
                .then(loadTopic(cluster, topicName)));
  }

  public Mono<TopicDTO> updateTopic(KafkaCluster cl, String topicName,
                                    Mono<TopicUpdateDTO> topicUpdate) {
    return topicUpdate
        .flatMap(t -> updateTopic(cl, topicName, t))
        .map(clusterMapper::toTopic);
  }

  private Mono<InternalTopic> changeReplicationFactor(
      KafkaCluster cluster,
      ReactiveAdminClient adminClient,
      String topicName,
      Map<TopicPartition, Optional<NewPartitionReassignment>> reassignments
  ) {
    return adminClient.alterPartitionReassignments(reassignments)
        .then(loadTopic(cluster, topicName));
  }

  /**
   * Changes the topic replication factor. Works on broker versions 5.4.x and higher.
   */
  public Mono<ReplicationFactorChangeResponseDTO> changeReplicationFactor(
      KafkaCluster cluster,
      String topicName,
      ReplicationFactorChangeDTO replicationFactorChange) {
    return loadTopic(cluster, topicName).flatMap(topic -> adminClientService.get(cluster)
        .flatMap(ac -> {
          Integer actual = topic.getReplicationFactor();
          Integer requested = replicationFactorChange.getTotalReplicationFactor();
          Integer brokersCount = metricsCache.get(cluster).getClusterDescription()
              .getNodes().size();

          if (requested.equals(actual)) {
            return Mono.error(
                new ValidationException(
                    String.format("Topic already has replicationFactor %s.", actual)));
          }
          if (requested > brokersCount) {
            return Mono.error(
                new ValidationException(
                    String.format("Requested replication factor %s is greater than the broker count %s.",
                        requested, brokersCount)));
          }
          return changeReplicationFactor(cluster, ac, topicName,
              getPartitionsReassignments(cluster, topic, replicationFactorChange));
        })
        .map(t -> new ReplicationFactorChangeResponseDTO()
            .topicName(t.getName())
            .totalReplicationFactor(t.getReplicationFactor())));
  }
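
  /**
   * Computes a full partition reassignment that grows or shrinks each
   * partition's replica list to the requested replication factor. New replicas
   * are placed on the least-used brokers first; removed replicas are taken
   * from the most-used brokers, never removing a partition's leader.
   */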
  private Map<TopicPartition, Optional<NewPartitionReassignment>> getPartitionsReassignments(
      KafkaCluster cluster,
      InternalTopic topic,
      ReplicationFactorChangeDTO replicationFactorChange) {
    // Current assignment map (partition number -> list of broker ids)
    Map<Integer, List<Integer>> currentAssignment = getCurrentAssignment(topic);
    // Brokers usage map (broker id -> replica count)
    Map<Integer, Integer> brokersUsage = getBrokersMap(cluster, currentAssignment);
    int currentReplicationFactor = topic.getReplicationFactor();

    if (replicationFactorChange.getTotalReplicationFactor() > currentReplicationFactor) {
      // Increasing the replication factor: for each partition...
      for (var assignmentList : currentAssignment.values()) {
        // ...get the brokers sorted by usage (least used first)
        var brokers = brokersUsage.entrySet().stream()
            .sorted(Map.Entry.comparingByValue())
            .map(Map.Entry::getKey)
            .collect(toList());
        // ...and add brokers to the assignment until the partition's replica
        // count reaches the requested replication factor
        for (Integer broker : brokers) {
          if (!assignmentList.contains(broker)) {
            assignmentList.add(broker);
            brokersUsage.merge(broker, 1, Integer::sum);
          }
          if (assignmentList.size() == replicationFactorChange.getTotalReplicationFactor()) {
            break;
          }
        }
        if (assignmentList.size() != replicationFactorChange.getTotalReplicationFactor()) {
          throw new ValidationException("Failed to add the requested number of replicas");
        }
      }
    } else if (replicationFactorChange.getTotalReplicationFactor() < currentReplicationFactor) {
      // Decreasing the replication factor: for each partition...
      for (Map.Entry<Integer, List<Integer>> assignmentEntry : currentAssignment.entrySet()) {
        var partition = assignmentEntry.getKey();
        var brokers = assignmentEntry.getValue();
        // ...get the brokers sorted by usage (most used first)
        var brokersUsageList = brokersUsage.entrySet().stream()
            .sorted(Map.Entry.comparingByValue(Comparator.reverseOrder()))
            .map(Map.Entry::getKey)
            .collect(toList());
        // ...and remove brokers from the assignment until the partition's
        // replica count reaches the requested replication factor
        for (Integer broker : brokersUsageList) {
          // Never remove the partition's leader
          if (!topic.getPartitions().get(partition).getLeader()
              .equals(broker)) {
            brokers.remove(broker);
            brokersUsage.merge(broker, -1, Integer::sum);
          }
          if (brokers.size() == replicationFactorChange.getTotalReplicationFactor()) {
            break;
          }
        }
        if (brokers.size() != replicationFactorChange.getTotalReplicationFactor()) {
          throw new ValidationException("Failed to remove the requested number of replicas");
        }
      }
    } else {
      throw new ValidationException("Replication factor already equals the requested value");
    }

    // Build the reassignment map expected by alterPartitionReassignments
    return currentAssignment.entrySet().stream().collect(toMap(
        e -> new TopicPartition(topic.getName(), e.getKey()),
        e -> Optional.of(new NewPartitionReassignment(e.getValue()))
    ));
  }
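
  /**
   * Returns the current assignment as a map of partition number to the list
   * of broker ids hosting its replicas.
   */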
  private Map<Integer, List<Integer>> getCurrentAssignment(InternalTopic topic) {
    return topic.getPartitions().values().stream()
        .collect(toMap(
            InternalPartition::getPartition,
            p -> p.getReplicas().stream()
                .map(InternalReplica::getBroker)
                .collect(toList())
        ));
  }
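
  /**
   * Counts how many replicas of this topic each broker hosts, starting from
   * zero for every broker in the cached cluster description. {@code Map.merge}
   * is used so that a replica on a broker missing from the description is
   * counted instead of triggering a NullPointerException.
   */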
  private Map<Integer, Integer> getBrokersMap(KafkaCluster cluster,
                                              Map<Integer, List<Integer>> currentAssignment) {
    Map<Integer, Integer> result = metricsCache.get(cluster).getClusterDescription().getNodes()
        .stream()
        .map(Node::id)
        .collect(toMap(
            c -> c,
            c -> 0
        ));
    currentAssignment.values().forEach(brokers -> brokers
        .forEach(broker -> result.merge(broker, 1, Integer::sum)));
    return result;
  }
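
  /**
   * Increases the topic's partition count to the requested total. Fails
   * validation if the requested count is lower than or equal to the current
   * one, since Kafka cannot shrink a topic's partition count.
   */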
  public Mono<PartitionsIncreaseResponseDTO> increaseTopicPartitions(
      KafkaCluster cluster,
      String topicName,
      PartitionsIncreaseDTO partitionsIncrease) {
    return loadTopic(cluster, topicName).flatMap(topic ->
        adminClientService.get(cluster).flatMap(ac -> {
          Integer actualCount = topic.getPartitionCount();
          Integer requestedCount = partitionsIncrease.getTotalPartitionsCount();

          if (requestedCount < actualCount) {
            return Mono.error(
                new ValidationException(String.format(
                    "Topic currently has %s partitions, which is higher than the requested %s.",
                    actualCount, requestedCount)));
          }
          if (requestedCount.equals(actualCount)) {
            return Mono.error(
                new ValidationException(
                    String.format("Topic already has %s partitions.", actualCount)));
          }

          Map<String, NewPartitions> newPartitionsMap = Collections.singletonMap(
              topicName,
              NewPartitions.increaseTo(partitionsIncrease.getTotalPartitionsCount())
          );
          return ac.createPartitions(newPartitionsMap)
              .then(loadTopic(cluster, topicName));
        }).map(t -> new PartitionsIncreaseResponseDTO()
            .topicName(t.getName())
            .totalPartitionsCount(t.getPartitionCount())
        )
    );
  }
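
  /**
   * Deletes the topic if deletion is enabled for the cluster
   * ({@code Feature.TOPIC_DELETION}), notifying the metrics cache on success.
   */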
  public Mono<Void> deleteTopic(KafkaCluster cluster, String topicName) {
    if (metricsCache.get(cluster).getFeatures().contains(Feature.TOPIC_DELETION)) {
      return adminClientService.get(cluster).flatMap(c -> c.deleteTopic(topicName))
          .doOnSuccess(t -> metricsCache.onTopicDelete(cluster, topicName));
    } else {
      return Mono.error(new ValidationException("Topic deletion restricted"));
    }
  }

  public TopicMessageSchemaDTO getTopicSchema(KafkaCluster cluster, String topicName) {
    if (!metricsCache.get(cluster).getTopicDescriptions().containsKey(topicName)) {
      throw new TopicNotFoundException();
    }
    return deserializationService
        .getRecordDeserializerForCluster(cluster)
        .getTopicSchema(topicName);
  }
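
  /**
   * Creates a new topic that copies the source topic's partition count,
   * replication factor, and configs. Existing messages are not copied.
   */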
  public Mono<TopicDTO> cloneTopic(
      KafkaCluster cluster, String topicName, String newTopicName) {
    return loadTopic(cluster, topicName).flatMap(topic ->
        adminClientService.get(cluster)
            .flatMap(ac ->
                ac.createTopic(newTopicName,
                    topic.getPartitionCount(),
                    (short) topic.getReplicationFactor(),
                    topic.getTopicConfigs()
                        .stream()
                        .collect(Collectors.toMap(InternalTopicConfig::getName,
                            InternalTopicConfig::getValue))))
            .thenReturn(newTopicName)
            .flatMap(a -> loadTopic(cluster, newTopicName))
            .map(clusterMapper::toTopic));
  }

  @VisibleForTesting
  @lombok.Value
  static class Pagination {
    ReactiveAdminClient adminClient;
    MetricsCache.Metrics metrics;

    @lombok.Value
    static class Page {
      List<String> topics;
      int totalPages;
    }
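
    /**
     * Filters, sorts, and slices the cached topic list into a single page.
     * Internal topics are hidden unless {@code showInternal} is true; page
     * number and page size fall back to defaults when absent or non-positive.
     */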
    Mono<Page> getPage(
        Optional<Integer> pageNum,
        Optional<Integer> nullablePerPage,
        Optional<Boolean> showInternal,
        Optional<String> search,
        Optional<TopicColumnsToSortDTO> sortBy,
        Optional<SortOrderDTO> sortOrder) {
      return getTopicsForPagination()
          .map(paginatingTopics -> {
            Predicate<Integer> positiveInt = i -> i > 0;
            int perPage = nullablePerPage.filter(positiveInt).orElse(DEFAULT_PAGE_SIZE);
            var topicsToSkip = (pageNum.filter(positiveInt).orElse(1) - 1) * perPage;
            var comparator = sortOrder.isEmpty() || !sortOrder.get().equals(SortOrderDTO.DESC)
                ? getComparatorForTopic(sortBy) : getComparatorForTopic(sortBy).reversed();
            List<InternalTopic> topics = paginatingTopics.stream()
                .filter(topic -> !topic.isInternal()
                    || showInternal.map(i -> topic.isInternal() == i).orElse(true))
                .filter(topic ->
                    search
                        .map(s -> StringUtils.containsIgnoreCase(topic.getName(), s))
                        .orElse(true))
                .sorted(comparator)
                .collect(toList());
            var totalPages = (topics.size() / perPage)
                + (topics.size() % perPage == 0 ? 0 : 1);
            List<String> topicsToRender = topics.stream()
                .skip(topicsToSkip)
                .limit(perPage)
                .map(InternalTopic::getName)
                .collect(toList());
            return new Page(topicsToRender, totalPages);
          });
    }

    private Comparator<InternalTopic> getComparatorForTopic(
        Optional<TopicColumnsToSortDTO> sortBy) {
      var defaultComparator = Comparator.comparing(InternalTopic::getName);
      if (sortBy.isEmpty()) {
        return defaultComparator;
      }
      switch (sortBy.get()) {
        case TOTAL_PARTITIONS:
          return Comparator.comparing(InternalTopic::getPartitionCount);
        case OUT_OF_SYNC_REPLICAS:
          return Comparator.comparing(t -> t.getReplicas() - t.getInSyncReplicas());
        case REPLICATION_FACTOR:
          return Comparator.comparing(InternalTopic::getReplicationFactor);
        case SIZE:
          return Comparator.comparing(InternalTopic::getSegmentSize);
        case NAME:
        default:
          return defaultComparator;
      }
    }

    private Mono<List<String>> filterExisting(Collection<String> topics) {
      return adminClient.listTopics(true)
          .map(existing -> existing.stream().filter(topics::contains).collect(toList()));
    }

    private Mono<List<InternalTopic>> getTopicsForPagination() {
      return filterExisting(metrics.getTopicDescriptions().keySet())
          .map(lst -> lst.stream()
              .map(topicName ->
                  InternalTopic.from(
                      metrics.getTopicDescriptions().get(topicName),
                      metrics.getTopicConfigs().getOrDefault(topicName, List.of()),
                      InternalPartitionsOffsets.empty(),
                      metrics.getJmxMetrics(),
                      metrics.getLogDirInfo()))
              .collect(toList())
          );
    }
  }
}