@@ -1,12 +1,17 @@
 package com.provectus.kafka.ui.service;
 
+import static java.util.stream.Collectors.toList;
+import static java.util.stream.Collectors.toMap;
+
+import com.google.common.annotations.VisibleForTesting;
 import com.provectus.kafka.ui.exception.TopicMetadataException;
 import com.provectus.kafka.ui.exception.TopicNotFoundException;
 import com.provectus.kafka.ui.exception.ValidationException;
 import com.provectus.kafka.ui.mapper.ClusterMapper;
-import com.provectus.kafka.ui.model.CleanupPolicy;
 import com.provectus.kafka.ui.model.Feature;
+import com.provectus.kafka.ui.model.InternalLogDirStats;
 import com.provectus.kafka.ui.model.InternalPartition;
+import com.provectus.kafka.ui.model.InternalPartitionsOffsets;
 import com.provectus.kafka.ui.model.InternalReplica;
 import com.provectus.kafka.ui.model.InternalTopic;
 import com.provectus.kafka.ui.model.InternalTopicConfig;
@@ -24,23 +29,26 @@ import com.provectus.kafka.ui.model.TopicMessageSchemaDTO;
 import com.provectus.kafka.ui.model.TopicUpdateDTO;
 import com.provectus.kafka.ui.model.TopicsResponseDTO;
 import com.provectus.kafka.ui.serde.DeserializationService;
-import com.provectus.kafka.ui.util.ClusterUtil;
+import com.provectus.kafka.ui.util.JmxClusterUtil;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.Comparator;
 import java.util.List;
 import java.util.Map;
 import java.util.Optional;
+import java.util.function.Function;
 import java.util.function.Predicate;
-import java.util.stream.Collectors;
 import lombok.RequiredArgsConstructor;
-import lombok.SneakyThrows;
+import lombok.Value;
 import org.apache.commons.lang3.StringUtils;
+import org.apache.kafka.clients.admin.ConfigEntry;
 import org.apache.kafka.clients.admin.NewPartitionReassignment;
 import org.apache.kafka.clients.admin.NewPartitions;
+import org.apache.kafka.clients.admin.OffsetSpec;
+import org.apache.kafka.clients.admin.TopicDescription;
+import org.apache.kafka.common.Node;
 import org.apache.kafka.common.TopicPartition;
 import org.springframework.stereotype.Service;
-import reactor.core.publisher.Flux;
 import reactor.core.publisher.Mono;
 
 @Service
@@ -50,102 +58,110 @@ public class TopicsService {
   private static final Integer DEFAULT_PAGE_SIZE = 25;
 
   private final AdminClientService adminClientService;
-  private final ConsumerGroupService consumerGroupService;
-  private final ClustersStorage clustersStorage;
   private final ClusterMapper clusterMapper;
   private final DeserializationService deserializationService;
-
-  public TopicsResponseDTO getTopics(KafkaCluster cluster,
-                                     Optional<Integer> page,
-                                     Optional<Integer> nullablePerPage,
-                                     Optional<Boolean> showInternal,
-                                     Optional<String> search,
-                                     Optional<TopicColumnsToSortDTO> sortBy) {
-    Predicate<Integer> positiveInt = i -> i > 0;
-    int perPage = nullablePerPage.filter(positiveInt).orElse(DEFAULT_PAGE_SIZE);
-    var topicsToSkip = (page.filter(positiveInt).orElse(1) - 1) * perPage;
-    List<InternalTopic> topics = cluster.getMetrics().getTopics().values().stream()
-        .filter(topic -> !topic.isInternal()
-            || showInternal
-            .map(i -> topic.isInternal() == i)
-            .orElse(true))
-        .filter(topic ->
-            search
-                .map(s -> StringUtils.containsIgnoreCase(topic.getName(), s))
-                .orElse(true))
-        .sorted(getComparatorForTopic(sortBy))
-        .collect(Collectors.toList());
-    var totalPages = (topics.size() / perPage)
-        + (topics.size() % perPage == 0 ? 0 : 1);
-    return new TopicsResponseDTO()
-        .pageCount(totalPages)
-        .topics(
-            topics.stream()
-                .skip(topicsToSkip)
-                .limit(perPage)
-                .map(t ->
-                    clusterMapper.toTopic(
-                        t.toBuilder().partitions(getTopicPartitions(cluster, t)).build()
-                    )
-                )
-                .collect(Collectors.toList())
-        );
+  private final MetricsCache metricsCache;
+
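+  // Page selection runs against the cached metadata snapshot; only the topics
+  // that fall on the requested page are then loaded in detail from the cluster.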
+  public Mono<TopicsResponseDTO> getTopics(KafkaCluster cluster,
+                                           Optional<Integer> pageNum,
+                                           Optional<Integer> nullablePerPage,
+                                           Optional<Boolean> showInternal,
+                                           Optional<String> search,
+                                           Optional<TopicColumnsToSortDTO> sortBy) {
+    return adminClientService.get(cluster).flatMap(ac ->
+        new Pagination(ac, metricsCache.get(cluster))
+            .getPage(pageNum, nullablePerPage, showInternal, search, sortBy)
+            .flatMap(page ->
+                loadTopics(cluster, page.getTopics())
+                    .map(topics ->
+                        new TopicsResponseDTO()
+                            .topics(topics.stream().map(clusterMapper::toTopic).collect(toList()))
+                            .pageCount(page.getTotalPages()))));
   }
 
-  private Comparator<InternalTopic> getComparatorForTopic(Optional<TopicColumnsToSortDTO> sortBy) {
-    var defaultComparator = Comparator.comparing(InternalTopic::getName);
-    if (sortBy.isEmpty()) {
-      return defaultComparator;
-    }
-    switch (sortBy.get()) {
-      case TOTAL_PARTITIONS:
-        return Comparator.comparing(InternalTopic::getPartitionCount);
-      case OUT_OF_SYNC_REPLICAS:
-        return Comparator.comparing(t -> t.getReplicas() - t.getInSyncReplicas());
-      case REPLICATION_FACTOR:
-        return Comparator.comparing(InternalTopic::getReplicationFactor);
-      case NAME:
-      default:
-        return defaultComparator;
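+  // Describes the given topics via the admin client, refreshes the metrics cache
+  // and enriches the result with per-partition earliest/latest offsets.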
+  private Mono<List<InternalTopic>> loadTopics(KafkaCluster c, List<String> topics) {
+    if (topics.isEmpty()) {
+      return Mono.just(List.of());
     }
+    return adminClientService.get(c)
+        .flatMap(ac ->
+            ac.describeTopics(topics).zipWith(ac.getTopicsConfig(topics),
+                (descriptions, configs) -> {
+                  metricsCache.update(c, descriptions, configs);
+                  return getPartitionOffsets(descriptions, ac).map(offsets -> {
+                    var metrics = metricsCache.get(c);
+                    return createList(
+                        topics,
+                        descriptions,
+                        configs,
+                        offsets,
+                        metrics.getJmxMetrics(),
+                        metrics.getLogDirInfo()
+                    );
+                  });
+                })).flatMap(Function.identity());
   }
 
-  public TopicDetailsDTO getTopicDetails(KafkaCluster cluster, String topicName) {
-    var topic = getTopic(cluster, topicName);
-    var upToDatePartitions = getTopicPartitions(cluster, topic);
-    topic = topic.toBuilder().partitions(upToDatePartitions).build();
-    return clusterMapper.toTopicDetails(topic);
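+  // Single-topic variant of loadTopics: fails with TopicNotFoundException
+  // when the topic is absent from the describe result.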
+  private Mono<InternalTopic> loadTopic(KafkaCluster c, String topicName) {
+    return loadTopics(c, List.of(topicName))
+        .map(lst -> lst.stream().findFirst().orElseThrow(TopicNotFoundException::new));
   }
 
-  @SneakyThrows
-  public Mono<List<InternalTopic>> getTopicsData(ReactiveAdminClient client) {
-    return client.listTopics(true)
-        .flatMap(topics -> getTopicsData(client, topics).collectList());
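+  // Assembles InternalTopic views in the requested order, skipping names
+  // that have no description (e.g. topics removed in the meantime).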
+  private List<InternalTopic> createList(List<String> orderedNames,
+                                         Map<String, TopicDescription> descriptions,
+                                         Map<String, List<ConfigEntry>> configs,
+                                         InternalPartitionsOffsets partitionsOffsets,
+                                         JmxClusterUtil.JmxMetrics jmxMetrics,
+                                         InternalLogDirStats logDirInfo) {
+    return orderedNames.stream()
+        .filter(descriptions::containsKey)
+        .map(t -> InternalTopic.from(
+            descriptions.get(t),
+            configs.getOrDefault(t, List.of()),
+            partitionsOffsets,
+            jmxMetrics,
+            logDirInfo
+        ))
+        .collect(toList());
   }
 
-  private Flux<InternalTopic> getTopicsData(ReactiveAdminClient client, Collection<String> topics) {
-    final Mono<Map<String, List<InternalTopicConfig>>> configsMono =
-        loadTopicsConfig(client, topics);
-
-    return client.describeTopics(topics)
-        .map(m -> m.values().stream()
-            .map(ClusterUtil::mapToInternalTopic).collect(Collectors.toList()))
-        .flatMap(internalTopics -> configsMono
-            .map(configs -> mergeWithConfigs(internalTopics, configs).values()))
-        .flatMapMany(Flux::fromIterable);
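+  // Queries earliest and latest offsets for every partition of the described topics.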
+  private Mono<InternalPartitionsOffsets> getPartitionOffsets(Map<String, TopicDescription>
+                                                                  descriptions,
+                                                              ReactiveAdminClient ac) {
+    var topicPartitions = descriptions.values().stream()
+        .flatMap(desc ->
+            desc.partitions().stream().map(p -> new TopicPartition(desc.name(), p.partition())))
+        .collect(toList());
+
+    return ac.listOffsets(topicPartitions, OffsetSpec.earliest())
+        .zipWith(ac.listOffsets(topicPartitions, OffsetSpec.latest()),
+            (earliest, latest) ->
+                topicPartitions.stream()
+                    .filter(tp -> earliest.containsKey(tp) && latest.containsKey(tp))
+                    .map(tp ->
+                        Map.entry(tp,
+                            new InternalPartitionsOffsets.Offsets(
+                                earliest.get(tp), latest.get(tp))))
+                    .collect(toMap(Map.Entry::getKey, Map.Entry::getValue)))
+        .map(InternalPartitionsOffsets::new);
   }
 
-  public List<TopicConfigDTO> getTopicConfigs(KafkaCluster cluster, String topicName) {
-    var configs = getTopic(cluster, topicName).getTopicConfigs();
-    return configs.stream()
-        .map(clusterMapper::toTopicConfig)
-        .collect(Collectors.toList());
+  public Mono<TopicDetailsDTO> getTopicDetails(KafkaCluster cluster, String topicName) {
+    return loadTopic(cluster, topicName).map(clusterMapper::toTopicDetails);
   }
 
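+  // Topic configs are read directly from the broker, not from the cached snapshot.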
+  public Mono<List<TopicConfigDTO>> getTopicConfigs(KafkaCluster cluster, String topicName) {
+    return adminClientService.get(cluster)
+        .flatMap(ac -> ac.getTopicsConfig(List.of(topicName)))
+        .map(m -> m.values().stream().findFirst().orElseThrow(TopicNotFoundException::new))
+        .map(lst -> lst.stream()
+            .map(InternalTopicConfig::from)
+            .map(clusterMapper::toTopicConfig)
+            .collect(toList()));
+  }
 
-  @SneakyThrows
-  private Mono<InternalTopic> createTopic(ReactiveAdminClient adminClient,
-                                          Mono<TopicCreationDTO> topicCreation) {
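+  // Creates the topic, then returns its state as re-read from the cluster.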
+  private Mono<InternalTopic> createTopic(KafkaCluster c, ReactiveAdminClient adminClient,
+                                          Mono<TopicCreationDTO> topicCreation) {
     return topicCreation.flatMap(topicData ->
         adminClient.createTopic(
             topicData.getName(),
@@ -155,73 +171,39 @@ public class TopicsService {
         ).thenReturn(topicData)
     )
         .onErrorResume(t -> Mono.error(new TopicMetadataException(t.getMessage())))
-        .flatMap(topicData -> getUpdatedTopic(adminClient, topicData.getName()))
-        .switchIfEmpty(Mono.error(new RuntimeException("Can't find created topic")));
+        .flatMap(topicData -> loadTopic(c, topicData.getName()));
   }
 
-  public Mono<TopicDTO> createTopic(
-      KafkaCluster cluster, Mono<TopicCreationDTO> topicCreation) {
-    return adminClientService.get(cluster).flatMap(ac -> createTopic(ac, topicCreation))
-        .doOnNext(t -> clustersStorage.onTopicUpdated(cluster.getName(), t))
+  public Mono<TopicDTO> createTopic(KafkaCluster cluster, Mono<TopicCreationDTO> topicCreation) {
+    return adminClientService.get(cluster)
+        .flatMap(ac -> createTopic(cluster, ac, topicCreation))
         .map(clusterMapper::toTopic);
   }
 
-  private Map<String, InternalTopic> mergeWithConfigs(
-      List<InternalTopic> topics, Map<String, List<InternalTopicConfig>> configs) {
-    return topics.stream()
-        .map(t -> t.toBuilder().topicConfigs(configs.get(t.getName())).build())
-        .map(t -> t.toBuilder().cleanUpPolicy(
-            CleanupPolicy.fromString(t.getTopicConfigs().stream()
-                .filter(config -> config.getName().equals("cleanup.policy"))
-                .findFirst()
-                .orElseGet(() -> InternalTopicConfig.builder().value("unknown").build())
-                .getValue())).build())
-        .collect(Collectors.toMap(
-            InternalTopic::getName,
-            e -> e
-        ));
-  }
-
-  public Mono<InternalTopic> getUpdatedTopic(ReactiveAdminClient ac, String topicName) {
-    return getTopicsData(ac, List.of(topicName)).next();
-  }
-
-  public Mono<InternalTopic> updateTopic(KafkaCluster cluster,
+  private Mono<InternalTopic> updateTopic(KafkaCluster cluster,
                                          String topicName,
                                          TopicUpdateDTO topicUpdate) {
     return adminClientService.get(cluster)
        .flatMap(ac ->
-            ac.updateTopicConfig(topicName,
-                topicUpdate.getConfigs()).then(getUpdatedTopic(ac, topicName)));
+            ac.updateTopicConfig(topicName, topicUpdate.getConfigs())
+                .then(loadTopic(cluster, topicName)));
   }
 
   public Mono<TopicDTO> updateTopic(KafkaCluster cl, String topicName,
                                     Mono<TopicUpdateDTO> topicUpdate) {
     return topicUpdate
         .flatMap(t -> updateTopic(cl, topicName, t))
-        .doOnNext(t -> clustersStorage.onTopicUpdated(cl.getName(), t))
        .map(clusterMapper::toTopic);
   }
 
-  @SneakyThrows
-  private Mono<Map<String, List<InternalTopicConfig>>> loadTopicsConfig(
-      ReactiveAdminClient client, Collection<String> topicNames) {
-    return client.getTopicsConfig(topicNames)
-        .map(configs ->
-            configs.entrySet().stream().collect(Collectors.toMap(
-                Map.Entry::getKey,
-                c -> c.getValue().stream()
-                    .map(ClusterUtil::mapToInternalTopicConfig)
-                    .collect(Collectors.toList()))));
-  }
-
   private Mono<InternalTopic> changeReplicationFactor(
+      KafkaCluster cluster,
       ReactiveAdminClient adminClient,
       String topicName,
       Map<TopicPartition, Optional<NewPartitionReassignment>> reassignments
   ) {
     return adminClient.alterPartitionReassignments(reassignments)
-        .then(getUpdatedTopic(adminClient, topicName));
+        .then(loadTopic(cluster, topicName));
   }
 
   /**
@@ -231,11 +213,12 @@ public class TopicsService {
       KafkaCluster cluster,
       String topicName,
       ReplicationFactorChangeDTO replicationFactorChange) {
-    return adminClientService.get(cluster)
+    return loadTopic(cluster, topicName).flatMap(topic -> adminClientService.get(cluster)
         .flatMap(ac -> {
-          Integer actual = getTopic(cluster, topicName).getReplicationFactor();
+          Integer actual = topic.getReplicationFactor();
           Integer requested = replicationFactorChange.getTotalReplicationFactor();
-          Integer brokersCount = cluster.getMetrics().getBrokerCount();
+          Integer brokersCount = metricsCache.get(cluster).getClusterDescription()
+              .getNodes().size();
 
           if (requested.equals(actual)) {
             return Mono.error(
@@ -248,25 +231,24 @@ public class TopicsService {
                 String.format("Requested replication factor %s more than brokers count %s.",
                     requested, brokersCount)));
           }
-          return changeReplicationFactor(ac, topicName,
-              getPartitionsReassignments(cluster, topicName,
+          return changeReplicationFactor(cluster, ac, topicName,
+              getPartitionsReassignments(cluster, topic,
                  replicationFactorChange));
        })
-        .doOnNext(topic -> clustersStorage.onTopicUpdated(cluster.getName(), topic))
        .map(t -> new ReplicationFactorChangeResponseDTO()
            .topicName(t.getName())
-            .totalReplicationFactor(t.getReplicationFactor()));
+            .totalReplicationFactor(t.getReplicationFactor())));
   }
 
   private Map<TopicPartition, Optional<NewPartitionReassignment>> getPartitionsReassignments(
       KafkaCluster cluster,
-      String topicName,
+      InternalTopic topic,
       ReplicationFactorChangeDTO replicationFactorChange) {
     // Current assignment map (Partition number -> List of brokers)
-    Map<Integer, List<Integer>> currentAssignment = getCurrentAssignment(cluster, topicName);
+    Map<Integer, List<Integer>> currentAssignment = getCurrentAssignment(topic);
     // Brokers map (Broker id -> count)
     Map<Integer, Integer> brokersUsage = getBrokersMap(cluster, currentAssignment);
-    int currentReplicationFactor = getTopic(cluster, topicName).getReplicationFactor();
+    int currentReplicationFactor = topic.getReplicationFactor();
 
     // If we should to increase Replication factor
     if (replicationFactorChange.getTotalReplicationFactor() > currentReplicationFactor) {
@@ -276,7 +258,7 @@ public class TopicsService {
       var brokers = brokersUsage.entrySet().stream()
          .sorted(Map.Entry.comparingByValue())
          .map(Map.Entry::getKey)
-          .collect(Collectors.toList());
+          .collect(toList());
 
       // Iterate brokers and try to add them in assignment
       // while (partition replicas count != requested replication factor)
@@ -304,13 +286,13 @@ public class TopicsService {
       var brokersUsageList = brokersUsage.entrySet().stream()
          .sorted(Map.Entry.comparingByValue(Comparator.reverseOrder()))
          .map(Map.Entry::getKey)
-          .collect(Collectors.toList());
+          .collect(toList());
 
       // Iterate brokers and try to remove them from assignment
       // while (partition replicas count != requested replication factor)
       for (Integer broker : brokersUsageList) {
         // Check is the broker the leader of partition
-        if (!getTopic(cluster, topicName).getPartitions().get(partition).getLeader()
+        if (!topic.getPartitions().get(partition).getLeader()
            .equals(broker)) {
          brokers.remove(broker);
          brokersUsage.merge(broker, -1, Integer::sum);
@@ -328,26 +310,28 @@ public class TopicsService {
     }
 
     // Return result map
-    return currentAssignment.entrySet().stream().collect(Collectors.toMap(
-        e -> new TopicPartition(topicName, e.getKey()),
+    return currentAssignment.entrySet().stream().collect(toMap(
+        e -> new TopicPartition(topic.getName(), e.getKey()),
        e -> Optional.of(new NewPartitionReassignment(e.getValue()))
     ));
   }
 
-  private Map<Integer, List<Integer>> getCurrentAssignment(KafkaCluster cluster, String topicName) {
-    return getTopic(cluster, topicName).getPartitions().values().stream()
-        .collect(Collectors.toMap(
+  private Map<Integer, List<Integer>> getCurrentAssignment(InternalTopic topic) {
+    return topic.getPartitions().values().stream()
+        .collect(toMap(
            InternalPartition::getPartition,
            p -> p.getReplicas().stream()
                .map(InternalReplica::getBroker)
-                .collect(Collectors.toList())
+                .collect(toList())
        ));
   }
 
   private Map<Integer, Integer> getBrokersMap(KafkaCluster cluster,
                                               Map<Integer, List<Integer>> currentAssignment) {
-    Map<Integer, Integer> result = cluster.getMetrics().getBrokers().stream()
-        .collect(Collectors.toMap(
+    Map<Integer, Integer> result = metricsCache.get(cluster).getClusterDescription().getNodes()
+        .stream()
+        .map(Node::id)
+        .collect(toMap(
            c -> c,
            c -> 0
        ));
@@ -361,9 +345,9 @@ public class TopicsService {
       KafkaCluster cluster,
       String topicName,
       PartitionsIncreaseDTO partitionsIncrease) {
-    return adminClientService.get(cluster)
-        .flatMap(ac -> {
-          Integer actualCount = getTopic(cluster, topicName).getPartitionCount();
+    return loadTopic(cluster, topicName).flatMap(topic ->
+        adminClientService.get(cluster).flatMap(ac -> {
+          Integer actualCount = topic.getPartitionCount();
           Integer requestedCount = partitionsIncrease.getTotalPartitionsCount();
 
           if (requestedCount < actualCount) {
@@ -383,55 +367,24 @@ public class TopicsService {
              NewPartitions.increaseTo(partitionsIncrease.getTotalPartitionsCount())
          );
          return ac.createPartitions(newPartitionsMap)
-              .then(getUpdatedTopic(ac, topicName));
+              .then(loadTopic(cluster, topicName));
        })
-        .doOnNext(t -> clustersStorage.onTopicUpdated(cluster.getName(), t))
        .map(t -> new PartitionsIncreaseResponseDTO()
            .topicName(t.getName())
-            .totalPartitionsCount(t.getPartitionCount()));
-  }
-
-  private Map<Integer, InternalPartition> getTopicPartitions(KafkaCluster c, InternalTopic topic) {
-    var tps = topic.getPartitions().values().stream()
-        .map(t -> new TopicPartition(topic.getName(), t.getPartition()))
-        .collect(Collectors.toList());
-    Map<Integer, InternalPartition> partitions =
-        topic.getPartitions().values().stream().collect(Collectors.toMap(
-            InternalPartition::getPartition,
-            tp -> tp
-        ));
-
-    try (var consumer = consumerGroupService.createConsumer(c)) {
-      final Map<TopicPartition, Long> earliest = consumer.beginningOffsets(tps);
-      final Map<TopicPartition, Long> latest = consumer.endOffsets(tps);
-
-      return tps.stream()
-          .map(tp -> partitions.get(tp.partition())
-              .withOffsets(
-                  earliest.getOrDefault(tp, -1L),
-                  latest.getOrDefault(tp, -1L)
-              )
-          ).collect(Collectors.toMap(
-              InternalPartition::getPartition,
-              tp -> tp
-          ));
-    } catch (Exception e) {
-      return Collections.emptyMap();
-    }
+            .totalPartitionsCount(t.getPartitionCount())));
   }
 
   public Mono<Void> deleteTopic(KafkaCluster cluster, String topicName) {
-    var topicDetails = getTopicDetails(cluster, topicName);
-    if (cluster.getFeatures().contains(Feature.TOPIC_DELETION)) {
+    if (metricsCache.get(cluster).getFeatures().contains(Feature.TOPIC_DELETION)) {
       return adminClientService.get(cluster).flatMap(c -> c.deleteTopic(topicName))
-          .doOnSuccess(t -> clustersStorage.onTopicDeleted(cluster.getName(), topicName));
+          .doOnSuccess(t -> metricsCache.onTopicDelete(cluster, topicName));
     } else {
       return Mono.error(new ValidationException("Topic deletion restricted"));
     }
   }
 
   public TopicMessageSchemaDTO getTopicSchema(KafkaCluster cluster, String topicName) {
-    if (!cluster.getMetrics().getTopics().containsKey(topicName)) {
+    if (!metricsCache.get(cluster).getTopicDescriptions().containsKey(topicName)) {
       throw new TopicNotFoundException();
     }
     return deserializationService
@@ -439,12 +392,88 @@ public class TopicsService {
         .getTopicSchema(topicName);
   }
 
-  private InternalTopic getTopic(KafkaCluster c, String topicName) {
-    var topic = c.getMetrics().getTopics().get(topicName);
-    if (topic == null) {
-      throw new TopicNotFoundException();
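+  // Topic paging extracted into a separate unit so it can be tested in isolation:
+  // filtering, sorting and page slicing all run over the cached metadata.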
+  @VisibleForTesting
+  @Value
+  static class Pagination {
+    ReactiveAdminClient adminClient;
+    MetricsCache.Metrics metrics;
+
+    @Value
+    static class Page {
+      List<String> topics;
+      int totalPages;
+    }
+
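+    // Applies internal/search filters and sorting, then slices out the requested page.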
+    Mono<Page> getPage(
+        Optional<Integer> pageNum,
+        Optional<Integer> nullablePerPage,
+        Optional<Boolean> showInternal,
+        Optional<String> search,
+        Optional<TopicColumnsToSortDTO> sortBy) {
+      return getTopicsForPagination()
+          .map(paginatingTopics -> {
+            Predicate<Integer> positiveInt = i -> i > 0;
+            int perPage = nullablePerPage.filter(positiveInt).orElse(DEFAULT_PAGE_SIZE);
+            var topicsToSkip = (pageNum.filter(positiveInt).orElse(1) - 1) * perPage;
+            List<InternalTopic> topics = paginatingTopics.stream()
+                .filter(topic -> !topic.isInternal()
+                    || showInternal.map(i -> topic.isInternal() == i).orElse(true))
+                .filter(topic ->
+                    search
+                        .map(s -> StringUtils.containsIgnoreCase(topic.getName(), s))
+                        .orElse(true))
+                .sorted(getComparatorForTopic(sortBy))
+                .collect(toList());
+            var totalPages = (topics.size() / perPage)
+                + (topics.size() % perPage == 0 ? 0 : 1);
+
+            List<String> topicsToRender = topics.stream()
+                .skip(topicsToSkip)
+                .limit(perPage)
+                .map(InternalTopic::getName)
+                .collect(toList());
+
+            return new Page(topicsToRender, totalPages);
+          });
+    }
+
+    private Comparator<InternalTopic> getComparatorForTopic(
+        Optional<TopicColumnsToSortDTO> sortBy) {
+      var defaultComparator = Comparator.comparing(InternalTopic::getName);
+      if (sortBy.isEmpty()) {
+        return defaultComparator;
+      }
+      switch (sortBy.get()) {
+        case TOTAL_PARTITIONS:
+          return Comparator.comparing(InternalTopic::getPartitionCount);
+        case OUT_OF_SYNC_REPLICAS:
+          return Comparator.comparing(t -> t.getReplicas() - t.getInSyncReplicas());
+        case REPLICATION_FACTOR:
+          return Comparator.comparing(InternalTopic::getReplicationFactor);
+        case NAME:
+        default:
+          return defaultComparator;
+      }
+    }
+
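+    // Guards against stale cache entries: keeps only the names that are still
+    // present in the live topic listing.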
+    private Mono<List<String>> filterExisting(Collection<String> topics) {
+      return adminClient.listTopics(true)
+          .map(existing -> existing.stream().filter(topics::contains).collect(toList()));
+    }
+
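+    // Builds lightweight topic views from the cached snapshot; offsets are left
+    // empty (InternalPartitionsOffsets.empty()) since paging does not need them.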
+    private Mono<List<InternalTopic>> getTopicsForPagination() {
+      return filterExisting(metrics.getTopicDescriptions().keySet())
+          .map(lst -> lst.stream()
+              .map(topicName ->
+                  InternalTopic.from(
+                      metrics.getTopicDescriptions().get(topicName),
+                      metrics.getTopicConfigs().getOrDefault(topicName, List.of()),
+                      InternalPartitionsOffsets.empty(),
+                      metrics.getJmxMetrics(),
+                      metrics.getLogDirInfo()))
+              .collect(toList())
+          );
     }
-    return topic;
   }
 
 }