@@ -1,9 +1,6 @@
 package com.provectus.kafka.ui.kafka;
 
-import com.provectus.kafka.ui.cluster.model.InternalClusterMetrics;
-import com.provectus.kafka.ui.cluster.model.InternalTopic;
-import com.provectus.kafka.ui.cluster.model.InternalTopicConfig;
-import com.provectus.kafka.ui.cluster.model.KafkaCluster;
+import com.provectus.kafka.ui.cluster.model.*;
 import com.provectus.kafka.ui.cluster.util.ClusterUtil;
 import com.provectus.kafka.ui.model.ConsumerGroup;
 import com.provectus.kafka.ui.model.ServerStatus;
@@ -17,6 +14,7 @@ import org.apache.kafka.clients.consumer.ConsumerConfig;
 import org.apache.kafka.clients.consumer.KafkaConsumer;
 import org.apache.kafka.common.KafkaFuture;
 import org.apache.kafka.common.Node;
+import org.apache.kafka.common.TopicPartition;
 import org.apache.kafka.common.config.ConfigResource;
 import org.apache.kafka.common.serialization.BytesDeserializer;
 import org.apache.kafka.common.utils.Bytes;
@@ -25,12 +23,10 @@ import reactor.core.publisher.Mono;
 import reactor.util.function.Tuple2;
 import reactor.util.function.Tuples;
 
-import java.util.Collections;
-import java.util.List;
-import java.util.Map;
-import java.util.Properties;
+import java.util.*;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.stream.Collectors;
+import java.util.stream.Stream;
 
 @Service
 @RequiredArgsConstructor
@@ -41,15 +37,19 @@ public class KafkaService {
 
     private final ZookeeperService zookeeperService;
     private final Map<String, AdminClient> adminClientCache = new ConcurrentHashMap<>();
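+    // Per-admin-client cache of partition -> leader broker id, filled in getTopicsData and read in updateSegmentMetrics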
+    private final Map<AdminClient, Map<TopicPartition, Integer>> leadersCache = new ConcurrentHashMap<>();
 
     @SneakyThrows
     public Mono<KafkaCluster> getUpdatedCluster(KafkaCluster cluster) {
         return getOrCreateAdminClient(cluster).flatMap(
-            ac -> getClusterMetrics(ac).flatMap( clusterMetrics ->
+            ac -> getClusterMetrics(ac)
+                .flatMap( clusterMetrics ->
                     getTopicsData(ac).flatMap( topics ->
                         loadTopicsConfig(ac, topics.stream().map(InternalTopic::getName).collect(Collectors.toList()))
-                            .map( configs -> mergeWithConfigs(topics, configs) )
-                    ).map( topics -> buildFromData(cluster, clusterMetrics, topics))
+                            .map( configs -> mergeWithConfigs(topics, configs))
+                            .flatMap(it -> updateSegmentMetrics(ac, clusterMetrics, it))
+                    ).map( segmentSizeDto -> buildFromData(cluster, segmentSizeDto))
             )
         ).onErrorResume(
             e -> Mono.just(cluster.toBuilder()
@@ -59,7 +59,10 @@ public class KafkaService {
         );
     }
 
-    private KafkaCluster buildFromData(KafkaCluster currentCluster, InternalClusterMetrics brokersMetrics, Map<String, InternalTopic> topics) {
+    private KafkaCluster buildFromData(KafkaCluster currentCluster, InternalSegmentSizeDto segmentSizeDto) {
+
+        var topics = segmentSizeDto.getInternalTopicWithSegmentSize();
+        var brokersMetrics = segmentSizeDto.getClusterMetricsWithSegmentSize();
 
         InternalClusterMetrics.InternalClusterMetricsBuilder metricsBuilder = brokersMetrics.toBuilder();
 
@@ -134,6 +137,18 @@ public class KafkaService {
     private Mono<List<InternalTopic>> getTopicsData(AdminClient adminClient) {
         return ClusterUtil.toMono(adminClient.listTopics(LIST_TOPICS_OPTIONS).names())
                 .flatMap(topics -> ClusterUtil.toMono(adminClient.describeTopics(topics).all()))
+                .map(topic -> {
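+                    // Remember each partition's leader broker id so segment sizes can later be read from that broker's log dirs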
+                    var leadersMap = topic.values().stream()
+                            .flatMap(t -> t.partitions().stream()
+                                    .flatMap(t1 -> {
+                                        Map<TopicPartition, Integer> result = new HashMap<>();
+                                        result.put(new TopicPartition(t.name(), t1.partition()), t1.leader().id());
+                                        return Stream.of(result);
+                                    }));
+                    leadersCache.put(adminClient, ClusterUtil.toSingleMap(leadersMap));
+                    return topic;
+                })
                 .map( m -> m.values().stream().map(ClusterUtil::mapToInternalTopic).collect(Collectors.toList()));
     }
 
@@ -142,11 +157,13 @@ public class KafkaService {
             .flatMap(brokers ->
                 ClusterUtil.toMono(client.describeCluster().controller()).map(
                     c -> {
-                        InternalClusterMetrics.InternalClusterMetricsBuilder builder = InternalClusterMetrics.builder();
-                        builder.brokerCount(brokers.size()).activeControllers(c != null ? 1 : 0);
+                        InternalClusterMetrics.InternalClusterMetricsBuilder metricsBuilder = InternalClusterMetrics.builder();
+                        metricsBuilder.brokerCount(brokers.size()).activeControllers(c != null ? 1 : 0);
                         // TODO: fill bytes in/out metrics
-                        List<Integer> brokerIds = brokers.stream().map(Node::id).collect(Collectors.toList());
-                        return builder.build();
+                        metricsBuilder
+                                .internalBrokerMetrics(brokers.stream().map(Node::id).collect(Collectors.toMap(k -> k, v -> InternalBrokerMetrics.builder().build())));
+
+                        return metricsBuilder.build();
                     }
                 )
             );
@@ -236,7 +253,6 @@ public class KafkaService {
 
     public Mono<List<ConsumerGroup>> getConsumerGroups(KafkaCluster cluster) {
         var adminClient = this.createAdminClient(cluster);
-
         return ClusterUtil.toMono(adminClient.listConsumerGroups().all())
             .flatMap(s -> ClusterUtil.toMono(adminClient
                 .describeConsumerGroups(s.stream().map(ConsumerGroupListing::groupId).collect(Collectors.toList())).all()))
@@ -263,4 +279,49 @@ public class KafkaService {
             .iterator()
             .next());
     }
+
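+    // Enriches topics, partitions and brokers with on-disk segment sizes reported by describeLogDirs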
+    private Mono<InternalSegmentSizeDto> updateSegmentMetrics(AdminClient ac, InternalClusterMetrics clusterMetrics, Map<String, InternalTopic> internalTopic) {
+        return ClusterUtil.toMono(ac.describeTopics(internalTopic.keySet()).all()).flatMap(topic ->
+                ClusterUtil.toMono(ac.describeLogDirs(clusterMetrics.getInternalBrokerMetrics().keySet()).all())
+                        .map(log -> {
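+                            // For each cached partition, sum the replica sizes across all log dirs of its leader broker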
+                            var partitionSegmentSizeStream = leadersCache.get(ac).entrySet().stream()
+                                    .flatMap(l -> {
+                                        Map<TopicPartition, Long> result = new HashMap<>();
+                                        result.put(l.getKey(), log.get(l.getValue()).values().stream().mapToLong(e -> e.replicaInfos.get(l.getKey()).size).sum());
+                                        return Stream.of(result);
+                                    });
+                            var partitionSegmentSize = ClusterUtil.toSingleMap(partitionSegmentSizeStream);
+
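+                            // Roll the per-partition sizes up into a total and a per-partition map for every topic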
+                            var resultTopicMetricsStream = internalTopic.keySet().stream().flatMap(k -> {
+                                Map<String, InternalTopic> result = new HashMap<>();
+                                result.put(k, internalTopic.get(k).toBuilder()
+                                        .segmentSize(partitionSegmentSize.entrySet().stream().filter(e -> e.getKey().topic().equals(k)).mapToLong(Map.Entry::getValue).sum())
+                                        .partitionSegmentSize(partitionSegmentSize.entrySet().stream().filter(e -> e.getKey().topic().equals(k)).collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue))).build());
+                                return Stream.of(result);
+                            });
+
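+                            // Total segment size per broker: sum every replica in every log dir the broker reports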
+                            var resultBrokerMetricsStream = clusterMetrics.getInternalBrokerMetrics().entrySet().stream().map(
+                                    e -> {
+                                        var brokerSegmentSize = log.get(e.getKey()).values().stream()
+                                                .mapToLong(v -> v.replicaInfos.values().stream()
+                                                        .mapToLong(r -> r.size).sum()).sum();
+                                        InternalBrokerMetrics tempBrokerMetrics = InternalBrokerMetrics.builder().segmentSize(brokerSegmentSize).build();
+                                        return Collections.singletonMap(e.getKey(), tempBrokerMetrics);
+                                    });
+
+                            var resultClusterMetrics = clusterMetrics.toBuilder()
+                                    .internalBrokerMetrics(ClusterUtil.toSingleMap(resultBrokerMetricsStream))
+                                    .segmentSize(partitionSegmentSize.values().stream().reduce(Long::sum).orElseThrow())
+                                    .build();
+
+                            return InternalSegmentSizeDto.builder()
+                                    .clusterMetricsWithSegmentSize(resultClusterMetrics)
+                                    .internalTopicWithSegmentSize(ClusterUtil.toSingleMap(resultTopicMetricsStream)).build();
+                        })
+        );
+    }
 }
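
Side note on the stream pattern above: leadersMap and partitionSegmentSizeStream both emit singleton HashMaps that ClusterUtil.toSingleMap then has to flatten back into one map. Collecting straight into the target map yields the same result in one pass; a minimal sketch of the idea (hypothetical helper, not part of this change, and it assumes every partition currently has a live leader, as the original code does):

import java.util.Map;
import java.util.stream.Collectors;
import org.apache.kafka.clients.admin.TopicDescription;
import org.apache.kafka.common.TopicPartition;

class LeaderIndex {

    // Same mapping as the leadersMap block in getTopicsData:
    // (topic, partition) -> leader broker id, collected directly into a Map.
    static Map<TopicPartition, Integer> leadersOf(Map<String, TopicDescription> descriptions) {
        return descriptions.values().stream()
                .flatMap(t -> t.partitions().stream()
                        .map(p -> Map.entry(new TopicPartition(t.name(), p.partition()), p.leader().id())))
                .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
    }
}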