@@ -1,10 +1,11 @@
 package com.provectus.kafka.ui.kafka;
 
-import com.provectus.kafka.ui.cluster.mapper.BrokersMetricsMapper;
-import com.provectus.kafka.ui.cluster.mapper.ClusterDtoMapper;
 import com.provectus.kafka.ui.cluster.model.*;
 import com.provectus.kafka.ui.cluster.util.ClusterUtil;
-import com.provectus.kafka.ui.model.*;
+import com.provectus.kafka.ui.model.Cluster;
+import com.provectus.kafka.ui.model.ConsumerGroup;
+import com.provectus.kafka.ui.model.ServerStatus;
+import com.provectus.kafka.ui.model.TopicFormData;
 import com.provectus.kafka.ui.zookeeper.ZookeeperService;
 import lombok.RequiredArgsConstructor;
 import lombok.SneakyThrows;
@@ -13,15 +14,17 @@ import org.apache.kafka.clients.admin.*;
 import org.apache.kafka.common.KafkaFuture;
 import org.apache.kafka.common.Metric;
 import org.apache.kafka.common.MetricName;
+import org.apache.kafka.common.Node;
 import org.apache.kafka.common.config.ConfigResource;
+import org.apache.kafka.common.requests.DescribeLogDirsResponse;
 import org.springframework.stereotype.Service;
-import reactor.core.publisher.Flux;
 import reactor.core.publisher.Mono;
+import reactor.util.function.Tuple2;
+import reactor.util.function.Tuples;
 
 import java.util.*;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.stream.Collectors;
-import java.util.stream.Stream;
 
 import static com.provectus.kafka.ui.kafka.KafkaConstants.*;
 import static org.apache.kafka.common.config.TopicConfig.MESSAGE_FORMAT_VERSION_CONFIG;
@@ -31,135 +34,142 @@ import static org.apache.kafka.common.config.TopicConfig.MESSAGE_FORMAT_VERSION_CONFIG;
 @Log4j2
 public class KafkaService {
 
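+    // shared options object: listInternal(true) makes internal topics such as __consumer_offsets visible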
+    private static final ListTopicsOptions LIST_TOPICS_OPTIONS = new ListTopicsOptions().listInternal(true);
+
     private final ZookeeperService zookeeperService;
+    private final Map<String, AdminClient> adminClientCache = new ConcurrentHashMap<>();
 
-    private Map<String, AdminClient> adminClientCache = new ConcurrentHashMap<>();
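+    // Refresh pipeline: cached AdminClient -> cluster metrics -> topic descriptions,
+    // merged with per-topic configs; any failure marks the cluster OFFLINE instead of propagating.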
+    @SneakyThrows
+    public Mono<KafkaCluster> getUpdatedCluster(KafkaCluster cluster) {
+        return getOrCreateAdminClient(cluster).flatMap(
+            ac -> getClusterMetrics(ac).flatMap(clusterMetrics ->
+                getTopicsData(ac).flatMap(topics ->
+                    loadTopicsConfig(ac, topics.stream().map(InternalTopic::getName).collect(Collectors.toList()))
+                        .map(configs -> mergeWithConfigs(topics, configs))
+                ).map(mergedTopics -> buildFromData(cluster, clusterMetrics, mergedTopics))
+            )
+        ).onErrorResume(
+            e -> Mono.just(cluster.toBuilder()
+                .status(ServerStatus.OFFLINE)
+                .lastKafkaException(e)
+                .build())
+        );
+    }
 
-    private final ClusterDtoMapper clusterDtoMapper;
+    private KafkaCluster buildFromData(KafkaCluster currentCluster, InternalClusterMetrics brokersMetrics, Map<String, InternalTopic> topics) {
 
-    private final BrokersMetricsMapper brokersMetricsMapper;
+        InternalClusterMetrics.InternalClusterMetricsBuilder metricsBuilder = brokersMetrics.toBuilder();
 
-    @SneakyThrows
-    public Mono<ClusterWithId> getUpdatedCluster(ClusterWithId clusterWithId) {
-        var internalCluster = clusterWithId.getKafkaCluster();
-        return getOrCreateAdminClient(clusterWithId).flatMap(
-            ac ->
-                getClusterMetrics(ac).flatMap(
-                    internalMetrics ->
-                        getTopicsData(ac)
-                            .flatMap(topics ->
-                                loadTopicConfig(ac, topics.stream().map(InternalTopic::getName).collect(Collectors.toList())).collectList()
-                                    .map(s -> s.stream().collect(HashMap<String, List<TopicConfig>>::new, HashMap::putAll, HashMap::putAll))
-                                    .map(s -> s.entrySet().stream().map(t -> InternalTopic.builder()
-                                        .name(t.getKey())
-                                        .topicConfigs(t.getValue())
-                                        .topicDetails(topics.stream().filter(to -> to.getName().equals(t.getKey())).findFirst().orElseThrow().getTopicDetails())
-                                        .partitions(topics.stream().filter(to -> to.getName().equals(t.getKey())).findFirst().orElseThrow().getPartitions())
-                                        .build()).collect(Collectors.toList()))
-                            ).map(topics -> {
-
-                                InternalBrokersMetrics brokersMetrics = internalCluster.getBrokersMetrics() != null
-                                    ? brokersMetricsMapper.toBrokersMetricsDto(internalCluster.getBrokersMetrics()) : InternalBrokersMetrics.builder().build();
-                                resetPartitionMetrics(brokersMetrics);
-                                brokersMetrics.setActiveControllers(internalMetrics.getActiveControllers());
-                                brokersMetrics.setZooKeeperStatus(zookeeperService.isZookeeperOnline(internalCluster) ? 1 : 0);
-                                brokersMetrics.setBrokerCount(internalMetrics.getBrokerCount());
-                                var internalBrokersMetrics = updateBrokersMetrics(brokersMetrics, topics);
-
-                                InternalCluster cluster = clusterDtoMapper.toClusterDto(internalCluster.getCluster());
-                                cluster.setStatus(ServerStatus.ONLINE);
-                                cluster.setBytesInPerSec(internalMetrics.getBytesInPerSec());
-                                cluster.setBytesOutPerSec(internalMetrics.getBytesOutPerSec());
-                                cluster.setBrokerCount(internalMetrics.getBrokerCount());
-                                cluster.setTopicCount(topics.size());
-                                cluster.setOnlinePartitionCount(internalBrokersMetrics.getOnlinePartitionCount());
-
-                                return ClusterWithId.builder()
-                                    .id(internalCluster.getName())
-                                    .kafkaCluster(
-                                        KafkaCluster.builder().topics(ClusterUtil.convertToExternalTopicList(topics))
-                                            .name(cluster.getName())
-                                            .zookeeperStatus(zookeeperService.isZookeeperOnline(internalCluster) ? ServerStatus.ONLINE : ServerStatus.OFFLINE)
-                                            .cluster(clusterDtoMapper.toCluster(cluster))
-                                            .brokersMetrics(brokersMetricsMapper.toBrokersMetrics(internalBrokersMetrics))
-                                            .build()
-                                    ).build();
-                            })
-                )
-        ).onErrorResume(
-            e -> {
-                InternalCluster cluster = clusterDtoMapper.toClusterDto(internalCluster.getCluster());
-                cluster.setStatus(ServerStatus.OFFLINE);
-                return Mono.just(clusterWithId.toBuilder().kafkaCluster(
-                    internalCluster.toBuilder()
-                        .lastKafkaException(e)
-                        .cluster(clusterDtoMapper.toCluster(cluster))
-                        .build()
-                ).build());
-            }
-        );
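+        // roll per-topic partition/replica counters up into cluster-wide totals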
+        InternalClusterMetrics topicsMetrics = collectTopicsMetrics(topics);
+
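+        // the ZooKeeper probe may throw; capture the error so the Kafka part of the refresh still succeeds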
+        ServerStatus zookeeperStatus = ServerStatus.OFFLINE;
+        Throwable zookeeperException = null;
+        try {
+            zookeeperStatus = zookeeperService.isZookeeperOnline(currentCluster) ? ServerStatus.ONLINE : ServerStatus.OFFLINE;
+        } catch (Throwable e) {
+            zookeeperException = e;
+        }
+
+        InternalClusterMetrics clusterMetrics = metricsBuilder
+            .activeControllers(brokersMetrics.getActiveControllers())
+            .brokerCount(brokersMetrics.getBrokerCount())
+            .underReplicatedPartitionCount(topicsMetrics.getUnderReplicatedPartitionCount())
+            .inSyncReplicasCount(topicsMetrics.getInSyncReplicasCount())
+            .outOfSyncReplicasCount(topicsMetrics.getOutOfSyncReplicasCount())
+            .onlinePartitionCount(topicsMetrics.getOnlinePartitionCount())
+            .offlinePartitionCount(topicsMetrics.getOfflinePartitionCount()).build();
+
+        return currentCluster.toBuilder()
+            .status(ServerStatus.ONLINE)
+            .zookeeperStatus(zookeeperStatus)
+            .lastZookeeperException(zookeeperException)
+            .lastKafkaException(null)
+            .metrics(clusterMetrics)
+            .topics(topics)
+            .build();
+    }
+
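+    // Sums partition and replica health counters across all known topics.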
+    private InternalClusterMetrics collectTopicsMetrics(Map<String, InternalTopic> topics) {
+
+        int underReplicatedPartitions = 0;
+        int inSyncReplicasCount = 0;
+        int outOfSyncReplicasCount = 0;
+        int onlinePartitionCount = 0;
+        int offlinePartitionCount = 0;
+
+        for (InternalTopic topic : topics.values()) {
+            underReplicatedPartitions += topic.getUnderReplicatedPartitions();
+            inSyncReplicasCount += topic.getInSyncReplicas();
+            outOfSyncReplicasCount += (topic.getReplicas() - topic.getInSyncReplicas());
+            onlinePartitionCount += topic.getPartitions().stream().mapToInt(s -> s.getLeader() == null ? 0 : 1).sum();
+            offlinePartitionCount += topic.getPartitions().stream().mapToInt(s -> s.getLeader() != null ? 0 : 1).sum();
+        }
+
+        return InternalClusterMetrics.builder()
+            .underReplicatedPartitionCount(underReplicatedPartitions)
+            .inSyncReplicasCount(inSyncReplicasCount)
+            .outOfSyncReplicasCount(outOfSyncReplicasCount)
+            .onlinePartitionCount(onlinePartitionCount)
+            .offlinePartitionCount(offlinePartitionCount)
+            .build();
+    }
+
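+    // Attaches each topic's config list to its model and indexes the result by topic name.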
+    private Map<String, InternalTopic> mergeWithConfigs(List<InternalTopic> topics, Map<String, List<InternalTopicConfig>> configs) {
+        return topics.stream().map(
+            t -> t.toBuilder().topicConfigs(configs.get(t.getName())).build()
+        ).collect(Collectors.toMap(
+            InternalTopic::getName,
+            e -> e
+        ));
     }
 
     @SneakyThrows
     private Mono<List<InternalTopic>> getTopicsData(AdminClient adminClient) {
-        ListTopicsOptions listTopicsOptions = new ListTopicsOptions();
-        listTopicsOptions.listInternal(true);
-        return ClusterUtil.toMono(adminClient.listTopics(listTopicsOptions).names())
-            .map(tl -> {
-                DescribeTopicsResult topicDescriptionsWrapper = adminClient.describeTopics(tl);
-                Map<String, KafkaFuture<TopicDescription>> topicDescriptionFuturesMap = topicDescriptionsWrapper.values();
-                return topicDescriptionFuturesMap.entrySet();
-            })
-            .flatMapMany(Flux::fromIterable)
-            .flatMap(s -> ClusterUtil.toMono(s.getValue()))
-            .map(this::collectTopicData)
-            .collectList();
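+        // single describeTopics round-trip for all names, instead of one future per topic as before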
+        return ClusterUtil.toMono(adminClient.listTopics(LIST_TOPICS_OPTIONS).names())
+            .flatMap(topics -> ClusterUtil.toMono(adminClient.describeTopics(topics).all()))
+            .map(m -> m.values().stream().map(ClusterUtil::mapToInternalTopic).collect(Collectors.toList()));
     }
 
-    private Mono<InternalMetrics> getClusterMetrics(AdminClient client) {
+    private Mono<InternalClusterMetrics> getClusterMetrics(AdminClient client) {
         return ClusterUtil.toMono(client.describeCluster().nodes())
-            .map(Collection::size)
             .flatMap(brokers ->
                 ClusterUtil.toMono(client.describeCluster().controller()).map(
                     c -> {
-                        InternalMetrics internalMetrics = new InternalMetrics();
-                        internalMetrics.setBrokerCount(brokers);
-                        internalMetrics.setActiveControllers(c != null ? 1 : 0);
-                        for (Map.Entry<MetricName, ? extends Metric> metricNameEntry : client.metrics().entrySet()) {
-                            if (metricNameEntry.getKey().name().equals(IN_BYTE_PER_SEC_METRIC)
-                                && metricNameEntry.getKey().description().equals(IN_BYTE_PER_SEC_METRIC_DESCRIPTION)) {
-                                internalMetrics.setBytesInPerSec((int) Math.round((double) metricNameEntry.getValue().metricValue()));
-                            }
-                            if (metricNameEntry.getKey().name().equals(OUT_BYTE_PER_SEC_METRIC)
-                                && metricNameEntry.getKey().description().equals(OUT_BYTE_PER_SEC_METRIC_DESCRIPTION)) {
-                                internalMetrics.setBytesOutPerSec((int) Math.round((double) metricNameEntry.getValue().metricValue()));
-                            }
-                        }
-                        return internalMetrics;
+                        InternalClusterMetrics.InternalClusterMetricsBuilder builder = InternalClusterMetrics.builder();
+                        builder.brokerCount(brokers.size()).activeControllers(c != null ? 1 : 0);
+                        // TODO: fill bytes in/out metrics
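+                        // broker ids are gathered but not yet used here; presumably groundwork for
+                        // per-broker log-dir metrics (note the new DescribeLogDirsResponse import)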
+                        List<Integer> brokerIds = brokers.stream().map(Node::id).collect(Collectors.toList());
+
+                        return builder.build();
                     }
                 )
             );
     }
 
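+    // NB: builds a fresh AdminClient for this call rather than reusing adminClientCache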
+    public Mono<InternalTopic> createTopic(KafkaCluster cluster, Mono<TopicFormData> topicFormData) {
+        AdminClient adminClient = this.createAdminClient(cluster);
+        return this.createTopic(adminClient, topicFormData);
+    }
+
     @SneakyThrows
-    public Mono<Topic> createTopic(AdminClient adminClient, KafkaCluster cluster, Mono<TopicFormData> topicFormData) {
+    public Mono<InternalTopic> createTopic(AdminClient adminClient, Mono<TopicFormData> topicFormData) {
         return topicFormData.flatMap(
             topicData -> {
                 NewTopic newTopic = new NewTopic(topicData.getName(), topicData.getPartitions(), topicData.getReplicationFactor().shortValue());
                 newTopic.configs(topicData.getConfigs());
-                createTopic(adminClient, newTopic);
-                return topicFormData;
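+                // createTopic(adminClient, newTopic) is Mono<Void> and completes empty, so a
+                // map() here would never fire; thenReturn re-emits the form data on success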
+                return createTopic(adminClient, newTopic).thenReturn(topicData);
             }).flatMap(topicData -> {
                 var tdw = adminClient.describeTopics(Collections.singletonList(topicData.getName()));
                 return getTopicDescription(tdw.values().get(topicData.getName()), topicData.getName());
-            }).map(s -> {
-                if (s == null) {
-                    throw new RuntimeException("Can't find created topic");
-                }
-                return s;
-            }).map(s -> getUpdatedCluster(new ClusterWithId(cluster.getName(), cluster)))
-            .map(s -> new Topic());
+            })
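+            // an empty description at this point means the freshly created topic cannot be found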
+            .switchIfEmpty(Mono.error(new RuntimeException("Can't find created topic")))
+            .map(ClusterUtil::mapToInternalTopic)
+            .flatMap(t ->
+                loadTopicsConfig(adminClient, Collections.singletonList(t.getName()))
+                    .map(c -> mergeWithConfigs(Collections.singletonList(t), c))
+                    .map(m -> m.values().iterator().next())
+            );
     }
 
     @SneakyThrows
@@ -168,10 +178,10 @@ public class KafkaService {
     }
 
-    public Mono<AdminClient> getOrCreateAdminClient(ClusterWithId clusterWithId) {
+    public Mono<AdminClient> getOrCreateAdminClient(KafkaCluster cluster) {
         AdminClient adminClient = adminClientCache.computeIfAbsent(
-            clusterWithId.getId(),
-            (id) -> createAdminClient(clusterWithId.getKafkaCluster())
+            cluster.getId(),
+            (id) -> createAdminClient(cluster)
         );
 
         return isAdminClientConnected(adminClient);
@@ -188,57 +198,7 @@ public class KafkaService {
         return getClusterId(adminClient).map( r -> adminClient);
     }
 
-    private void resetPartitionMetrics(InternalBrokersMetrics brokersMetrics) {
-        brokersMetrics.setOnlinePartitionCount(0);
-        brokersMetrics.setOfflinePartitionCount(0);
-        brokersMetrics.setUnderReplicatedPartitionCount(0);
-        brokersMetrics.setInSyncReplicasCount(0);
-        brokersMetrics.setOutOfSyncReplicasCount(0);
-    }
-
-    private InternalTopic collectTopicData(TopicDescription topicDescription) {
-        TopicDetails topicDetails = new TopicDetails();
-        var topic = InternalTopic.builder();
-        topic.internal(topicDescription.isInternal());
-        topic.name(topicDescription.name());
-        List<InternalPartition> partitions = new ArrayList<>();
-
-        int inSyncReplicasCount;
-        int replicasCount;
-
-        partitions.addAll(topicDescription.partitions().stream().map(
-            partition -> {
-                var partitionDto = InternalPartition.builder();
-                partitionDto.leader(partition.leader().id());
-                partitionDto.partition(partition.partition());
-                partitionDto.inSyncReplicasCount(partition.isr().size());
-                partitionDto.replicasCount(partition.replicas().size());
-                List<InternalReplica> replicas = partition.replicas().stream().map(
-                    r -> new InternalReplica(r.id(), partition.leader().id()!=r.id(), partition.isr().contains(r)))
-                    .collect(Collectors.toList());
-                partitionDto.replicas(replicas);
-                return partitionDto.build();
-            })
-            .collect(Collectors.toList()));
-
-        Integer urpCount = partitions.stream().flatMap(partition -> partition.getReplicas().stream()).filter(InternalReplica::isInSync).map(e -> 1).reduce(0, Integer::sum);
-        inSyncReplicasCount = partitions.stream().flatMap(s -> Stream.of(s.getInSyncReplicasCount())).reduce(Integer::sum).orElseGet(() -> 0);
-        replicasCount = partitions.stream().flatMap(s -> Stream.of(s.getReplicasCount())).reduce(Integer::sum).orElseGet(() -> 0);
-
-        topic.partitions(partitions);
-
-        topicDetails.setReplicas(replicasCount);
-        topicDetails.setPartitionCount(topicDescription.partitions().size());
-        topicDetails.setInSyncReplicas(inSyncReplicasCount);
-        topicDetails.setReplicationFactor(topicDescription.partitions().size() > 0
-            ? topicDescription.partitions().get(0).replicas().size()
-            : null);
-        topicDetails.setUnderReplicatedPartitions(urpCount);
-
-        topic.topicDetails(topicDetails);
-
-        return topic.build();
-    }
 
     private Mono<TopicDescription> getTopicDescription(KafkaFuture<TopicDescription> entry, String topicName) {
         return ClusterUtil.toMono(entry)
@@ -249,30 +209,36 @@ public class KafkaService {
     }
 
     @SneakyThrows
-    private Flux<Map<String, List<TopicConfig>>> loadTopicConfig(AdminClient adminClient, List<String> topicNames) {
-        return Flux.fromIterable(topicNames).flatMap(topicName -> {
-            Set<ConfigResource> resources = Collections.singleton(new ConfigResource(ConfigResource.Type.TOPIC, topicName));
-            return ClusterUtil.toMono(adminClient.describeConfigs(resources).all())
-                .map(configs -> {
-                    if (configs.isEmpty()) return Collections.emptyMap();
-                    Collection<ConfigEntry> entries = configs.values().iterator().next().entries();
-                    List<TopicConfig> topicConfigs = new ArrayList<>();
-                    for (ConfigEntry entry : entries) {
-                        TopicConfig topicConfig = new TopicConfig();
-                        topicConfig.setName(entry.name());
-                        topicConfig.setValue(entry.value());
-                        if (topicConfig.getName().equals(MESSAGE_FORMAT_VERSION_CONFIG)) {
-                            topicConfig.setDefaultValue(topicConfig.getValue());
-                        } else {
-                            topicConfig.setDefaultValue(TOPIC_DEFAULT_CONFIGS.get(entry.name()));
-                        }
-                        topicConfigs.add(topicConfig);
-                    }
-                    return Collections.singletonMap(topicName, topicConfigs);
-                });
-        });
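+    // Batch variant of the old loadTopicConfig: one describeConfigs call for all topics, keyed by name.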
+    private Mono<Map<String, List<InternalTopicConfig>>> loadTopicsConfig(AdminClient adminClient, List<String> topicNames) {
+        List<ConfigResource> resources = topicNames.stream()
+            .map(topicName -> new ConfigResource(ConfigResource.Type.TOPIC, topicName))
+            .collect(Collectors.toList());
+
+        return ClusterUtil.toMono(adminClient.describeConfigs(resources).all())
+            .map(configs ->
+                configs.entrySet().stream().map(
+                    c -> Tuples.of(
+                        c.getKey().name(),
+                        c.getValue().entries().stream().map(ClusterUtil::mapToInternalTopicConfig).collect(Collectors.toList())
+                    )
+                ).collect(Collectors.toMap(
+                    Tuple2::getT1,
+                    Tuple2::getT2
+                ))
+            );
+    }
+
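+    // Lists all consumer groups, then resolves their descriptions in one describeConsumerGroups call.
+    // Like the createTopic overload above, this uses a fresh (uncached) AdminClient.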
+    public Mono<List<ConsumerGroup>> getConsumerGroups(KafkaCluster cluster) {
+        var adminClient = this.createAdminClient(cluster);
+
+        return ClusterUtil.toMono(adminClient.listConsumerGroups().all())
+            .flatMap(s -> ClusterUtil.toMono(adminClient
+                .describeConsumerGroups(s.stream().map(ConsumerGroupListing::groupId).collect(Collectors.toList())).all()))
+            .map(s -> s.values().stream()
+                .map(c -> ClusterUtil.convertToConsumerGroup(c, cluster)).collect(Collectors.toList()));
     }
 
+
     @SneakyThrows
     private Mono<Void> createTopic(AdminClient adminClient, NewTopic newTopic) {
         return ClusterUtil.toMono(adminClient.createTopics(Collections.singletonList(newTopic))
@@ -281,21 +247,4 @@ public class KafkaService {
             .iterator()
             .next());
     }
-
-    private InternalBrokersMetrics updateBrokersMetrics(InternalBrokersMetrics brokersMetricsInput, List<InternalTopic> topics) {
-        var tempBrokersMetrics = InternalBrokersMetrics.builder().build();
-        var brokersMetrics = brokersMetricsInput.toBuilder();
-        for (InternalTopic topic : topics) {
-            tempBrokersMetrics.increaseUnderReplicatedPartitionCount(topic.getTopicDetails().getUnderReplicatedPartitions());
-            tempBrokersMetrics.increaseInSyncReplicasCount(topic.getTopicDetails().getInSyncReplicas());
-            tempBrokersMetrics.increaseOutOfSyncReplicasCount(topic.getTopicDetails().getReplicas() - topic.getTopicDetails().getInSyncReplicas());
-            tempBrokersMetrics.increaseOnlinePartitionCount(topic.getPartitions().stream().filter(s -> s.getLeader() != null).map(e -> 1).reduce(0, Integer::sum));
-            tempBrokersMetrics.increaseOfflinePartitionCount(topic.getPartitions().stream().filter(s -> s.getLeader() == null).map(e -> 1).reduce(0, Integer::sum));
-        }
-        return brokersMetrics.underReplicatedPartitionCount(tempBrokersMetrics.getUnderReplicatedPartitionCount())
-            .inSyncReplicasCount(tempBrokersMetrics.getInSyncReplicasCount())
-            .outOfSyncReplicasCount(tempBrokersMetrics.getOutOfSyncReplicasCount())
-            .onlinePartitionCount(tempBrokersMetrics.getOnlinePartitionCount())
-            .offlinePartitionCount(tempBrokersMetrics.getOfflinePartitionCount()).build();
-    }
 }