
Backend for updating topics; also fixes backend code carried over from another PR branch

Roman Nedzvetskiy, 5 years ago
Commit ee2f0cdfab

+ 27 - 0
kafka-ui-api/src/main/java/com/provectus/kafka/ui/cluster/model/ExtendedAdminClient.java

@@ -0,0 +1,27 @@
+package com.provectus.kafka.ui.cluster.model;
+
+import com.provectus.kafka.ui.cluster.util.ClusterUtil;
+import lombok.Data;
+import lombok.RequiredArgsConstructor;
+import org.apache.kafka.clients.admin.AdminClient;
+import reactor.core.publisher.Mono;
+
+import java.util.List;
+
+@Data
+@RequiredArgsConstructor
+public class ExtendedAdminClient {
+
+    private final AdminClient adminClient;
+    private final List<SupportedFeatures> supportedFeatures;
+
+    public enum SupportedFeatures {
+        INCREMENTAL_ALTER_CONFIGS,
+        ALTER_CONFIGS
+    }
+
+    public static Mono<ExtendedAdminClient> extendedAdminClient(AdminClient adminClient) {
+        return ClusterUtil.getSupportedFeatures(adminClient)
+                .map(s -> new ExtendedAdminClient(adminClient, s));
+    }
+}

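For context (not part of this commit): a minimal sketch of how the new ExtendedAdminClient wrapper could be built around a raw AdminClient and asked which config-update API the broker supports. The bootstrap address is a placeholder.

    import com.provectus.kafka.ui.cluster.model.ExtendedAdminClient;
    import org.apache.kafka.clients.admin.AdminClient;
    import org.apache.kafka.clients.admin.AdminClientConfig;

    import java.util.Properties;

    public class ExtendedAdminClientSketch {
        public static void main(String[] args) {
            Properties props = new Properties();
            props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder
            AdminClient rawClient = AdminClient.create(props);

            // The factory resolves supported features reactively via ClusterUtil.getSupportedFeatures.
            ExtendedAdminClient extended = ExtendedAdminClient.extendedAdminClient(rawClient).block();

            System.out.println("Incremental alterConfigs supported: "
                    + extended.getSupportedFeatures()
                              .contains(ExtendedAdminClient.SupportedFeatures.INCREMENTAL_ALTER_CONFIGS));
            rawClient.close();
        }
    }
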
+ 21 - 14
kafka-ui-api/src/main/java/com/provectus/kafka/ui/cluster/service/ClusterService.java

@@ -8,13 +8,11 @@ import com.provectus.kafka.ui.kafka.KafkaService;
 import com.provectus.kafka.ui.model.*;
 import lombok.RequiredArgsConstructor;
 import lombok.SneakyThrows;
-import org.apache.kafka.clients.admin.ConsumerGroupListing;
 import org.apache.kafka.clients.consumer.ConsumerConfig;
 import org.apache.kafka.clients.consumer.KafkaConsumer;
 import org.apache.kafka.clients.consumer.OffsetAndMetadata;
 import org.apache.kafka.common.TopicPartition;
 import org.apache.kafka.common.serialization.StringDeserializer;
-import org.springframework.http.ResponseEntity;
 import org.springframework.stereotype.Service;
 import reactor.core.publisher.Flux;
 import reactor.core.publisher.Mono;
@@ -76,7 +74,7 @@ public class ClusterService {
         var cluster = clustersStorage.getClusterByName(clusterName).orElseThrow(Throwable::new);
 
         return kafkaService.getOrCreateAdminClient(cluster).map(ac ->
-                                ac.describeConsumerGroups(Collections.singletonList(consumerGroupId)).all()
+                                ac.getAdminClient().describeConsumerGroups(Collections.singletonList(consumerGroupId)).all()
             ).flatMap(groups ->
                 groupMetadata(cluster, consumerGroupId)
                     .flatMap(offsets -> {
@@ -93,7 +91,7 @@ public class ClusterService {
     public Mono<Map<TopicPartition, OffsetAndMetadata>> groupMetadata(KafkaCluster cluster, String consumerGroupId) {
         return
                 kafkaService.getOrCreateAdminClient(cluster)
-                        .map(ac -> ac.listConsumerGroupOffsets(consumerGroupId).partitionsToOffsetAndMetadata())
+                        .map(ac -> ac.getAdminClient().listConsumerGroupOffsets(consumerGroupId).partitionsToOffsetAndMetadata())
                         .flatMap(ClusterUtil::toMono);
     }
 
@@ -114,20 +112,11 @@ public class ClusterService {
             return clustersStorage.getClusterByName(clusterName)
                     .map(kafkaService::getConsumerGroups)
                     .orElse(Mono.empty());
-
-//        var cluster = clustersStorage.getClusterByName(clusterName).orElseThrow(Throwable::new);
-//            return kafkaService.getOrCreateAdminClient(cluster).map(ac -> ac.listConsumerGroups().all())
-//                    .flatMap(s ->
-//                            kafkaService.getOrCreateAdminClient(cluster).flatMap(ac ->
-//                                ClusterUtil.toMono(s).map(s1 -> s1.stream().map(ConsumerGroupListing::groupId).collect(Collectors.toList())).map(ac::describeConsumerGroups)
-//                    ))
-//                    .flatMap(s -> ClusterUtil.toMono(s.all()).map(details -> details.values().stream()
-//                            .map(c -> ClusterUtil.convertToConsumerGroup(c, cluster)).collect(Collectors.toList())));
     }
 
     public Flux<Broker> getBrokers (String clusterName) {
         return kafkaService.getOrCreateAdminClient(clustersStorage.getClusterByName(clusterName).orElseThrow())
-                .flatMap(client -> ClusterUtil.toMono(client.describeCluster().nodes())
+                .flatMap(client -> ClusterUtil.toMono(client.getAdminClient().describeCluster().nodes())
                     .map(n -> n.stream().map(node -> {
                         Broker broker = new Broker();
                         broker.setId(node.idString());
@@ -135,4 +124,22 @@ public class ClusterService {
                     }).collect(Collectors.toList())))
                 .flatMapMany(Flux::fromIterable);
     }
+
+    @SneakyThrows
+    public Mono<Topic> updateTopic(String clusterName, String topicName, Mono<TopicFormData> topicFormData) {
+        return clustersStorage.getClusterByName(clusterName).map(cl ->
+                topicFormData
+                        .flatMap(t -> kafkaService.updateTopic(cl, topicName, t))
+                        .flatMap(t -> updateCluster(t, clusterName, cl))
+        )
+                .orElse(Mono.empty());
+    }
+
+    private <T> Mono<T> updateCluster(T topic, String clusterName, KafkaCluster cluster) {
+        return kafkaService.getUpdatedCluster(cluster)
+                .map(c -> {
+                    clustersStorage.setKafkaCluster(clusterName, c);
+                    return topic;
+                });
+    }
 }

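For orientation (not part of this commit): a sketch of exercising the new ClusterService.updateTopic entry point, which pushes the changed configs through KafkaService and then refreshes the cached KafkaCluster via updateCluster. It assumes a cluster named "local" is configured, that clusterService is injected by Spring, and that the generated TopicFormData model exposes setConfigs(Map<String, String>); the config key and value are placeholders.

    import com.provectus.kafka.ui.cluster.service.ClusterService;
    import com.provectus.kafka.ui.model.TopicFormData;
    import org.springframework.stereotype.Component;
    import reactor.core.publisher.Mono;

    import java.util.Collections;

    @Component
    public class TopicUpdateSketch {

        private final ClusterService clusterService;

        public TopicUpdateSketch(ClusterService clusterService) {
            this.clusterService = clusterService;
        }

        public void updateRetention() {
            TopicFormData form = new TopicFormData();
            // Assumption: the generated model exposes setConfigs(Map<String, String>).
            form.setConfigs(Collections.singletonMap("retention.ms", "86400000")); // placeholders

            clusterService.updateTopic("local", "my-topic", Mono.just(form))
                    .subscribe(topic -> System.out.println("Updated topic: " + topic.getName()));
        }
    }
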
+ 65 - 7
kafka-ui-api/src/main/java/com/provectus/kafka/ui/cluster/util/ClusterUtil.java

@@ -1,18 +1,18 @@
 package com.provectus.kafka.ui.cluster.util;
 
 import com.provectus.kafka.ui.cluster.model.*;
-import com.provectus.kafka.ui.model.ConsumerGroup;
-import com.provectus.kafka.ui.model.ConsumerTopicPartitionDetail;
-import com.provectus.kafka.ui.model.ServerStatus;
-import org.apache.kafka.clients.admin.ConfigEntry;
-import org.apache.kafka.clients.admin.ConsumerGroupDescription;
-import org.apache.kafka.clients.admin.MemberDescription;
-import org.apache.kafka.clients.admin.TopicDescription;
+import com.provectus.kafka.ui.model.*;
+import lombok.extern.slf4j.Slf4j;
+import org.apache.kafka.clients.admin.*;
 import org.apache.kafka.clients.consumer.OffsetAndMetadata;
 import org.apache.kafka.common.KafkaFuture;
+import org.apache.kafka.common.Node;
 import org.apache.kafka.common.TopicPartition;
+import org.apache.kafka.common.config.ConfigResource;
 import reactor.core.publisher.Mono;
 
+import java.util.ArrayList;
+import java.util.Collections;
 import java.util.List;
 import java.util.Map;
 import java.util.stream.Collectors;
@@ -21,8 +21,11 @@ import java.util.stream.Stream;
 import static com.provectus.kafka.ui.kafka.KafkaConstants.TOPIC_DEFAULT_CONFIGS;
 import static org.apache.kafka.common.config.TopicConfig.MESSAGE_FORMAT_VERSION_CONFIG;
 
+@Slf4j
 public class ClusterUtil {
 
+    private static final String CLUSTER_VERSION_PARAM_KEY = "inter.broker.protocol.version";
+
     public static <T> Mono<T> toMono(KafkaFuture<T> future){
         return Mono.create(sink -> future.whenComplete((res, ex)->{
             if (ex!=null) {
@@ -33,6 +36,16 @@ public class ClusterUtil {
         }));
     }
 
+    public static Mono<String> toMono(KafkaFuture<Void> future, String topicName){
+        return Mono.create(sink -> future.whenComplete((res, ex)->{
+            if (ex!=null) {
+                sink.error(ex);
+            } else {
+                sink.success(topicName);
+            }
+        }));
+    }
+
     public static ConsumerGroup convertToConsumerGroup(ConsumerGroupDescription c, KafkaCluster cluster) {
         ConsumerGroup consumerGroup = new ConsumerGroup();
         consumerGroup.setConsumerGroupId(c.groupId());
@@ -127,4 +140,49 @@ public class ClusterUtil {
         return serverStatus.equals(ServerStatus.ONLINE) ? 1 : 0;
     }
 
+    public static Mono<List<ExtendedAdminClient.SupportedFeatures>> getSupportedFeatures(AdminClient adminClient) {
+        List<ExtendedAdminClient.SupportedFeatures> supportedFeatures = new ArrayList<>();
+        return ClusterUtil.toMono(adminClient.describeCluster().controller())
+                .map(Node::id)
+                .map(id -> Collections.singletonList(new ConfigResource(ConfigResource.Type.BROKER, id.toString())))
+                .flatMap(brokerCR -> ClusterUtil.toMono(adminClient.describeConfigs(brokerCR).all())
+                        .map(s -> {
+                            supportedFeatures.add(getSupportedUpdateFeature(s));
+                            return supportedFeatures;
+                        }));
+    }
+
+    private static ExtendedAdminClient.SupportedFeatures getSupportedUpdateFeature(Map<ConfigResource, Config> configs) {
+        String version = configs.values().stream()
+                .map(en -> en.entries().stream()
+                        .filter(en1 -> en1.name().contains(CLUSTER_VERSION_PARAM_KEY))
+                        .findFirst().orElseThrow())
+                .findFirst().orElseThrow().value();
+        try {
+            return Float.parseFloat(version.split("-")[0]) <= 2.3f
+                    ? ExtendedAdminClient.SupportedFeatures.ALTER_CONFIGS : ExtendedAdminClient.SupportedFeatures.INCREMENTAL_ALTER_CONFIGS;
+        } catch (Exception e) {
+            log.error("Conversion clusterVersion {} to float value failed", version);
+            throw e;
+        }
+    }
+
+    public static Topic convertToTopic (InternalTopic internalTopic) {
+        Topic topic = new Topic();
+        topic.setName(internalTopic.getName());
+        List<Partition> partitions = internalTopic.getPartitions().stream().flatMap(s -> {
+            Partition partition = new Partition();
+            partition.setPartition(s.getPartition());
+            partition.setLeader(s.getLeader());
+            partition.setReplicas(s.getReplicas().stream().flatMap(r -> {
+                Replica replica = new Replica();
+                replica.setBroker(r.getBroker());
+                return Stream.of(replica);
+            }).collect(Collectors.toList()));
+            return Stream.of(partition);
+        }).collect(Collectors.toList());
+        topic.setPartitions(partitions);
+        return topic;
+    }
+
 }

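For context (not part of this commit): a sketch of the new toMono(KafkaFuture&lt;Void&gt;, String) overload, which completes with the supplied topic name once the Void future succeeds; the reworked createTopic and both alter-config paths rely on it. The bootstrap address and topic settings are placeholders.

    import com.provectus.kafka.ui.cluster.util.ClusterUtil;
    import org.apache.kafka.clients.admin.AdminClient;
    import org.apache.kafka.clients.admin.AdminClientConfig;
    import org.apache.kafka.clients.admin.NewTopic;

    import java.util.Collections;
    import java.util.Properties;

    public class ToMonoSketch {
        public static void main(String[] args) {
            Properties props = new Properties();
            props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder

            try (AdminClient admin = AdminClient.create(props)) {
                NewTopic topic = new NewTopic("example-topic", 1, (short) 1); // placeholder settings

                // createTopics(...).all() yields a KafkaFuture<Void>; the overload maps it
                // to a Mono<String> that emits the topic name on success.
                String created = ClusterUtil.toMono(
                        admin.createTopics(Collections.singletonList(topic)).all(), topic.name())
                        .block();
                System.out.println("Created topic: " + created);
            }
        }
    }
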
+ 65 - 31
kafka-ui-api/src/main/java/com/provectus/kafka/ui/kafka/KafkaService.java

@@ -1,12 +1,10 @@
 package com.provectus.kafka.ui.kafka;
 
-import com.provectus.kafka.ui.cluster.model.InternalClusterMetrics;
-import com.provectus.kafka.ui.cluster.model.InternalTopic;
-import com.provectus.kafka.ui.cluster.model.InternalTopicConfig;
-import com.provectus.kafka.ui.cluster.model.KafkaCluster;
+import com.provectus.kafka.ui.cluster.model.*;
 import com.provectus.kafka.ui.cluster.util.ClusterUtil;
 import com.provectus.kafka.ui.model.ConsumerGroup;
 import com.provectus.kafka.ui.model.ServerStatus;
+import com.provectus.kafka.ui.model.Topic;
 import com.provectus.kafka.ui.model.TopicFormData;
 import com.provectus.kafka.ui.zookeeper.ZookeeperService;
 import lombok.RequiredArgsConstructor;
@@ -16,6 +14,7 @@ import org.apache.kafka.clients.admin.*;
 import org.apache.kafka.common.KafkaFuture;
 import org.apache.kafka.common.Node;
 import org.apache.kafka.common.config.ConfigResource;
+import org.springframework.beans.factory.annotation.Value;
 import org.springframework.stereotype.Service;
 import reactor.core.publisher.Mono;
 import reactor.util.function.Tuple2;
@@ -27,23 +26,27 @@ import java.util.Map;
 import java.util.Properties;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.stream.Collectors;
+import java.util.stream.Stream;
 
 @Service
 @RequiredArgsConstructor
 @Log4j2
 public class KafkaService {
 
+    @Value("${kafka.admin-client-timeout}")
+    private int clientTimeout;
+
     private static final ListTopicsOptions LIST_TOPICS_OPTIONS = new ListTopicsOptions().listInternal(true);
 
     private final ZookeeperService zookeeperService;
-    private final Map<String, AdminClient> adminClientCache = new ConcurrentHashMap<>();
+    private final Map<String, Mono<ExtendedAdminClient>> adminClientCache = new ConcurrentHashMap<>();
 
     @SneakyThrows
     public Mono<KafkaCluster> getUpdatedCluster(KafkaCluster cluster) {
         return getOrCreateAdminClient(cluster).flatMap(
-                ac -> getClusterMetrics(ac).flatMap( clusterMetrics ->
-                            getTopicsData(ac).flatMap( topics ->
-                                loadTopicsConfig(ac, topics.stream().map(InternalTopic::getName).collect(Collectors.toList()))
+                ac -> getClusterMetrics(ac.getAdminClient()).flatMap( clusterMetrics ->
+                            getTopicsData(ac.getAdminClient()).flatMap( topics ->
+                                loadTopicsConfig(ac.getAdminClient(), topics.stream().map(InternalTopic::getName).collect(Collectors.toList()))
                                         .map( configs -> mergeWithConfigs(topics, configs) )
                             ).map( topics -> buildFromData(cluster, clusterMetrics, topics))
                         )
@@ -150,8 +153,7 @@ public class KafkaService {
 
 
     public Mono<InternalTopic> createTopic(KafkaCluster cluster, Mono<TopicFormData> topicFormData) {
-        AdminClient adminClient = this.createAdminClient(cluster);
-        return this.createTopic(adminClient, topicFormData);
+        return getOrCreateAdminClient(cluster).flatMap(ac -> createTopic(ac.getAdminClient(), topicFormData));
     }
 
     @SneakyThrows
@@ -180,24 +182,23 @@ public class KafkaService {
     }
 
 
-    public Mono<AdminClient> getOrCreateAdminClient(KafkaCluster cluster) {
-        AdminClient adminClient = adminClientCache.computeIfAbsent(
+    public Mono<ExtendedAdminClient> getOrCreateAdminClient(KafkaCluster cluster) {
+        return adminClientCache.computeIfAbsent(
                 cluster.getName(),
                 (id) -> createAdminClient(cluster)
-        );
-
-        return isAdminClientConnected(adminClient);
+        ).flatMap(this::isAdminClientConnected);
     }
 
-    public AdminClient createAdminClient(KafkaCluster kafkaCluster) {
+    public Mono<ExtendedAdminClient> createAdminClient(KafkaCluster kafkaCluster) {
         Properties properties = new Properties();
         properties.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaCluster.getBootstrapServers());
-        properties.put(AdminClientConfig.REQUEST_TIMEOUT_MS_CONFIG, 5000);
-        return AdminClient.create(properties);
+        properties.put(AdminClientConfig.REQUEST_TIMEOUT_MS_CONFIG, clientTimeout);
+        AdminClient adminClient = AdminClient.create(properties);
+        return ExtendedAdminClient.extendedAdminClient(adminClient);
     }
 
-    private Mono<AdminClient> isAdminClientConnected(AdminClient adminClient) {
-        return getClusterId(adminClient).map( r -> adminClient);
+    private Mono<ExtendedAdminClient> isAdminClientConnected(ExtendedAdminClient adminClient) {
+        return getClusterId(adminClient.getAdminClient()).map( r -> adminClient);
     }
 
 
@@ -231,22 +232,55 @@ public class KafkaService {
     }
 
     public Mono<List<ConsumerGroup>> getConsumerGroups(KafkaCluster cluster) {
-        var adminClient =  this.createAdminClient(cluster);
-
-        return ClusterUtil.toMono(adminClient.listConsumerGroups().all())
-                .flatMap(s -> ClusterUtil.toMono(adminClient
+        return getOrCreateAdminClient(cluster).flatMap(ac -> ClusterUtil.toMono(ac.getAdminClient().listConsumerGroups().all())
+                .flatMap(s -> ClusterUtil.toMono(ac.getAdminClient()
                         .describeConsumerGroups(s.stream().map(ConsumerGroupListing::groupId).collect(Collectors.toList())).all()))
                 .map(s -> s.values().stream()
-                        .map(c -> ClusterUtil.convertToConsumerGroup(c, cluster)).collect(Collectors.toList()));
+                        .map(c -> ClusterUtil.convertToConsumerGroup(c, cluster)).collect(Collectors.toList())));
     }
 
 
     @SneakyThrows
-    private Mono<Void> createTopic(AdminClient adminClient, NewTopic newTopic) {
-        return ClusterUtil.toMono(adminClient.createTopics(Collections.singletonList(newTopic))
-                    .values()
-                    .values()
-                    .iterator()
-                    .next());
+    private Mono<String> createTopic(AdminClient adminClient, NewTopic newTopic) {
+        return ClusterUtil.toMono(adminClient.createTopics(Collections.singletonList(newTopic)).all(), newTopic.name());
+    }
+
+    @SneakyThrows
+    public Mono<Topic> updateTopic(KafkaCluster cluster, String topicName, TopicFormData topicFormData) {
+        ConfigResource topicCR = new ConfigResource(ConfigResource.Type.TOPIC, topicName);
+        return getOrCreateAdminClient(cluster)
+                .flatMap(ac -> {
+                    if (ac.getSupportedFeatures().contains(ExtendedAdminClient.SupportedFeatures.INCREMENTAL_ALTER_CONFIGS)) {
+                        return incrementalAlterConfig(topicFormData, topicCR, ac)
+                                .flatMap(c -> getUpdatedTopic(ac, topicName));
+                    } else {
+                        return alterConfig(topicFormData, topicCR, ac)
+                                .flatMap(c -> getUpdatedTopic(ac, topicName));
+                    }
+                });
+    }
+
+
+
+    private Mono<Topic> getUpdatedTopic (ExtendedAdminClient ac, String topicName) {
+        return getTopicsData(ac.getAdminClient())
+                .map(s -> s.stream()
+                        .filter(t -> t.getName().equals(topicName)).findFirst().orElseThrow())
+                .map(ClusterUtil::convertToTopic);
+    }
+
+    private Mono<String> incrementalAlterConfig(TopicFormData topicFormData, ConfigResource topicCR, ExtendedAdminClient ac) {
+        List<AlterConfigOp> listOp = topicFormData.getConfigs().entrySet().stream()
+                .flatMap(cfg -> Stream.of(new AlterConfigOp(new ConfigEntry(cfg.getKey(), cfg.getValue()), AlterConfigOp.OpType.SET))).collect(Collectors.toList());
+        return ClusterUtil.toMono(ac.getAdminClient().incrementalAlterConfigs(Collections.singletonMap(topicCR, listOp)).all(), topicCR.name());
+    }
+
+    private Mono<String> alterConfig(TopicFormData topicFormData, ConfigResource topicCR, ExtendedAdminClient ac) {
+        List<ConfigEntry> configEntries = topicFormData.getConfigs().entrySet().stream()
+                .flatMap(cfg -> Stream.of(new ConfigEntry(cfg.getKey(), cfg.getValue()))).collect(Collectors.toList());
+        Config config = new Config(configEntries);
+        Map<ConfigResource, Config> map = Collections.singletonMap(topicCR, config);
+        return ClusterUtil.toMono(ac.getAdminClient().alterConfigs(map).all(), topicCR.name());
+
     }
 }

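For context (not part of this commit): a standalone, blocking sketch of the incrementalAlterConfigs path that the new incrementalAlterConfig(...) helper wraps reactively; brokers detected as ALTER_CONFIGS instead go through the legacy alterConfigs call, which replaces the resource's whole config set rather than patching individual entries. The bootstrap address, topic name, and config value are placeholders.

    import org.apache.kafka.clients.admin.AdminClient;
    import org.apache.kafka.clients.admin.AdminClientConfig;
    import org.apache.kafka.clients.admin.AlterConfigOp;
    import org.apache.kafka.clients.admin.ConfigEntry;
    import org.apache.kafka.common.config.ConfigResource;

    import java.util.Collections;
    import java.util.Properties;

    public class IncrementalAlterSketch {
        public static void main(String[] args) throws Exception {
            Properties props = new Properties();
            props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder

            try (AdminClient admin = AdminClient.create(props)) {
                ConfigResource topicCR = new ConfigResource(ConfigResource.Type.TOPIC, "my-topic");
                AlterConfigOp op = new AlterConfigOp(
                        new ConfigEntry("retention.ms", "86400000"), AlterConfigOp.OpType.SET);

                // Only the listed entries are touched; the service bridges this
                // KafkaFuture<Void> into a Mono via ClusterUtil.toMono instead of blocking.
                admin.incrementalAlterConfigs(
                        Collections.singletonMap(topicCR, Collections.singletonList(op)))
                        .all()
                        .get();
            }
        }
    }
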
+ 6 - 2
kafka-ui-api/src/main/java/com/provectus/kafka/ui/rest/MetricsRestController.java

@@ -11,9 +11,8 @@ import org.springframework.web.server.ServerWebExchange;
 import reactor.core.publisher.Flux;
 import reactor.core.publisher.Mono;
 
-import java.time.OffsetDateTime;
-
 import javax.validation.Valid;
+import java.time.OffsetDateTime;
 
 @RestController
 @RequiredArgsConstructor
@@ -88,4 +87,9 @@ public class MetricsRestController implements ApiClustersApi {
     public Mono<ResponseEntity<ConsumerGroupDetails>> getConsumerGroup(String clusterName, String consumerGroupId, ServerWebExchange exchange) {
         return clusterService.getConsumerGroupDetail(clusterName, consumerGroupId).map(ResponseEntity::ok);
     }
+
+    @Override
+    public Mono<ResponseEntity<Topic>> updateTopic(String clusterId, String topicName, @Valid Mono<TopicFormData> topicFormData, ServerWebExchange exchange) {
+        return clusterService.updateTopic(clusterId, topicName, topicFormData).map(ResponseEntity::ok);
+    }
 }

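For context (not part of this commit): an illustrative reactive client call against the new PATCH endpoint added here and in the contract change below. The host, cluster name, topic name, and config value are placeholders, and it assumes the generated TopicFormData model exposes setConfigs(Map<String, String>).

    import com.provectus.kafka.ui.model.Topic;
    import com.provectus.kafka.ui.model.TopicFormData;
    import org.springframework.http.MediaType;
    import org.springframework.web.reactive.function.client.WebClient;

    import java.util.Collections;

    public class UpdateTopicClientSketch {
        public static void main(String[] args) {
            TopicFormData form = new TopicFormData();
            form.setConfigs(Collections.singletonMap("cleanup.policy", "compact")); // placeholder

            Topic updated = WebClient.create("http://localhost:8080") // placeholder host
                    .patch()
                    .uri("/api/clusters/{clusterName}/topics/{topicName}", "local", "my-topic")
                    .contentType(MediaType.APPLICATION_JSON)
                    .bodyValue(form)
                    .retrieve()
                    .bodyToMono(Topic.class)
                    .block();

            System.out.println("Updated topic: " + updated.getName());
        }
    }
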
+ 4 - 1
kafka-ui-api/src/main/resources/application-local.yml

@@ -7,4 +7,7 @@ kafka:
     -
       name: secondLocal
       bootstrapServers: localhost:29092
-      zookeeper: localhost:2182
+      zookeeper: localhost:2182
+  admin-client-timeout: 5000
+zookeeper:
+  connection-timeout: 1000

+ 4 - 1
kafka-ui-api/src/main/resources/application-sdp.yml

@@ -7,4 +7,7 @@ kafka:
     -
       name: secondLocal
       zookeeper: zookeeper1:2181
-      bootstrapServers: kafka1:29092
+      bootstrapServers: kafka1:29092
+  admin-client-timeout: 5000
+zookeeper:
+  connection-timeout: 1000

+ 93 - 3
kafka-ui-contract/src/main/resources/swagger/kafka-ui-api.yaml

@@ -141,6 +141,34 @@ paths:
             application/json:
               schema:
                 $ref: '#/components/schemas/TopicDetails'
+    patch:
+      tags:
+        - /api/clusters
+      summary: updateTopic
+      operationId: updateTopic
+      parameters:
+        - name: clusterName
+          in: path
+          required: true
+          schema:
+            type: string
+        - name: topicName
+          in: path
+          required: true
+          schema:
+            type: string
+      requestBody:
+        content:
+          application/json:
+            schema:
+              $ref: '#/components/schemas/TopicFormData'
+      responses:
+        200:
+          description: Updated
+          content:
+            application/json:
+              schema:
+                $ref: '#/components/schemas/Topic'
 
   /api/clusters/{clusterName}/topics/{topicName}/config:
     get:
@@ -210,12 +238,37 @@ paths:
                 items:
                   $ref: '#/components/schemas/TopicMessage'
 
-  /api/clusters/{clusterName}/consumerGroups:
+  /api/clusters/{clusterName}/consumer-groups/{id}:
     get:
       tags:
         - /api/clusters
-      summary: getConsumerGroup
+      summary: get Consumer Group By Id
       operationId: getConsumerGroup
+      parameters:
+        - name: clusterName
+          in: path
+          required: true
+          schema:
+            type: string
+        - name: id
+          in: path
+          required: true
+          schema:
+            type: string
+      responses:
+        200:
+          description: OK
+          content:
+            application/json:
+              schema:
+                $ref: '#/components/schemas/ConsumerGroupDetails'
+
+  /api/clusters/{clusterName}/consumerGroups:
+    get:
+      tags:
+        - /api/clusters
+      summary: get all ConsumerGroups
+      operationId: getConsumerGroups
       parameters:
         - name: clusterName
           in: path
@@ -406,4 +459,41 @@ components:
       required:
         - partition
         - offset
-        - timestamp
+        - timestamp
+
+    TopicPartitionDto:
+      type: object
+      properties:
+        topic:
+          type: string
+        partition:
+          type: integer
+      required:
+        - topic
+        - partition
+
+    ConsumerTopicPartitionDetail:
+      type: object
+      properties:
+        consumerId:
+          type: string
+        topic:
+          type: string
+        partition:
+          type: integer
+        currentOffset:
+          type: long
+        endOffset:
+          type: long
+        messagesBehind:
+          type: long
+
+    ConsumerGroupDetails:
+      type: object
+      properties:
+        consumerGroupId:
+          type: string
+        consumers:
+          type: array
+          items:
+            $ref: '#/components/schemas/ConsumerTopicPartitionDetail'