Forráskód Böngészése

Changed to immutable

Roman Nedzvetskiy, 5 years ago
parent
commit
2c6cca65b9

+ 1 - 1
kafka-ui-api/src/main/java/com/provectus/kafka/ui/cluster/mapper/ClusterMapper.java

@@ -14,6 +14,6 @@ public abstract class ClusterMapper {
     @Mapping(target = "lastKafkaException", ignore = true)
     @Mapping(target = "lastZookeeperException", ignore = true)
     @Mapping(target = "topicConfigsMap", ignore = true)
-    @Mapping(target = "topics", ignore = true)
+//    @Mapping(target = "topics", ignore = true)
     public abstract KafkaCluster toKafkaCluster(ClustersProperties.Cluster clusterProperties);
 }

+ 7 - 8
kafka-ui-api/src/main/java/com/provectus/kafka/ui/cluster/model/KafkaCluster.java

@@ -4,14 +4,13 @@ import com.provectus.kafka.ui.model.*;
 import lombok.Builder;
 import lombok.Data;
 
-import java.util.ArrayList;
+import java.io.Serializable;
 import java.util.List;
 import java.util.Map;
-import java.util.concurrent.ConcurrentHashMap;
 
 @Data
-@Builder(toBuilder = true)
-public class KafkaCluster {
+@Builder(toBuilder = true, builderClassName = "KafkaClusterBuilder")
+public class KafkaCluster implements Serializable {
 
     private final String id = "";
     private final String name;
@@ -23,10 +22,10 @@ public class KafkaCluster {
     private final Cluster cluster;
     private final BrokersMetrics brokersMetrics;
 
-    private List<Topic> topics = new ArrayList<>();
-    private final Map<String, TopicDetails> topicDetailsMap = new ConcurrentHashMap<>();
-    private Map<String, List<TopicConfig>> topicConfigsMap = new ConcurrentHashMap<>();
-    private final ServerStatus zookeeperStatus = ServerStatus.OFFLINE;
+    private final List<Topic> topics;
+    private final Map<String, TopicDetails> topicDetailsMap;
+    private final Map<String, List<TopicConfig>> topicConfigsMap;
+    private final ServerStatus zookeeperStatus;
 
     private final Throwable lastKafkaException;
     private final Throwable lastZookeeperException;

+ 14 - 16
kafka-ui-api/src/main/java/com/provectus/kafka/ui/cluster/service/ClusterService.java

@@ -8,8 +8,6 @@ import com.provectus.kafka.ui.model.*;
 import lombok.RequiredArgsConstructor;
 import lombok.SneakyThrows;
 import org.apache.kafka.clients.admin.ConsumerGroupListing;
-import org.springframework.http.HttpStatus;
-import org.springframework.http.ResponseEntity;
 import org.springframework.stereotype.Service;
 import reactor.core.publisher.Flux;
 import reactor.core.publisher.Mono;
@@ -24,48 +22,48 @@ public class ClusterService {
     private final ClustersStorage clustersStorage;
     private final KafkaService kafkaService;
 
-    public Mono<ResponseEntity<Flux<Cluster>>> getClusters() {
+    public Flux<Cluster> getClusters() {
         List<Cluster> clusters = clustersStorage.getKafkaClusters()
                 .stream()
                 .map(KafkaCluster::getCluster)
                 .collect(Collectors.toList());
 
-        return Mono.just(ResponseEntity.ok(Flux.fromIterable(clusters)));
+        return Flux.fromIterable(clusters);
     }
 
-    public Mono<ResponseEntity<BrokersMetrics>> getBrokersMetrics(String name) {
+    public BrokersMetrics getBrokersMetrics(String name) {
         KafkaCluster cluster = clustersStorage.getClusterByName(name);
         if (cluster == null) return null;
-        return Mono.just(ResponseEntity.ok(cluster.getBrokersMetrics()));
+        return cluster.getBrokersMetrics();
     }
 
-    public Mono<ResponseEntity<Flux<Topic>>> getTopics(String name) {
+    public Flux<Topic> getTopics(String name) {
         KafkaCluster cluster = clustersStorage.getClusterByName(name);
         if (cluster == null) return null;
-        return Mono.just(ResponseEntity.ok(Flux.fromIterable(cluster.getTopics())));
+        return Flux.fromIterable(cluster.getTopics());
     }
 
-    public Mono<ResponseEntity<TopicDetails>> getTopicDetails(String name, String topicName) {
+    public TopicDetails getTopicDetails(String name, String topicName) {
         KafkaCluster cluster = clustersStorage.getClusterByName(name);
         if (cluster == null) return null;
-        return Mono.just(ResponseEntity.ok(cluster.getOrCreateTopicDetails(topicName)));
+        return cluster.getOrCreateTopicDetails(topicName);
     }
 
-    public Mono<ResponseEntity<Flux<TopicConfig>>> getTopicConfigs(String name, String topicName) {
+    public Flux<TopicConfig> getTopicConfigs(String name, String topicName) {
         KafkaCluster cluster = clustersStorage.getClusterByName(name);
         if (cluster == null) return null;
-        return Mono.just(ResponseEntity.ok(Flux.fromIterable(cluster.getTopicConfigsMap().get(topicName))));
+        return Flux.fromIterable(cluster.getTopicConfigsMap().get(topicName));
     }
 
-    public Mono<ResponseEntity<Topic>> createTopic(String name, Mono<TopicFormData> topicFormData) {
+    public Mono<Topic> createTopic(String name, Mono<TopicFormData> topicFormData) {
         KafkaCluster cluster = clustersStorage.getClusterByName(name);
         if (cluster == null) return null;
         var adminClient = kafkaService.createAdminClient(cluster);
-        return kafkaService.createTopic(adminClient, cluster, topicFormData).map(s -> new ResponseEntity<>(s, HttpStatus.CREATED));
+        return kafkaService.createTopic(adminClient, cluster, topicFormData);
     }
 
     @SneakyThrows
-    public Mono<ResponseEntity<Flux<ConsumerGroup>>> getConsumerGroup (String clusterName) {
+    public Flux<ConsumerGroup> getConsumerGroup (String clusterName) {
             var cluster = clustersStorage.getClusterByName(clusterName);
             var adminClient =  kafkaService.createAdminClient(cluster);
             return ClusterUtil.toMono(adminClient.listConsumerGroups().all())
@@ -73,6 +71,6 @@ public class ClusterService {
                             .describeConsumerGroups(s.stream().map(ConsumerGroupListing::groupId).collect(Collectors.toList())).all()))
                     .map(s -> s.values().stream()
                             .map(c -> ClusterUtil.convertToConsumerGroup(c, cluster)).collect(Collectors.toList()))
-                    .map(s -> ResponseEntity.ok(Flux.fromIterable(s)));
+                    .flatMapIterable(s -> s);
     }
 }

+ 18 - 1
kafka-ui-api/src/main/java/com/provectus/kafka/ui/cluster/util/ClusterUtil.java

@@ -6,11 +6,15 @@ import com.provectus.kafka.ui.model.ConsumerGroup;
 import com.provectus.kafka.ui.model.Partition;
 import com.provectus.kafka.ui.model.Replica;
 import com.provectus.kafka.ui.model.Topic;
+import lombok.SneakyThrows;
 import org.apache.kafka.clients.admin.ConsumerGroupDescription;
 import org.apache.kafka.common.KafkaFuture;
 import reactor.core.publisher.Mono;
 
-import java.util.ArrayList;
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.ObjectInputStream;
+import java.io.ObjectOutputStream;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Set;
@@ -62,4 +66,17 @@ public class ClusterUtil {
         }).collect(Collectors.toList()));
         return topic;
     }
+
+    @SneakyThrows
+    public static <T> T clone(T subject) {
+        ByteArrayOutputStream baos = new ByteArrayOutputStream();
+        ObjectOutputStream ous = new ObjectOutputStream(baos);
+        ous.writeObject(subject);
+        ous.close();
+        ByteArrayInputStream bais = new ByteArrayInputStream(baos.toByteArray());
+        ObjectInputStream ois = new ObjectInputStream(bais);
+        return (T) ois.readObject();
+
+
+    }
 }

+ 90 - 78
kafka-ui-api/src/main/java/com/provectus/kafka/ui/kafka/KafkaService.java

@@ -31,39 +31,52 @@ public class KafkaService {
 
     private Map<String, AdminClient> adminClientCache = new ConcurrentHashMap<>();
 
+    @SneakyThrows
     public Mono<ClusterWithId> getUpdatedCluster(ClusterWithId clusterWithId) {
-        var kafkaCluster = clusterWithId.getKafkaCluster();
+        var tempCluster = ClusterUtil.clone(clusterWithId.getKafkaCluster());
+        var internalCluster = clusterWithId.getKafkaCluster().toBuilder();
         return getOrCreateAdminClient(clusterWithId).flatMap(
                     ac ->
-                        getClusterMetrics(ac, kafkaCluster).flatMap(
-                                metrics -> {
-                                    Cluster cluster = kafkaCluster.getCluster();
-                                    cluster.setStatus(ServerStatus.ONLINE);
-                                    cluster.setBytesInPerSec(metrics.getBytesInPerSec());
-                                    cluster.setBytesOutPerSec(metrics.getBytesOutPerSec());
-                                    BrokersMetrics brokersMetrics = kafkaCluster.getBrokersMetrics() == null ? new BrokersMetrics() : kafkaCluster.getBrokersMetrics();
-                                    brokersMetrics.activeControllers(metrics.getActiveControllers());
-                                    brokersMetrics.brokerCount(metrics.getBrokerCount());
-                                    cluster.setBrokerCount(metrics.getBrokerCount());
-                                    var internalCluster = kafkaCluster.toBuilder().cluster(cluster).brokersMetrics(brokersMetrics).build();
-                                    return getTopicsData(ac, internalCluster)
-                                            .map(topics -> {
-                                                internalCluster.setTopics(ClusterUtil.convertToExternalTopicList(topics));
-                                                internalCluster.getCluster().setTopicCount(topics.size());
-                                                return internalCluster;
-                                            }).map(kc -> clusterWithId.toBuilder().kafkaCluster(
-                                                            kc.toBuilder()
-                                                            .cluster(cluster)
-                                                            .brokersMetrics(brokersMetrics)
-                                                            .build()
-                                            ).build());
-                                })
+                        getClusterMetrics(ac).flatMap(
+                            metrics -> {
+                                Cluster cluster = ClusterUtil.clone(tempCluster.getCluster());
+                                cluster.setStatus(ServerStatus.ONLINE);
+                                cluster.setBytesInPerSec(metrics.getBytesInPerSec());
+                                cluster.setBytesOutPerSec(metrics.getBytesOutPerSec());
+                                BrokersMetrics brokersMetrics = tempCluster.getBrokersMetrics() != null
+                                        ? ClusterUtil.clone(tempCluster.getBrokersMetrics()) : new BrokersMetrics();
+                                brokersMetrics.setBrokerCount(metrics.getBrokerCount());
+                                brokersMetrics.activeControllers(metrics.getActiveControllers());
+                                brokersMetrics.brokerCount(metrics.getBrokerCount());
+                                resetMetrics(brokersMetrics);
+                                cluster.setBrokerCount(metrics.getBrokerCount());
+                                return getTopicsData(ac, internalCluster, cluster, brokersMetrics, tempCluster)
+                                        .map(topics -> {
+                                            internalCluster.topics(ClusterUtil.convertToExternalTopicList(topics));
+                                            cluster.setTopicCount(topics.size());
+                                            return topics;
+                                        })
+                                        .flatMap(topics ->
+                                            loadTopicConfig(ac, topics.stream().map(InternalTopic::getName).collect(Collectors.toList())).collectList()
+                                                    .map(s -> s.stream().collect(Collectors.toMap(map -> new ArrayList<>(map.entrySet()).get(0).getKey(),
+                                                            e -> new ArrayList<>(e.entrySet()).get(0).getValue())))
+                                                    .map(topicsConfig -> {
+                                                        internalCluster.topicConfigsMap(topicsConfig);
+                                                        return internalCluster;
+                                                    })
+                                        ).map(kc -> clusterWithId.toBuilder().kafkaCluster(
+                                                        kc
+                                                        .cluster(cluster)
+                                                        .brokersMetrics(brokersMetrics)
+                                                        .build()
+                                        ).build());
+                            })
             ).onErrorResume(
                     e -> {
-                        Cluster cluster = kafkaCluster.getCluster();
+                        Cluster cluster = ClusterUtil.clone(tempCluster.getCluster());
                         cluster.setStatus(ServerStatus.OFFLINE);
                         return Mono.just(clusterWithId.toBuilder().kafkaCluster(
-                                kafkaCluster.toBuilder()
+                                tempCluster.toBuilder()
                                         .lastKafkaException(e)
                                         .cluster(cluster)
                                         .build()
@@ -73,24 +86,24 @@ public class KafkaService {
     }
 
     @SneakyThrows
-    private Mono<List<InternalTopic>> getTopicsData(AdminClient adminClient, KafkaCluster kafkaCluster) {
+    private Mono<List<InternalTopic>> getTopicsData(AdminClient adminClient, KafkaCluster.KafkaClusterBuilder kafkaCluster,
+                                                    Cluster cluster, BrokersMetrics brokersMetrics, KafkaCluster tempCluster) {
         ListTopicsOptions listTopicsOptions = new ListTopicsOptions();
         listTopicsOptions.listInternal(true);
         return ClusterUtil.toMono(adminClient.listTopics(listTopicsOptions).names())
                     .map(tl -> {
-                    kafkaCluster.getCluster().setTopicCount(tl.size());
+                    cluster.setTopicCount(tl.size());
                         DescribeTopicsResult topicDescriptionsWrapper = adminClient.describeTopics(tl);
                         Map<String, KafkaFuture<TopicDescription>> topicDescriptionFuturesMap = topicDescriptionsWrapper.values();
-                        resetMetrics(kafkaCluster);
                         return topicDescriptionFuturesMap.entrySet();
                     })
                     .flatMapMany(Flux::fromIterable)
                     .flatMap(s -> ClusterUtil.toMono(s.getValue()))
-                    .flatMap(e -> collectTopicData(kafkaCluster, adminClient, e))
+                    .map(e -> collectTopicData(kafkaCluster, e, cluster, brokersMetrics, tempCluster))
                     .collectList();
     }
 
-    private Mono<Metrics> getClusterMetrics(AdminClient client, KafkaCluster kafkaCluster) {
+    private Mono<Metrics> getClusterMetrics(AdminClient client) {
         return ClusterUtil.toMono(client.describeCluster().nodes())
                 .map(Collection::size)
                 .flatMap(brokers ->
@@ -134,13 +147,8 @@ public class KafkaService {
                         throw new RuntimeException("Can't find created topic");
                     }
                 return s;
-                })
-                .flatMap(td -> collectTopicData(cluster, adminClient, td))
-            .map(topic -> {
-                var resultTopic = ClusterUtil.convertToExternalTopic(topic);
-                cluster.getTopics().add(resultTopic);
-                return resultTopic;
-            });
+                }).map(s -> getUpdatedCluster(new ClusterWithId(cluster.getName(), cluster)))
+                .map(s -> new Topic());
     }
 
     @SneakyThrows
@@ -169,15 +177,16 @@ public class KafkaService {
         return getClusterId(adminClient).map( r -> adminClient);
     }
 
-    private void resetMetrics(KafkaCluster kafkaCluster) {
-        kafkaCluster.getBrokersMetrics().setOnlinePartitionCount(0);
-        kafkaCluster.getBrokersMetrics().setOfflinePartitionCount(0);
-        kafkaCluster.getBrokersMetrics().setUnderReplicatedPartitionCount(0);
-        kafkaCluster.getBrokersMetrics().setInSyncReplicasCount(0);
-        kafkaCluster.getBrokersMetrics().setOutOfSyncReplicasCount(0);
+    private void resetMetrics(BrokersMetrics brokersMetrics) {
+        brokersMetrics.setOnlinePartitionCount(0);
+        brokersMetrics.setOfflinePartitionCount(0);
+        brokersMetrics.setUnderReplicatedPartitionCount(0);
+        brokersMetrics.setInSyncReplicasCount(0);
+        brokersMetrics.setOutOfSyncReplicasCount(0);
     }
 
-    private Mono<InternalTopic> collectTopicData(KafkaCluster kafkaCluster, AdminClient adminClient, TopicDescription topicDescription) {
+    private InternalTopic collectTopicData(KafkaCluster.KafkaClusterBuilder kafkaClusterBuilder, TopicDescription topicDescription,
+                                           Cluster cluster, BrokersMetrics brokersMetrics, KafkaCluster kafkaCluster) {
         var topic = InternalTopic.builder();
         topic.internal(topicDescription.isInternal());
         topic.name(topicDescription.name());
@@ -205,7 +214,11 @@ public class KafkaService {
 
         topic.partitions(partitions);
 
-        var topicDetails = kafkaCluster.getOrCreateTopicDetails(topicDescription.name());
+        if (kafkaCluster.getTopicDetailsMap() == null) {
+            kafkaClusterBuilder.topicDetailsMap(new HashMap<>());
+        }
+
+        var topicDetails = kafkaClusterBuilder.build().getOrCreateTopicDetails(topicDescription.name());
 
         topicDetails.setReplicas(replicasCount);
         topicDetails.setPartitionCount(topicDescription.partitions().size());
@@ -214,19 +227,17 @@ public class KafkaService {
                 ? topicDescription.partitions().get(0).replicas().size()
                 : null);
         topicDetails.setUnderReplicatedPartitions(urpCount);
-        kafkaCluster.getCluster().setOnlinePartitionCount(kafkaCluster.getBrokersMetrics().getOnlinePartitionCount());
-        kafkaCluster.getBrokersMetrics().setUnderReplicatedPartitionCount(
-                kafkaCluster.getBrokersMetrics().getUnderReplicatedPartitionCount() + urpCount);
-        kafkaCluster.getBrokersMetrics().setInSyncReplicasCount(
-                kafkaCluster.getBrokersMetrics().getInSyncReplicasCount() + inSyncReplicasCount);
-        kafkaCluster.getBrokersMetrics().setOutOfSyncReplicasCount(
-                kafkaCluster.getBrokersMetrics().getOutOfSyncReplicasCount() + (replicasCount - inSyncReplicasCount));
-
-        kafkaCluster.getBrokersMetrics().setOnlinePartitionCount(partitions.stream().filter(s -> s.getLeader() != null).map(e -> 1).reduce(0, Integer::sum));
-        kafkaCluster.getBrokersMetrics().setOfflinePartitionCount(partitions.stream().filter(s -> s.getLeader() == null).map(e -> 1).reduce(0, Integer::sum));
-        var resultTopic = topic.build();
+        cluster.setOnlinePartitionCount(brokersMetrics.getOnlinePartitionCount());
+        brokersMetrics.setUnderReplicatedPartitionCount(
+                brokersMetrics.getUnderReplicatedPartitionCount() + urpCount);
+        brokersMetrics.setInSyncReplicasCount(
+                brokersMetrics.getInSyncReplicasCount() + inSyncReplicasCount);
+        brokersMetrics.setOutOfSyncReplicasCount(
+                brokersMetrics.getOutOfSyncReplicasCount() + (replicasCount - inSyncReplicasCount));
 
-        return loadTopicConfig(adminClient, kafkaCluster, resultTopic.getName()).map(l -> resultTopic);
+        brokersMetrics.setOnlinePartitionCount(partitions.stream().filter(s -> s.getLeader() != null).map(e -> 1).reduce(0, Integer::sum));
+        brokersMetrics.setOfflinePartitionCount(partitions.stream().filter(s -> s.getLeader() == null).map(e -> 1).reduce(0, Integer::sum));
+        return topic.build();
     }
 
     private Mono<TopicDescription> getTopicDescription(Map.Entry<String, KafkaFuture<TopicDescription>> entry) {
@@ -238,27 +249,28 @@ public class KafkaService {
     }
 
     @SneakyThrows
-    private Mono<List<TopicConfig>> loadTopicConfig(AdminClient adminClient, KafkaCluster kafkaCluster, String topicName) {
-        Set<ConfigResource> resources = Collections.singleton(new ConfigResource(ConfigResource.Type.TOPIC, topicName));
-        return ClusterUtil.toMono(adminClient.describeConfigs(resources).all())
-                .map(configs -> {
-                if (!configs.isEmpty()) return Collections.emptyList();
-                    Collection<ConfigEntry> entries = configs.values().iterator().next().entries();
-                    List<TopicConfig> topicConfigs = new ArrayList<>();
-                    for (ConfigEntry entry : entries) {
-                        TopicConfig topicConfig = new TopicConfig();
-                        topicConfig.setName(entry.name());
-                        topicConfig.setValue(entry.value());
-                        if (topicConfig.getName().equals(MESSAGE_FORMAT_VERSION_CONFIG)) {
-                            topicConfig.setDefaultValue(topicConfig.getValue());
-                        } else {
-                            topicConfig.setDefaultValue(TOPIC_DEFAULT_CONFIGS.get(entry.name()));
+    private Flux<Map<String, List<TopicConfig>>> loadTopicConfig(AdminClient adminClient, List<String> topicNames) {
+        return Flux.fromIterable(topicNames).flatMap(topicName -> {
+            Set<ConfigResource> resources = Collections.singleton(new ConfigResource(ConfigResource.Type.TOPIC, topicName));
+            return ClusterUtil.toMono(adminClient.describeConfigs(resources).all())
+                    .map(configs -> {
+                        if (configs.isEmpty()) return Collections.emptyMap();
+                        Collection<ConfigEntry> entries = configs.values().iterator().next().entries();
+                        List<TopicConfig> topicConfigs = new ArrayList<>();
+                        for (ConfigEntry entry : entries) {
+                            TopicConfig topicConfig = new TopicConfig();
+                            topicConfig.setName(entry.name());
+                            topicConfig.setValue(entry.value());
+                            if (topicConfig.getName().equals(MESSAGE_FORMAT_VERSION_CONFIG)) {
+                                topicConfig.setDefaultValue(topicConfig.getValue());
+                            } else {
+                                topicConfig.setDefaultValue(TOPIC_DEFAULT_CONFIGS.get(entry.name()));
+                            }
+                            topicConfigs.add(topicConfig);
                         }
-                        topicConfigs.add(topicConfig);
-                    }
-
-                    return kafkaCluster.getTopicConfigsMap().put(topicName, topicConfigs);
-            });
+                        return Collections.singletonMap(topicName, topicConfigs);
+                    });
+         });
     }
 
     @SneakyThrows

+ 8 - 8
kafka-ui-api/src/main/java/com/provectus/kafka/ui/rest/MetricsRestController.java

@@ -4,7 +4,7 @@ import com.provectus.kafka.ui.api.ApiClustersApi;
 import com.provectus.kafka.ui.cluster.service.ClusterService;
 import com.provectus.kafka.ui.model.*;
 import lombok.RequiredArgsConstructor;
-import org.apache.kafka.clients.admin.ListConsumerGroupsResult;
+import org.springframework.http.HttpStatus;
 import org.springframework.http.ResponseEntity;
 import org.springframework.web.bind.annotation.RestController;
 import org.springframework.web.server.ServerWebExchange;
@@ -22,32 +22,32 @@ public class MetricsRestController implements ApiClustersApi {
 
     @Override
     public Mono<ResponseEntity<Flux<Cluster>>> getClusters(ServerWebExchange exchange) {
-        return clusterService.getClusters();
+        return Mono.just(new ResponseEntity<>(clusterService.getClusters(), HttpStatus.OK));
     }
 
     @Override
     public Mono<ResponseEntity<BrokersMetrics>> getBrokersMetrics(String clusterId, ServerWebExchange exchange) {
-        return clusterService.getBrokersMetrics(clusterId);
+        return Mono.just(new ResponseEntity<>(clusterService.getBrokersMetrics(clusterId), HttpStatus.OK));
     }
 
     @Override
     public Mono<ResponseEntity<Flux<Topic>>> getTopics(String clusterId, ServerWebExchange exchange) {
-        return clusterService.getTopics(clusterId);
+        return Mono.just(new ResponseEntity<>(clusterService.getTopics(clusterId), HttpStatus.OK));
     }
 
     @Override
     public Mono<ResponseEntity<TopicDetails>> getTopicDetails(String clusterId, String topicName, ServerWebExchange exchange) {
-        return clusterService.getTopicDetails(clusterId, topicName);
+        return Mono.just(new ResponseEntity<>(clusterService.getTopicDetails(clusterId, topicName), HttpStatus.OK));
     }
 
     @Override
     public Mono<ResponseEntity<Flux<TopicConfig>>> getTopicConfigs(String clusterId, String topicName, ServerWebExchange exchange) {
-        return clusterService.getTopicConfigs(clusterId, topicName);
+        return Mono.just(new ResponseEntity<>(clusterService.getTopicConfigs(clusterId, topicName), HttpStatus.OK));
     }
 
     @Override
     public Mono<ResponseEntity<Topic>> createTopic(String clusterId, @Valid Mono<TopicFormData> topicFormData, ServerWebExchange exchange) {
-        return clusterService.createTopic(clusterId, topicFormData);
+        return clusterService.createTopic(clusterId, topicFormData).map(s -> new ResponseEntity<>(s, HttpStatus.OK));
     }
 
     @Override
@@ -57,6 +57,6 @@ public class MetricsRestController implements ApiClustersApi {
 
     @Override
     public Mono<ResponseEntity<Flux<ConsumerGroup>>> getConsumerGroup(String clusterName, ServerWebExchange exchange) {
-        return clusterService.getConsumerGroup(clusterName);
+        return Mono.just(new ResponseEntity<>(clusterService.getConsumerGroup(clusterName), HttpStatus.OK));
     }
 }

+ 1 - 0
kafka-ui-contract/pom.xml

@@ -58,6 +58,7 @@
                                         <modelPackage>com.provectus.kafka.ui.model</modelPackage>
                                         <apiPackage>com.provectus.kafka.ui.api</apiPackage>
                                         <sourceFolder>kafka-ui-contract</sourceFolder>
+                                        <serializableModel>true</serializableModel>
 
                                         <reactive>true</reactive>