diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/AbstractController.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/AbstractController.java
new file mode 100644
index 0000000000..fd323d55a1
--- /dev/null
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/AbstractController.java
@@ -0,0 +1,22 @@
+package com.provectus.kafka.ui.controller;
+
+import com.provectus.kafka.ui.exception.ClusterNotFoundException;
+import com.provectus.kafka.ui.model.KafkaCluster;
+import com.provectus.kafka.ui.service.ClustersStorage;
+import org.springframework.beans.factory.annotation.Autowired;
+
+public abstract class AbstractController {
+
+  private ClustersStorage clustersStorage;
+
+  protected KafkaCluster getCluster(String name) {
+    return clustersStorage.getClusterByName(name)
+        .orElseThrow(() -> new ClusterNotFoundException(
+            String.format("Cluster with name '%s' not found", name)));
+  }
+
+  @Autowired
+  public void setClustersStorage(ClustersStorage clustersStorage) {
+    this.clustersStorage = clustersStorage;
+  }
+}
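Setter injection is used here rather than a constructor so that subclasses can keep Lombok's @RequiredArgsConstructor for their own dependencies. A minimal sketch of the controller pattern this base class enables; ExampleController and ExampleService are hypothetical, not part of this change:

```java
import com.provectus.kafka.ui.model.KafkaCluster;
import lombok.RequiredArgsConstructor;
import org.springframework.http.ResponseEntity;
import org.springframework.web.bind.annotation.RestController;
import reactor.core.publisher.Mono;

// Hypothetical controller/service pair, for illustration only.
@RestController
@RequiredArgsConstructor
public class ExampleController extends AbstractController {

  interface ExampleService {
    Mono<String> describe(KafkaCluster cluster);
  }

  private final ExampleService exampleService;

  public Mono<ResponseEntity<String>> describeCluster(String clusterName) {
    // getCluster(...) throws ClusterNotFoundException for unknown names,
    // giving every endpoint the same not-found behaviour in one place.
    return exampleService.describe(getCluster(clusterName))
        .map(ResponseEntity::ok);
  }
}
```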
diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/BrokersController.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/BrokersController.java
index 1035f82561..a2de575692 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/BrokersController.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/BrokersController.java
@@ -7,7 +7,7 @@ import com.provectus.kafka.ui.model.BrokerDTO;
 import com.provectus.kafka.ui.model.BrokerLogdirUpdateDTO;
 import com.provectus.kafka.ui.model.BrokerMetricsDTO;
 import com.provectus.kafka.ui.model.BrokersLogdirsDTO;
-import com.provectus.kafka.ui.service.ClusterService;
+import com.provectus.kafka.ui.service.BrokerService;
 import java.util.List;
 import lombok.RequiredArgsConstructor;
 import lombok.extern.log4j.Log4j2;
@@ -20,13 +20,13 @@ import reactor.core.publisher.Mono;
 @RestController
 @RequiredArgsConstructor
 @Log4j2
-public class BrokersController implements BrokersApi {
-  private final ClusterService clusterService;
+public class BrokersController extends AbstractController implements BrokersApi {
+  private final BrokerService brokerService;
 
   @Override
   public Mono<ResponseEntity<BrokerMetricsDTO>> getBrokersMetrics(String clusterName, Integer id,
                                                                   ServerWebExchange exchange) {
-    return clusterService.getBrokerMetrics(clusterName, id)
+    return brokerService.getBrokerMetrics(getCluster(clusterName), id)
         .map(ResponseEntity::ok)
         .onErrorReturn(ResponseEntity.notFound().build());
   }
@@ -34,7 +34,7 @@ public class BrokersController implements BrokersApi {
   @Override
   public Mono<ResponseEntity<Flux<BrokerDTO>>> getBrokers(String clusterName,
                                                           ServerWebExchange exchange) {
-    return Mono.just(ResponseEntity.ok(clusterService.getBrokers(clusterName)));
+    return Mono.just(ResponseEntity.ok(brokerService.getBrokers(getCluster(clusterName))));
   }
 
   @Override
@@ -42,13 +42,15 @@ public class BrokersController implements BrokersApi {
                                                                     List<Integer> brokers,
                                                                     ServerWebExchange exchange
   ) {
-    return Mono.just(ResponseEntity.ok(clusterService.getAllBrokersLogdirs(clusterName, brokers)));
+    return Mono.just(ResponseEntity.ok(
+        brokerService.getAllBrokersLogdirs(getCluster(clusterName), brokers)));
   }
 
   @Override
   public Mono<ResponseEntity<Flux<BrokerConfigDTO>>> getBrokerConfig(String clusterName, Integer id,
                                                                      ServerWebExchange exchange) {
-    return Mono.just(ResponseEntity.ok(clusterService.getBrokerConfig(clusterName, id)));
+    return Mono.just(ResponseEntity.ok(
+        brokerService.getBrokerConfig(getCluster(clusterName), id)));
   }
 
   @Override
@@ -56,7 +58,7 @@ public class BrokersController implements BrokersApi {
       String clusterName, Integer id, Mono<BrokerLogdirUpdateDTO> brokerLogdir,
       ServerWebExchange exchange) {
     return brokerLogdir
-        .flatMap(bld -> clusterService.updateBrokerLogDir(clusterName, id, bld))
+        .flatMap(bld -> brokerService.updateBrokerLogDir(getCluster(clusterName), id, bld))
         .map(ResponseEntity::ok);
   }
 
@@ -67,8 +69,8 @@ public class BrokersController implements BrokersApi {
                                                          Mono<BrokerConfigItemDTO> brokerConfig,
                                                          ServerWebExchange exchange) {
     return brokerConfig
-        .flatMap(bci -> clusterService.updateBrokerConfigByName(
-            clusterName, id, name, bci.getValue()))
+        .flatMap(bci -> brokerService.updateBrokerConfigByName(
+            getCluster(clusterName), id, name, bci.getValue()))
         .map(ResponseEntity::ok);
   }
 }
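One behavioural nuance worth flagging in review: getCluster(clusterName) runs eagerly while the controller method builds the pipeline, so ClusterNotFoundException is thrown at assembly time rather than surfacing through the Mono. If lazy resolution were ever wanted, a Mono.defer wrapper would restore it. A hypothetical sketch, not something this PR does:

```java
// Hypothetical variant (not in this PR): defer cluster resolution until
// subscription, so ClusterNotFoundException travels through the reactive
// pipeline instead of being thrown while the method is assembling it.
public Mono<ResponseEntity<Void>> updateBrokerLogDirDeferred(
    String clusterName, Integer id, Mono<BrokerLogdirUpdateDTO> brokerLogdir) {
  return Mono.defer(() -> Mono.just(getCluster(clusterName)))
      .flatMap(cluster -> brokerLogdir
          .flatMap(bld -> brokerService.updateBrokerLogDir(cluster, id, bld)))
      .map(ResponseEntity::ok);
}
```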
diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/ConsumerGroupsController.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/ConsumerGroupsController.java
index 9565ff9990..0f0defde22 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/ConsumerGroupsController.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/ConsumerGroupsController.java
@@ -3,14 +3,12 @@ package com.provectus.kafka.ui.controller;
 
 import static java.util.stream.Collectors.toMap;
 
 import com.provectus.kafka.ui.api.ConsumerGroupsApi;
-import com.provectus.kafka.ui.exception.ClusterNotFoundException;
 import com.provectus.kafka.ui.exception.ValidationException;
 import com.provectus.kafka.ui.model.ConsumerGroupDTO;
 import com.provectus.kafka.ui.model.ConsumerGroupDetailsDTO;
 import com.provectus.kafka.ui.model.ConsumerGroupOffsetsResetDTO;
 import com.provectus.kafka.ui.model.PartitionOffsetDTO;
-import com.provectus.kafka.ui.service.ClusterService;
-import com.provectus.kafka.ui.service.ClustersStorage;
+import com.provectus.kafka.ui.service.ConsumerGroupService;
 import com.provectus.kafka.ui.service.OffsetsResetService;
 import java.util.Map;
 import java.util.Optional;
@@ -26,22 +24,22 @@ import reactor.core.publisher.Mono;
 @RestController
 @RequiredArgsConstructor
 @Log4j2
-public class ConsumerGroupsController implements ConsumerGroupsApi {
-  private final ClusterService clusterService;
+public class ConsumerGroupsController extends AbstractController implements ConsumerGroupsApi {
+
+  private final ConsumerGroupService consumerGroupService;
   private final OffsetsResetService offsetsResetService;
-  private final ClustersStorage clustersStorage;
 
   @Override
   public Mono<ResponseEntity<Void>> deleteConsumerGroup(String clusterName, String id,
                                                         ServerWebExchange exchange) {
-    return clusterService.deleteConsumerGroupById(clusterName, id)
+    return consumerGroupService.deleteConsumerGroupById(getCluster(clusterName), id)
         .map(ResponseEntity::ok);
   }
 
   @Override
   public Mono<ResponseEntity<ConsumerGroupDetailsDTO>> getConsumerGroup(
       String clusterName, String consumerGroupId, ServerWebExchange exchange) {
-    return clusterService.getConsumerGroupDetail(clusterName, consumerGroupId)
+    return consumerGroupService.getConsumerGroupDetail(getCluster(clusterName), consumerGroupId)
         .map(ResponseEntity::ok);
   }
 
@@ -49,7 +47,7 @@ public class ConsumerGroupsController implements ConsumerGroupsApi {
   @Override
   public Mono<ResponseEntity<Flux<ConsumerGroupDTO>>> getConsumerGroups(String clusterName,
                                                                         ServerWebExchange exchange) {
-    return clusterService.getConsumerGroups(clusterName)
+    return consumerGroupService.getConsumerGroups(getCluster(clusterName))
         .map(Flux::fromIterable)
         .map(ResponseEntity::ok)
         .switchIfEmpty(Mono.just(ResponseEntity.notFound().build()));
@@ -58,7 +56,8 @@ public class ConsumerGroupsController implements ConsumerGroupsApi {
   @Override
   public Mono<ResponseEntity<Flux<ConsumerGroupDTO>>> getTopicConsumerGroups(
       String clusterName, String topicName, ServerWebExchange exchange) {
-    return clusterService.getConsumerGroups(clusterName, Optional.of(topicName))
+    return consumerGroupService.getConsumerGroups(
+        getCluster(clusterName), Optional.of(topicName))
         .map(Flux::fromIterable)
         .map(ResponseEntity::ok)
         .switchIfEmpty(Mono.just(ResponseEntity.notFound().build()));
@@ -71,9 +70,7 @@ public class ConsumerGroupsController implements ConsumerGroupsApi {
           consumerGroupOffsetsReset,
       ServerWebExchange exchange) {
     return consumerGroupOffsetsReset.flatMap(reset -> {
-      var cluster =
-          clustersStorage.getClusterByName(clusterName).orElseThrow(ClusterNotFoundException::new);
-
+      var cluster = getCluster(clusterName);
       switch (reset.getResetType()) {
         case EARLIEST:
           return offsetsResetService
diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/KafkaConnectController.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/KafkaConnectController.java
index b3da450036..a0a1ac7b18 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/KafkaConnectController.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/KafkaConnectController.java
@@ -23,19 +23,19 @@ import reactor.core.publisher.Mono;
 @RestController
 @RequiredArgsConstructor
 @Log4j2
-public class KafkaConnectController implements KafkaConnectApi {
+public class KafkaConnectController extends AbstractController implements KafkaConnectApi {
   private final KafkaConnectService kafkaConnectService;
 
   @Override
   public Mono<ResponseEntity<Flux<ConnectDTO>>> getConnects(String clusterName,
                                                             ServerWebExchange exchange) {
-    return kafkaConnectService.getConnects(clusterName).map(ResponseEntity::ok);
+    return kafkaConnectService.getConnects(getCluster(clusterName)).map(ResponseEntity::ok);
   }
 
   @Override
   public Mono<ResponseEntity<Flux<String>>> getConnectors(String clusterName, String connectName,
                                                           ServerWebExchange exchange) {
-    Flux<String> connectors = kafkaConnectService.getConnectors(clusterName, connectName);
+    var connectors = kafkaConnectService.getConnectors(getCluster(clusterName), connectName);
     return Mono.just(ResponseEntity.ok(connectors));
   }
 
@@ -43,7 +43,7 @@ public class KafkaConnectController implements KafkaConnectApi {
   public Mono<ResponseEntity<ConnectorDTO>> createConnector(String clusterName, String connectName,
                                                             @Valid Mono<NewConnectorDTO> connector,
                                                             ServerWebExchange exchange) {
-    return kafkaConnectService.createConnector(clusterName, connectName, connector)
+    return kafkaConnectService.createConnector(getCluster(clusterName), connectName, connector)
         .map(ResponseEntity::ok);
   }
 
@@ -51,7 +51,7 @@ public class KafkaConnectController implements KafkaConnectApi {
   public Mono<ResponseEntity<ConnectorDTO>> getConnector(String clusterName, String connectName,
                                                          String connectorName,
                                                          ServerWebExchange exchange) {
-    return kafkaConnectService.getConnector(clusterName, connectName, connectorName)
+    return kafkaConnectService.getConnector(getCluster(clusterName), connectName, connectorName)
         .map(ResponseEntity::ok);
   }
 
@@ -59,7 +59,7 @@ public class KafkaConnectController implements KafkaConnectApi {
   public Mono<ResponseEntity<Void>> deleteConnector(String clusterName, String connectName,
                                                     String connectorName,
                                                     ServerWebExchange exchange) {
-    return kafkaConnectService.deleteConnector(clusterName, connectName, connectorName)
+    return kafkaConnectService.deleteConnector(getCluster(clusterName), connectName, connectorName)
         .map(ResponseEntity::ok);
   }
 
@@ -70,7 +70,8 @@ public class KafkaConnectController implements KafkaConnectApi {
       String search,
       ServerWebExchange exchange
   ) {
-    return Mono.just(ResponseEntity.ok(kafkaConnectService.getAllConnectors(clusterName, search)));
+    return Mono.just(ResponseEntity.ok(
+        kafkaConnectService.getAllConnectors(getCluster(clusterName), search)));
   }
 
   @Override
@@ -78,7 +79,8 @@ public class KafkaConnectController implements KafkaConnectApi {
       String connectName, String connectorName,
      ServerWebExchange exchange) {
-    return kafkaConnectService.getConnectorConfig(clusterName, connectName, connectorName)
+    return kafkaConnectService
+        .getConnectorConfig(getCluster(clusterName), connectName, connectorName)
         .map(ResponseEntity::ok);
   }
 
@@ -89,7 +91,7 @@ public class KafkaConnectController implements KafkaConnectApi {
       @Valid Mono<Object> requestBody,
      ServerWebExchange exchange) {
     return kafkaConnectService
-        .setConnectorConfig(clusterName, connectName, connectorName, requestBody)
+        .setConnectorConfig(getCluster(clusterName), connectName, connectorName, requestBody)
         .map(ResponseEntity::ok);
   }
 
@@ -98,7 +100,8 @@ public class KafkaConnectController implements KafkaConnectApi {
       String connectorName, ConnectorActionDTO action,
      ServerWebExchange exchange) {
-    return kafkaConnectService.updateConnectorState(clusterName, connectName, connectorName, action)
+    return kafkaConnectService
+        .updateConnectorState(getCluster(clusterName), connectName, connectorName, action)
         .map(ResponseEntity::ok);
   }
 
@@ -108,21 +111,24 @@ public class KafkaConnectController implements KafkaConnectApi {
       String connectorName,
      ServerWebExchange exchange) {
     return Mono.just(ResponseEntity
-        .ok(kafkaConnectService.getConnectorTasks(clusterName, connectName, connectorName)));
+        .ok(kafkaConnectService
+            .getConnectorTasks(getCluster(clusterName), connectName, connectorName)));
   }
 
   @Override
   public Mono<ResponseEntity<Void>> restartConnectorTask(String clusterName, String connectName,
                                                          String connectorName, Integer taskId,
                                                          ServerWebExchange exchange) {
-    return kafkaConnectService.restartConnectorTask(clusterName, connectName, connectorName, taskId)
+    return kafkaConnectService
+        .restartConnectorTask(getCluster(clusterName), connectName, connectorName, taskId)
         .map(ResponseEntity::ok);
   }
 
   @Override
   public Mono<ResponseEntity<Flux<ConnectorPluginDTO>>> getConnectorPlugins(
       String clusterName, String connectName, ServerWebExchange exchange) {
-    return kafkaConnectService.getConnectorPlugins(clusterName, connectName)
+    return kafkaConnectService
+        .getConnectorPlugins(getCluster(clusterName), connectName)
         .map(ResponseEntity::ok);
   }
 
@@ -132,7 +138,8 @@ public class KafkaConnectController implements KafkaConnectApi {
       String clusterName, String connectName, String pluginName,
       @Valid Mono<Object> requestBody, ServerWebExchange exchange) {
     return kafkaConnectService
-        .validateConnectorPluginConfig(clusterName, connectName, pluginName, requestBody)
+        .validateConnectorPluginConfig(
+            getCluster(clusterName), connectName, pluginName, requestBody)
         .map(ResponseEntity::ok);
   }
 }
diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/KsqlController.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/KsqlController.java
index e60b714f4e..4c4770f3bd 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/KsqlController.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/KsqlController.java
@@ -14,7 +14,7 @@ import reactor.core.publisher.Mono;
 @RestController
 @RequiredArgsConstructor
 @Log4j2
-public class KsqlController implements KsqlApi {
+public class KsqlController extends AbstractController implements KsqlApi {
   private final KsqlService ksqlService;
 
   @Override
@@ -22,6 +22,7 @@ public class KsqlController implements KsqlApi {
                                                           Mono<KsqlCommandDTO> ksqlCommand,
                                                           ServerWebExchange exchange) {
-    return ksqlService.executeKsqlCommand(clusterName, ksqlCommand).map(ResponseEntity::ok);
+    return ksqlService.executeKsqlCommand(getCluster(clusterName), ksqlCommand)
+        .map(ResponseEntity::ok);
   }
 }
diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/MessagesController.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/MessagesController.java
index dcaebc3af7..b5b72041e7 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/MessagesController.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/MessagesController.java
@@ -7,7 +7,8 @@ import com.provectus.kafka.ui.model.SeekDirectionDTO;
 import com.provectus.kafka.ui.model.SeekTypeDTO;
 import com.provectus.kafka.ui.model.TopicMessageEventDTO;
 import com.provectus.kafka.ui.model.TopicMessageSchemaDTO;
-import com.provectus.kafka.ui.service.ClusterService;
+import com.provectus.kafka.ui.service.MessagesService;
+import com.provectus.kafka.ui.service.TopicsService;
 import java.util.Collections;
 import java.util.List;
 import java.util.Optional;
@@ -26,15 +27,16 @@ import reactor.core.publisher.Mono;
 @RestController
 @RequiredArgsConstructor
 @Log4j2
-public class MessagesController implements MessagesApi {
-  private final ClusterService clusterService;
+public class MessagesController extends AbstractController implements MessagesApi {
+  private final MessagesService messagesService;
+  private final TopicsService topicsService;
 
   @Override
   public Mono<ResponseEntity<Void>> deleteTopicMessages(
       String clusterName, String topicName, @Valid List<Integer> partitions,
       ServerWebExchange exchange) {
-    return clusterService.deleteTopicMessages(
-        clusterName,
+    return messagesService.deleteTopicMessages(
+        getCluster(clusterName),
         topicName,
         Optional.ofNullable(partitions).orElse(List.of())
     ).map(ResponseEntity::ok);
@@ -48,7 +50,8 @@ public class MessagesController implements MessagesApi {
     return parseConsumerPosition(topicName, seekType, seekTo, seekDirection)
         .map(position ->
             ResponseEntity.ok(
-                clusterService.getMessages(clusterName, topicName, position, q, limit)
+                messagesService.loadMessages(
+                    getCluster(clusterName), topicName, position, q, limit)
             )
         );
   }
 
@@ -56,7 +59,7 @@ public class MessagesController implements MessagesApi {
   @Override
   public Mono<ResponseEntity<TopicMessageSchemaDTO>> getTopicSchema(
       String clusterName, String topicName, ServerWebExchange exchange) {
-    return Mono.just(clusterService.getTopicSchema(clusterName, topicName))
+    return Mono.just(topicsService.getTopicSchema(getCluster(clusterName), topicName))
         .map(ResponseEntity::ok);
   }
 
@@ -65,7 +68,7 @@ public class MessagesController implements MessagesApi {
       String clusterName, String topicName, @Valid Mono<CreateTopicMessageDTO> createTopicMessage,
       ServerWebExchange exchange) {
     return createTopicMessage.flatMap(msg ->
-        clusterService.sendMessage(clusterName, topicName, msg)
+        messagesService.sendMessage(getCluster(clusterName), topicName, msg).then()
     ).map(ResponseEntity::ok);
   }
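The added .then() on sendMessage suggests the new MessagesService.sendMessage returns a value where the old ClusterService.sendMessage already collapsed to Mono<Void> internally. MessagesService is not shown in this diff, so the following is an assumption about its shape, with RecordMetadata as the guessed result type:

```java
import org.apache.kafka.clients.producer.RecordMetadata;
import reactor.core.publisher.Mono;

// Assumed (not confirmed by this diff): the service now exposes the
// producer result and lets callers decide whether to keep it.
Mono<RecordMetadata> sent = messagesService.sendMessage(cluster, topicName, msg);
Mono<Void> done = sent.then(); // the controller only needs completion
```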
diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/TopicsController.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/TopicsController.java
index 7b4316b94f..bcc125fe73 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/TopicsController.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/TopicsController.java
@@ -12,7 +12,7 @@ import com.provectus.kafka.ui.model.TopicDTO;
 import com.provectus.kafka.ui.model.TopicDetailsDTO;
 import com.provectus.kafka.ui.model.TopicUpdateDTO;
 import com.provectus.kafka.ui.model.TopicsResponseDTO;
-import com.provectus.kafka.ui.service.ClusterService;
+import com.provectus.kafka.ui.service.TopicsService;
 import java.util.Optional;
 import javax.validation.Valid;
 import lombok.RequiredArgsConstructor;
@@ -27,13 +27,13 @@ import reactor.core.publisher.Mono;
 @RestController
 @RequiredArgsConstructor
 @Log4j2
-public class TopicsController implements TopicsApi {
-  private final ClusterService clusterService;
+public class TopicsController extends AbstractController implements TopicsApi {
+  private final TopicsService topicsService;
 
   @Override
   public Mono<ResponseEntity<TopicDTO>> createTopic(
       String clusterName, @Valid Mono<TopicCreationDTO> topicCreation, ServerWebExchange exchange) {
-    return clusterService.createTopic(clusterName, topicCreation)
+    return topicsService.createTopic(getCluster(clusterName), topicCreation)
         .map(s -> new ResponseEntity<>(s, HttpStatus.OK))
         .switchIfEmpty(Mono.just(ResponseEntity.notFound().build()));
   }
 
@@ -41,7 +41,7 @@ public class TopicsController implements TopicsApi {
   @Override
   public Mono<ResponseEntity<Void>> deleteTopic(
       String clusterName, String topicName, ServerWebExchange exchange) {
-    return clusterService.deleteTopic(clusterName, topicName).map(ResponseEntity::ok);
+    return topicsService.deleteTopic(getCluster(clusterName), topicName).map(ResponseEntity::ok);
   }
 
@@ -49,7 +49,7 @@ public class TopicsController implements TopicsApi {
   public Mono<ResponseEntity<Flux<TopicConfigDTO>>> getTopicConfigs(
       String clusterName, String topicName, ServerWebExchange exchange) {
     return Mono.just(
-        clusterService.getTopicConfigs(clusterName, topicName)
+        topicsService.getTopicConfigs(getCluster(clusterName), topicName)
            .map(Flux::fromIterable)
            .map(ResponseEntity::ok)
            .orElse(ResponseEntity.notFound().build())
@@ -60,7 +60,7 @@ public class TopicsController implements TopicsApi {
   public Mono<ResponseEntity<TopicDetailsDTO>> getTopicDetails(
       String clusterName, String topicName, ServerWebExchange exchange) {
     return Mono.just(
-        clusterService.getTopicDetails(clusterName, topicName)
+        topicsService.getTopicDetails(getCluster(clusterName), topicName)
            .map(ResponseEntity::ok)
            .orElse(ResponseEntity.notFound().build())
     );
@@ -73,9 +73,9 @@ public class TopicsController implements TopicsApi {
       @Valid String search,
       @Valid TopicColumnsToSortDTO orderBy,
       ServerWebExchange exchange) {
-    return Mono.just(ResponseEntity.ok(clusterService
+    return Mono.just(ResponseEntity.ok(topicsService
        .getTopics(
-            clusterName,
+            getCluster(clusterName),
            Optional.ofNullable(page),
            Optional.ofNullable(perPage),
            Optional.ofNullable(showInternal),
@@ -88,7 +88,8 @@ public class TopicsController implements TopicsApi {
   public Mono<ResponseEntity<TopicDTO>> updateTopic(
       String clusterId, String topicName, @Valid Mono<TopicUpdateDTO> topicUpdate,
       ServerWebExchange exchange) {
-    return clusterService.updateTopic(clusterId, topicName, topicUpdate).map(ResponseEntity::ok);
+    return topicsService
+        .updateTopic(getCluster(clusterId), topicName, topicUpdate).map(ResponseEntity::ok);
   }
 
   @Override
@@ -97,7 +98,8 @@ public class TopicsController implements TopicsApi {
       Mono<PartitionsIncreaseDTO> partitionsIncrease,
       ServerWebExchange exchange) {
     return partitionsIncrease.flatMap(
-        partitions -> clusterService.increaseTopicPartitions(clusterName, topicName, partitions))
+        partitions ->
+            topicsService.increaseTopicPartitions(getCluster(clusterName), topicName, partitions))
        .map(ResponseEntity::ok);
   }
 
@@ -107,7 +109,8 @@ public class TopicsController implements TopicsApi {
       Mono<ReplicationFactorChangeDTO> replicationFactorChange,
       ServerWebExchange exchange) {
     return replicationFactorChange
-        .flatMap(rfc -> clusterService.changeReplicationFactor(clusterName, topicName, rfc))
+        .flatMap(rfc ->
+            topicsService.changeReplicationFactor(getCluster(clusterName), topicName, rfc))
         .map(ResponseEntity::ok);
   }
 }
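TopicsService itself is not part of this diff; its surface can only be inferred from the call sites above. A hypothetical sketch of the expected signatures, to make the contract explicit during review (an assumption, not the actual API):

```java
import com.provectus.kafka.ui.model.KafkaCluster;
import com.provectus.kafka.ui.model.TopicConfigDTO;
import com.provectus.kafka.ui.model.TopicCreationDTO;
import com.provectus.kafka.ui.model.TopicDTO;
import com.provectus.kafka.ui.model.TopicDetailsDTO;
import com.provectus.kafka.ui.model.TopicUpdateDTO;
import java.util.List;
import java.util.Optional;
import reactor.core.publisher.Mono;

// Inferred from TopicsController's usage; names and return types are guesses.
interface InferredTopicsService {
  Mono<TopicDTO> createTopic(KafkaCluster cluster, Mono<TopicCreationDTO> topicCreation);
  Mono<Void> deleteTopic(KafkaCluster cluster, String topicName);
  Optional<List<TopicConfigDTO>> getTopicConfigs(KafkaCluster cluster, String topicName);
  Optional<TopicDetailsDTO> getTopicDetails(KafkaCluster cluster, String topicName);
  Mono<TopicDTO> updateTopic(KafkaCluster cluster, String topicName,
                             Mono<TopicUpdateDTO> topicUpdate);
}
```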
diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/BrokerService.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/BrokerService.java
index 97f00ecda0..89fe9eb3ed 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/BrokerService.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/BrokerService.java
@@ -1,46 +1,171 @@
 package com.provectus.kafka.ui.service;
 
+import com.provectus.kafka.ui.exception.IllegalEntityStateException;
+import com.provectus.kafka.ui.exception.InvalidRequestApiException;
+import com.provectus.kafka.ui.exception.LogDirNotFoundApiException;
+import com.provectus.kafka.ui.exception.NotFoundException;
+import com.provectus.kafka.ui.exception.TopicOrPartitionNotFoundException;
+import com.provectus.kafka.ui.mapper.ClusterMapper;
+import com.provectus.kafka.ui.mapper.DescribeLogDirsMapper;
+import com.provectus.kafka.ui.model.BrokerConfigDTO;
 import com.provectus.kafka.ui.model.BrokerDTO;
+import com.provectus.kafka.ui.model.BrokerLogdirUpdateDTO;
+import com.provectus.kafka.ui.model.BrokerMetricsDTO;
+import com.provectus.kafka.ui.model.BrokersLogdirsDTO;
 import com.provectus.kafka.ui.model.InternalBrokerConfig;
 import com.provectus.kafka.ui.model.KafkaCluster;
+import com.provectus.kafka.ui.util.ClusterUtil;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
 import java.util.Map;
+import java.util.stream.Collectors;
+import lombok.RequiredArgsConstructor;
+import lombok.extern.log4j.Log4j2;
+import org.apache.kafka.clients.admin.ConfigEntry;
 import org.apache.kafka.common.Node;
+import org.apache.kafka.common.TopicPartitionReplica;
+import org.apache.kafka.common.errors.InvalidRequestException;
+import org.apache.kafka.common.errors.LogDirNotFoundException;
+import org.apache.kafka.common.errors.TimeoutException;
+import org.apache.kafka.common.errors.UnknownTopicOrPartitionException;
+import org.apache.kafka.common.requests.DescribeLogDirsResponse;
+import org.springframework.stereotype.Service;
 import reactor.core.publisher.Flux;
 import reactor.core.publisher.Mono;
 
-public interface BrokerService {
-  /**
-   * Get brokers config as map (Config name, Config).
-   *
-   * @param cluster - cluster
-   * @param brokerId - node id
-   * @return Mono of Map(String, InternalBrokerConfig)
-   */
-  Mono<Map<String, InternalBrokerConfig>> getBrokerConfigMap(KafkaCluster cluster,
-                                                             Integer brokerId);
+@Service
+@RequiredArgsConstructor
+@Log4j2
+public class BrokerService {
 
-  /**
-   * Get brokers config as Flux of InternalBrokerConfig.
-   *
-   * @param cluster - cluster
-   * @param brokerId - node id
-   * @return Flux of InternalBrokerConfig
-   */
-  Flux<InternalBrokerConfig> getBrokersConfig(KafkaCluster cluster, Integer brokerId);
+  private final AdminClientService adminClientService;
+  private final DescribeLogDirsMapper describeLogDirsMapper;
+  private final ClusterMapper clusterMapper;
 
-  /**
-   * Get active brokers in cluster.
-   *
-   * @param cluster - cluster
-   * @return Flux of Broker
-   */
-  Flux<BrokerDTO> getBrokers(KafkaCluster cluster);
+  private Mono<Map<Integer, List<ConfigEntry>>> loadBrokersConfig(
+      KafkaCluster cluster, List<Integer> brokersIds) {
+    return adminClientService.get(cluster)
+        .flatMap(ac -> ac.loadBrokersConfig(brokersIds));
+  }
+
+  private Mono<List<ConfigEntry>> loadBrokersConfig(
+      KafkaCluster cluster, Integer brokerId) {
+    return loadBrokersConfig(cluster, Collections.singletonList(brokerId))
+        .map(map -> map.values().stream()
+            .findFirst()
+            .orElseThrow(() -> new IllegalEntityStateException(
+                String.format("Config for broker %s not found", brokerId)))
+        );
+  }
+
+  public Mono<Map<String, InternalBrokerConfig>> getBrokerConfigMap(KafkaCluster cluster,
+                                                                    Integer brokerId) {
+    return loadBrokersConfig(cluster, brokerId)
+        .map(list -> list.stream()
+            .collect(Collectors.toMap(
+                ConfigEntry::name,
+                ClusterUtil::mapToInternalBrokerConfig)));
+  }
+
+  private Flux<InternalBrokerConfig> getBrokersConfig(KafkaCluster cluster, Integer brokerId) {
+    if (!cluster.getBrokers().contains(brokerId)) {
+      return Flux.error(
+          new NotFoundException(String.format("Broker with id %s not found", brokerId)));
+    }
+    return loadBrokersConfig(cluster, brokerId)
+        .map(list -> list.stream()
+            .map(ClusterUtil::mapToInternalBrokerConfig)
+            .collect(Collectors.toList()))
+        .flatMapMany(Flux::fromIterable);
+  }
+
+  public Flux<BrokerDTO> getBrokers(KafkaCluster cluster) {
+    return adminClientService
+        .get(cluster)
+        .flatMap(ReactiveAdminClient::describeCluster)
+        .map(description -> description.getNodes().stream()
+            .map(node -> {
+              BrokerDTO broker = new BrokerDTO();
+              broker.setId(node.id());
+              broker.setHost(node.host());
+              return broker;
+            }).collect(Collectors.toList()))
+        .flatMapMany(Flux::fromIterable);
+  }
+
+  public Mono<Node> getController(KafkaCluster cluster) {
+    return adminClientService
+        .get(cluster)
+        .flatMap(ReactiveAdminClient::describeCluster)
+        .map(ReactiveAdminClient.ClusterDescription::getController);
+  }
+
+  public Mono<Void> updateBrokerLogDir(KafkaCluster cluster,
+                                       Integer broker,
+                                       BrokerLogdirUpdateDTO brokerLogDir) {
+    return adminClientService.get(cluster)
+        .flatMap(ac -> updateBrokerLogDir(ac, brokerLogDir, broker));
+  }
+
+  private Mono<Void> updateBrokerLogDir(ReactiveAdminClient admin,
+                                        BrokerLogdirUpdateDTO b,
+                                        Integer broker) {
+
+    Map<TopicPartitionReplica, String> req = Map.of(
+        new TopicPartitionReplica(b.getTopic(), b.getPartition(), broker),
+        b.getLogDir());
+    return admin.alterReplicaLogDirs(req)
+        .onErrorResume(UnknownTopicOrPartitionException.class,
+            e -> Mono.error(new TopicOrPartitionNotFoundException()))
+        .onErrorResume(LogDirNotFoundException.class,
+            e -> Mono.error(new LogDirNotFoundApiException()))
+        .doOnError(log::error);
+  }
+
+  public Mono<Void> updateBrokerConfigByName(KafkaCluster cluster,
+                                             Integer broker,
+                                             String name,
+                                             String value) {
+    return adminClientService.get(cluster)
+        .flatMap(ac -> ac.updateBrokerConfigByName(broker, name, value))
+        .onErrorResume(InvalidRequestException.class,
+            e -> Mono.error(new InvalidRequestApiException(e.getMessage())))
+        .doOnError(log::error);
+  }
+
+  private Mono<Map<Integer, Map<String, DescribeLogDirsResponse.LogDirInfo>>> getClusterLogDirs(
+      KafkaCluster cluster, List<Integer> reqBrokers) {
+    return adminClientService.get(cluster)
+        .flatMap(admin -> {
+          List<Integer> brokers = new ArrayList<>(cluster.getBrokers());
+          if (reqBrokers != null && !reqBrokers.isEmpty()) {
+            brokers.retainAll(reqBrokers);
+          }
+          return admin.describeLogDirs(brokers);
+        })
+        .onErrorResume(TimeoutException.class, (TimeoutException e) -> {
+          log.error("Error during fetching log dirs", e);
+          return Mono.just(new HashMap<>());
+        });
+  }
+
+  public Flux<BrokersLogdirsDTO> getAllBrokersLogdirs(KafkaCluster cluster, List<Integer> brokers) {
+    return getClusterLogDirs(cluster, brokers)
+        .map(describeLogDirsMapper::toBrokerLogDirsList)
+        .flatMapMany(Flux::fromIterable);
+  }
+
+  public Flux<BrokerConfigDTO> getBrokerConfig(KafkaCluster cluster, Integer brokerId) {
+    return getBrokersConfig(cluster, brokerId)
+        .map(clusterMapper::toBrokerConfig);
+  }
+
+  public Mono<BrokerMetricsDTO> getBrokerMetrics(KafkaCluster cluster, Integer id) {
+    return Mono.just(cluster.getMetrics().getInternalBrokerMetrics())
+        .map(m -> m.get(id))
+        .map(clusterMapper::toBrokerMetrics);
+  }
 
-  /**
-   * Get cluster controller node.
-   *
-   * @param cluster - cluster
-   * @return Controller node
-   */
-  Mono<Node> getController(KafkaCluster cluster);
 }
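The alterReplicaLogDirs request shape in updateBrokerLogDir is easy to misread: it is one (topic, partition, brokerId) to target-path entry per replica being moved. A fragment with hypothetical topic names, broker ids, and paths, for shape only:

```java
import java.util.Map;
import org.apache.kafka.common.TopicPartitionReplica;

// Hypothetical values for illustration; moving both replicas of
// orders-0 onto a faster volume on brokers 1 and 2.
Map<TopicPartitionReplica, String> req = Map.of(
    new TopicPartitionReplica("orders", 0, 1), "/var/kafka-logs-fast",
    new TopicPartitionReplica("orders", 0, 2), "/var/kafka-logs-fast");
// admin.alterReplicaLogDirs(req) then translates Kafka's
// UnknownTopicOrPartitionException / LogDirNotFoundException into the
// API-level exceptions introduced above.
```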
diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/BrokerServiceImpl.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/BrokerServiceImpl.java
deleted file mode 100644
index 1d6c3a3fcb..0000000000
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/BrokerServiceImpl.java
+++ /dev/null
@@ -1,89 +0,0 @@
-package com.provectus.kafka.ui.service;
-
-import com.provectus.kafka.ui.exception.IllegalEntityStateException;
-import com.provectus.kafka.ui.exception.NotFoundException;
-import com.provectus.kafka.ui.model.BrokerDTO;
-import com.provectus.kafka.ui.model.InternalBrokerConfig;
-import com.provectus.kafka.ui.model.KafkaCluster;
-import com.provectus.kafka.ui.util.ClusterUtil;
-import java.util.Collections;
-import java.util.List;
-import java.util.Map;
-import java.util.stream.Collectors;
-import lombok.RequiredArgsConstructor;
-import lombok.extern.log4j.Log4j2;
-import org.apache.kafka.clients.admin.ConfigEntry;
-import org.apache.kafka.common.Node;
-import org.springframework.stereotype.Service;
-import reactor.core.publisher.Flux;
-import reactor.core.publisher.Mono;
-
-@Service
-@RequiredArgsConstructor
-@Log4j2
-public class BrokerServiceImpl implements BrokerService {
-
-  private final AdminClientService adminClientService;
-
-  private Mono<Map<Integer, List<ConfigEntry>>> loadBrokersConfig(
-      KafkaCluster cluster, List<Integer> brokersIds) {
-    return adminClientService.get(cluster)
-        .flatMap(ac -> ac.loadBrokersConfig(brokersIds));
-  }
-
-  private Mono<List<ConfigEntry>> loadBrokersConfig(
-      KafkaCluster cluster, Integer brokerId) {
-    return loadBrokersConfig(cluster, Collections.singletonList(brokerId))
-        .map(map -> map.values().stream()
-            .findFirst()
-            .orElseThrow(() -> new IllegalEntityStateException(
-                String.format("Config for broker %s not found", brokerId)))
-        );
-  }
-
-  @Override
-  public Mono<Map<String, InternalBrokerConfig>> getBrokerConfigMap(KafkaCluster cluster,
-                                                                    Integer brokerId) {
-    return loadBrokersConfig(cluster, brokerId)
-        .map(list -> list.stream()
-            .collect(Collectors.toMap(
-                ConfigEntry::name,
-                ClusterUtil::mapToInternalBrokerConfig)));
-  }
-
-  @Override
-  public Flux<InternalBrokerConfig> getBrokersConfig(KafkaCluster cluster, Integer brokerId) {
-    if (!cluster.getBrokers().contains(brokerId)) {
-      return Flux.error(
-          new NotFoundException(String.format("Broker with id %s not found", brokerId)));
-    }
-    return loadBrokersConfig(cluster, brokerId)
-        .map(list -> list.stream()
-            .map(ClusterUtil::mapToInternalBrokerConfig)
-            .collect(Collectors.toList()))
-        .flatMapMany(Flux::fromIterable);
-  }
-
-  @Override
-  public Flux<BrokerDTO> getBrokers(KafkaCluster cluster) {
-    return adminClientService
-        .get(cluster)
-        .flatMap(ReactiveAdminClient::describeCluster)
-        .map(description -> description.getNodes().stream()
-            .map(node -> {
-              BrokerDTO broker = new BrokerDTO();
-              broker.setId(node.id());
-              broker.setHost(node.host());
-              return broker;
-            }).collect(Collectors.toList()))
-        .flatMapMany(Flux::fromIterable);
-  }
-
-  @Override
-  public Mono<Node> getController(KafkaCluster cluster) {
-    return adminClientService
-        .get(cluster)
-        .flatMap(ReactiveAdminClient::describeCluster)
-        .map(ReactiveAdminClient.ClusterDescription::getController);
-  }
-}
diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/ClusterService.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/ClusterService.java
index de4e5928b5..2b2dc33e59 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/ClusterService.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/ClusterService.java
@@ -1,73 +1,26 @@
 package com.provectus.kafka.ui.service;
 
 import com.provectus.kafka.ui.exception.ClusterNotFoundException;
-import com.provectus.kafka.ui.exception.IllegalEntityStateException;
-import com.provectus.kafka.ui.exception.NotFoundException;
-import com.provectus.kafka.ui.exception.TopicNotFoundException;
-import com.provectus.kafka.ui.exception.ValidationException;
 import com.provectus.kafka.ui.mapper.ClusterMapper;
-import com.provectus.kafka.ui.mapper.DescribeLogDirsMapper;
-import com.provectus.kafka.ui.model.BrokerConfigDTO;
-import com.provectus.kafka.ui.model.BrokerDTO;
-import com.provectus.kafka.ui.model.BrokerLogdirUpdateDTO;
-import com.provectus.kafka.ui.model.BrokerMetricsDTO;
-import com.provectus.kafka.ui.model.BrokersLogdirsDTO;
 import com.provectus.kafka.ui.model.ClusterDTO;
 import com.provectus.kafka.ui.model.ClusterMetricsDTO;
 import com.provectus.kafka.ui.model.ClusterStatsDTO;
-import com.provectus.kafka.ui.model.ConsumerGroupDTO;
-import com.provectus.kafka.ui.model.ConsumerGroupDetailsDTO;
-import com.provectus.kafka.ui.model.ConsumerPosition;
-import com.provectus.kafka.ui.model.CreateTopicMessageDTO;
-import com.provectus.kafka.ui.model.Feature;
-import com.provectus.kafka.ui.model.InternalTopic;
 import com.provectus.kafka.ui.model.KafkaCluster;
-import com.provectus.kafka.ui.model.PartitionsIncreaseDTO;
-import com.provectus.kafka.ui.model.PartitionsIncreaseResponseDTO;
-import com.provectus.kafka.ui.model.ReplicationFactorChangeDTO;
-import com.provectus.kafka.ui.model.ReplicationFactorChangeResponseDTO;
-import com.provectus.kafka.ui.model.TopicColumnsToSortDTO;
-import com.provectus.kafka.ui.model.TopicConfigDTO;
-import com.provectus.kafka.ui.model.TopicCreationDTO;
-import com.provectus.kafka.ui.model.TopicDTO;
-import com.provectus.kafka.ui.model.TopicDetailsDTO;
-import com.provectus.kafka.ui.model.TopicMessageEventDTO;
-import com.provectus.kafka.ui.model.TopicMessageSchemaDTO;
-import com.provectus.kafka.ui.model.TopicUpdateDTO;
-import com.provectus.kafka.ui.model.TopicsResponseDTO;
-import com.provectus.kafka.ui.serde.DeserializationService;
-import com.provectus.kafka.ui.util.ClusterUtil;
-import java.util.Collections;
-import java.util.Comparator;
 import java.util.List;
-import java.util.Optional;
-import java.util.function.Predicate;
 import java.util.stream.Collectors;
 import lombok.RequiredArgsConstructor;
-import lombok.SneakyThrows;
 import lombok.extern.log4j.Log4j2;
-import org.apache.commons.lang3.StringUtils;
-import org.apache.kafka.common.errors.GroupIdNotFoundException;
-import org.apache.kafka.common.errors.GroupNotEmptyException;
-import org.jetbrains.annotations.NotNull;
 import org.springframework.stereotype.Service;
-import reactor.core.publisher.Flux;
 import reactor.core.publisher.Mono;
 
 @Service
 @RequiredArgsConstructor
 @Log4j2
 public class ClusterService {
-  private static final Integer DEFAULT_PAGE_SIZE = 25;
 
   private final ClustersStorage clustersStorage;
   private final ClusterMapper clusterMapper;
-  private final KafkaService kafkaService;
-  private final AdminClientService adminClientService;
-  private final BrokerService brokerService;
-  private final ConsumingService consumingService;
-  private final DeserializationService deserializationService;
-  private final DescribeLogDirsMapper describeLogDirsMapper;
+  private final MetricsService metricsService;
 
   public List<ClusterDTO> getClusters() {
     return clustersStorage.getKafkaClusters()
@@ -76,13 +29,6 @@ public class ClusterService {
         .collect(Collectors.toList());
   }
 
-  public Mono<BrokerMetricsDTO> getBrokerMetrics(String name, Integer id) {
-    return Mono.justOrEmpty(clustersStorage.getClusterByName(name)
-        .map(c -> c.getMetrics().getInternalBrokerMetrics())
-        .map(m -> m.get(id))
-        .map(clusterMapper::toBrokerMetrics));
-  }
-
   public Mono<ClusterStatsDTO> getClusterStats(String name) {
     return Mono.justOrEmpty(
         clustersStorage.getClusterByName(name)
@@ -99,293 +45,12 @@ public class ClusterService {
     );
   }
 
-
-  public TopicsResponseDTO getTopics(String name, Optional<Integer> page,
-                                     Optional<Integer> nullablePerPage,
-                                     Optional<Boolean> showInternal,
-                                     Optional<String> search,
-                                     Optional<TopicColumnsToSortDTO> sortBy) {
-    Predicate<Integer> positiveInt = i -> i > 0;
-    int perPage = nullablePerPage.filter(positiveInt).orElse(DEFAULT_PAGE_SIZE);
-    var topicsToSkip = (page.filter(positiveInt).orElse(1) - 1) * perPage;
-    var cluster = clustersStorage.getClusterByName(name)
-        .orElseThrow(ClusterNotFoundException::new);
-    List<InternalTopic> topics = cluster.getTopics().values().stream()
-        .filter(topic -> !topic.isInternal()
-            || showInternal
-            .map(i -> topic.isInternal() == i)
-            .orElse(true))
-        .filter(topic ->
-            search
-                .map(s -> StringUtils.containsIgnoreCase(topic.getName(), s))
-                .orElse(true))
-        .sorted(getComparatorForTopic(sortBy))
-        .collect(Collectors.toList());
-    var totalPages = (topics.size() / perPage)
-        + (topics.size() % perPage == 0 ? 0 : 1);
-    return new TopicsResponseDTO()
-        .pageCount(totalPages)
-        .topics(
-            topics.stream()
-                .skip(topicsToSkip)
-                .limit(perPage)
-                .map(t ->
-                    clusterMapper.toTopic(
-                        t.toBuilder().partitions(
-                            kafkaService.getTopicPartitions(cluster, t)
-                        ).build()
-                    )
-                )
-                .collect(Collectors.toList())
-        );
-  }
-
-  private Comparator<InternalTopic> getComparatorForTopic(Optional<TopicColumnsToSortDTO> sortBy) {
-    var defaultComparator = Comparator.comparing(InternalTopic::getName);
-    if (sortBy.isEmpty()) {
-      return defaultComparator;
-    }
-    switch (sortBy.get()) {
-      case TOTAL_PARTITIONS:
-        return Comparator.comparing(InternalTopic::getPartitionCount);
-      case OUT_OF_SYNC_REPLICAS:
-        return Comparator.comparing(t -> t.getReplicas() - t.getInSyncReplicas());
-      case REPLICATION_FACTOR:
-        return Comparator.comparing(InternalTopic::getReplicationFactor);
-      case NAME:
-      default:
-        return defaultComparator;
-    }
-  }
-
-  public Optional<TopicDetailsDTO> getTopicDetails(String name, String topicName) {
-    return clustersStorage.getClusterByName(name)
-        .flatMap(c ->
-            Optional.ofNullable(c.getTopics()).map(l -> l.get(topicName)).map(
-                t -> t.toBuilder().partitions(
-                    kafkaService.getTopicPartitions(c, t)
-                ).build()
-            ).map(t -> clusterMapper.toTopicDetails(t, c.getMetrics()))
-        );
-  }
-
-  public Optional<List<TopicConfigDTO>> getTopicConfigs(String name, String topicName) {
-    return clustersStorage.getClusterByName(name)
-        .map(KafkaCluster::getTopics)
-        .map(t -> t.get(topicName))
-        .map(t -> t.getTopicConfigs().stream().map(clusterMapper::toTopicConfig)
-            .collect(Collectors.toList()));
-  }
-
-  public Mono<TopicDTO> createTopic(String clusterName, Mono<TopicCreationDTO> topicCreation) {
-    return clustersStorage.getClusterByName(clusterName).map(cluster ->
-        kafkaService.createTopic(cluster, topicCreation)
-            .doOnNext(t -> updateCluster(t, clusterName, cluster))
-            .map(clusterMapper::toTopic)
-    ).orElse(Mono.empty());
-  }
-
-  @SneakyThrows
-  public Mono<ConsumerGroupDetailsDTO> getConsumerGroupDetail(String clusterName,
-                                                              String consumerGroupId) {
-    var cluster = clustersStorage.getClusterByName(clusterName).orElseThrow(Throwable::new);
-    return kafkaService.getConsumerGroups(
-        cluster,
-        Optional.empty(),
-        Collections.singletonList(consumerGroupId)
-    ).filter(groups -> !groups.isEmpty()).map(groups -> groups.get(0)).map(
-        ClusterUtil::convertToConsumerGroupDetails
-    );
-  }
-
-  public Mono<List<ConsumerGroupDTO>> getConsumerGroups(String clusterName) {
-    return getConsumerGroups(clusterName, Optional.empty());
-  }
-
-  public Mono<List<ConsumerGroupDTO>> getConsumerGroups(String clusterName,
-                                                        Optional<String> topic) {
-    return Mono.justOrEmpty(clustersStorage.getClusterByName(clusterName))
-        .switchIfEmpty(Mono.error(ClusterNotFoundException::new))
-        .flatMap(c -> kafkaService.getConsumerGroups(c, topic, Collections.emptyList()))
-        .map(c ->
-            c.stream().map(ClusterUtil::convertToConsumerGroup).collect(Collectors.toList())
-        );
-  }
-
-  public Flux<BrokerDTO> getBrokers(String clusterName) {
-    return Mono.justOrEmpty(clustersStorage.getClusterByName(clusterName))
-        .switchIfEmpty(Mono.error(ClusterNotFoundException::new))
-        .flatMapMany(brokerService::getBrokers);
-  }
-
-  public Flux<BrokerConfigDTO> getBrokerConfig(String clusterName, Integer brokerId) {
-    return Mono.justOrEmpty(clustersStorage.getClusterByName(clusterName))
-        .switchIfEmpty(Mono.error(ClusterNotFoundException::new))
-        .flatMapMany(c -> brokerService.getBrokersConfig(c, brokerId))
-        .map(clusterMapper::toBrokerConfig);
-  }
-
-  @SneakyThrows
-  public Mono<TopicDTO> updateTopic(String clusterName, String topicName,
-                                    Mono<TopicUpdateDTO> topicUpdate) {
-    return clustersStorage.getClusterByName(clusterName).map(cl ->
-        topicUpdate
-            .flatMap(t -> kafkaService.updateTopic(cl, topicName, t))
-            .doOnNext(t -> updateCluster(t, clusterName, cl))
-            .map(clusterMapper::toTopic)
-    ).orElse(Mono.empty());
-  }
-
-  public Mono<Void> deleteTopic(String clusterName, String topicName) {
-    var cluster = clustersStorage.getClusterByName(clusterName)
-        .orElseThrow(ClusterNotFoundException::new);
-    var topic = getTopicDetails(clusterName, topicName)
-        .orElseThrow(TopicNotFoundException::new);
-    if (cluster.getFeatures().contains(Feature.TOPIC_DELETION)) {
-      return kafkaService.deleteTopic(cluster, topic.getName())
-          .doOnSuccess(t -> updateCluster(topicName, clusterName, cluster));
-    } else {
-      return Mono.error(new ValidationException("Topic deletion restricted"));
-    }
-  }
-
-  private KafkaCluster updateCluster(InternalTopic topic, String clusterName,
-                                     KafkaCluster cluster) {
-    final KafkaCluster updatedCluster = kafkaService.getUpdatedCluster(cluster, topic);
-    clustersStorage.setKafkaCluster(clusterName, updatedCluster);
-    return updatedCluster;
-  }
-
-  private KafkaCluster updateCluster(String topicToDelete, String clusterName,
-                                     KafkaCluster cluster) {
-    final KafkaCluster updatedCluster = kafkaService.getUpdatedCluster(cluster, topicToDelete);
-    clustersStorage.setKafkaCluster(clusterName, updatedCluster);
-    return updatedCluster;
-  }
-
   public Mono<ClusterDTO> updateCluster(String clusterName) {
     return clustersStorage.getClusterByName(clusterName)
-        .map(cluster -> kafkaService.getUpdatedCluster(cluster)
+        .map(cluster -> metricsService.updateClusterMetrics(cluster)
             .doOnNext(updatedCluster -> clustersStorage
                 .setKafkaCluster(updatedCluster.getName(), updatedCluster))
             .map(clusterMapper::toCluster))
         .orElse(Mono.error(new ClusterNotFoundException()));
   }
-
-  public Flux<TopicMessageEventDTO> getMessages(String clusterName, String topicName,
-                                                ConsumerPosition consumerPosition, String query,
-                                                Integer limit) {
-    return clustersStorage.getClusterByName(clusterName)
-        .map(c -> consumingService.loadMessages(c, topicName, consumerPosition, query, limit))
-        .orElse(Flux.empty());
-  }
-
-  public Mono<Void> deleteTopicMessages(String clusterName, String topicName,
-                                        List<Integer> partitions) {
-    var cluster = clustersStorage.getClusterByName(clusterName)
-        .orElseThrow(ClusterNotFoundException::new);
-    if (!cluster.getTopics().containsKey(topicName)) {
-      throw new TopicNotFoundException();
-    }
-    return consumingService.offsetsForDeletion(cluster, topicName, partitions)
-        .flatMap(offsets -> kafkaService.deleteTopicMessages(cluster, offsets));
-  }
-
-  public Mono<PartitionsIncreaseResponseDTO> increaseTopicPartitions(
-      String clusterName,
-      String topicName,
-      PartitionsIncreaseDTO partitionsIncrease) {
-    return clustersStorage.getClusterByName(clusterName).map(cluster ->
-        kafkaService.increaseTopicPartitions(cluster, topicName, partitionsIncrease)
-            .doOnNext(t -> updateCluster(t, cluster.getName(), cluster))
-            .map(t -> new PartitionsIncreaseResponseDTO()
-                .topicName(t.getName())
-                .totalPartitionsCount(t.getPartitionCount())))
-        .orElse(Mono.error(new ClusterNotFoundException(
-            String.format("No cluster for name '%s'", clusterName)
-        )));
-  }
-
-  public Mono<Void> deleteConsumerGroupById(String clusterName,
-                                            String groupId) {
-    return clustersStorage.getClusterByName(clusterName)
-        .map(cluster -> adminClientService.get(cluster)
-            .flatMap(adminClient -> adminClient.deleteConsumerGroups(List.of(groupId)))
-            .onErrorResume(this::reThrowCustomException)
-        )
-        .orElse(Mono.empty());
-  }
-
-  public TopicMessageSchemaDTO getTopicSchema(String clusterName, String topicName) {
-    var cluster = clustersStorage.getClusterByName(clusterName)
-        .orElseThrow(ClusterNotFoundException::new);
-    if (!cluster.getTopics().containsKey(topicName)) {
-      throw new TopicNotFoundException();
-    }
-    return deserializationService
-        .getRecordDeserializerForCluster(cluster)
-        .getTopicSchema(topicName);
-  }
-
-  public Mono<Void> sendMessage(String clusterName, String topicName, CreateTopicMessageDTO msg) {
-    var cluster = clustersStorage.getClusterByName(clusterName)
-        .orElseThrow(ClusterNotFoundException::new);
-    if (!cluster.getTopics().containsKey(topicName)) {
-      throw new TopicNotFoundException();
-    }
-    if (msg.getKey() == null && msg.getContent() == null) {
-      throw new ValidationException("Invalid message: both key and value can't be null");
-    }
-    if (msg.getPartition() != null
-        && msg.getPartition() > cluster.getTopics().get(topicName).getPartitionCount() - 1) {
-      throw new ValidationException("Invalid partition");
-    }
-    return kafkaService.sendMessage(cluster, topicName, msg).then();
-  }
-
-  @NotNull
-  private Mono<Void> reThrowCustomException(Throwable e) {
-    if (e instanceof GroupIdNotFoundException) {
-      return Mono.error(new NotFoundException("The group id does not exist"));
-    } else if (e instanceof GroupNotEmptyException) {
-      return Mono.error(new IllegalEntityStateException("The group is not empty"));
-    } else {
-      return Mono.error(e);
-    }
-  }
-
-  public Mono<ReplicationFactorChangeResponseDTO> changeReplicationFactor(
-      String clusterName,
-      String topicName,
-      ReplicationFactorChangeDTO replicationFactorChange) {
-    return clustersStorage.getClusterByName(clusterName).map(cluster ->
-        kafkaService.changeReplicationFactor(cluster, topicName, replicationFactorChange)
-            .doOnNext(topic -> updateCluster(topic, cluster.getName(), cluster))
-            .map(t -> new ReplicationFactorChangeResponseDTO()
-                .topicName(t.getName())
-                .totalReplicationFactor(t.getReplicationFactor())))
-        .orElse(Mono.error(new ClusterNotFoundException(
-            String.format("No cluster for name '%s'", clusterName))));
-  }
-
-  public Flux<BrokersLogdirsDTO> getAllBrokersLogdirs(String clusterName, List<Integer> brokers) {
-    return Mono.justOrEmpty(clustersStorage.getClusterByName(clusterName))
-        .flatMap(c -> kafkaService.getClusterLogDirs(c, brokers))
-        .map(describeLogDirsMapper::toBrokerLogDirsList)
-        .flatMapMany(Flux::fromIterable);
-  }
-
-  public Mono<Void> updateBrokerLogDir(
-      String clusterName, Integer id, BrokerLogdirUpdateDTO brokerLogDir) {
-    return Mono.justOrEmpty(clustersStorage.getClusterByName(clusterName))
-        .flatMap(c -> kafkaService.updateBrokerLogDir(c, id, brokerLogDir));
-  }
-
-  public Mono<Void> updateBrokerConfigByName(String clusterName,
-                                             Integer id,
-                                             String name,
-                                             String value) {
-    return Mono.justOrEmpty(clustersStorage.getClusterByName(clusterName))
-        .flatMap(c -> kafkaService.updateBrokerConfigByName(c, id, name, value));
-  }
 }
\ No newline at end of file
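A side note on the page-count expression deleted above (the getTopics logic presumably re-homes in TopicsService): it is ceiling division written out longhand. A worked fragment:

```java
// Ceiling division, e.g. 26 topics at perPage = 25 -> 2 pages.
int totalPages = (topicCount / perPage) + (topicCount % perPage == 0 ? 0 : 1);
// Equivalent, more compact form (valid while perPage > 0):
int totalPagesAlt = (topicCount + perPage - 1) / perPage;
```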
diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/ClustersMetricsScheduler.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/ClustersMetricsScheduler.java
index 034a81065c..5dd1938fea 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/ClustersMetricsScheduler.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/ClustersMetricsScheduler.java
@@ -15,7 +15,7 @@ public class ClustersMetricsScheduler {
 
   private final ClustersStorage clustersStorage;
 
-  private final MetricsUpdateService metricsUpdateService;
+  private final MetricsService metricsService;
 
   @Scheduled(fixedRateString = "${kafka.update-metrics-rate-millis:30000}")
   public void updateMetrics() {
@@ -23,7 +23,10 @@ public class ClustersMetricsScheduler {
         .parallel()
         .runOn(Schedulers.parallel())
         .map(Map.Entry::getValue)
-        .flatMap(metricsUpdateService::updateMetrics)
+        .flatMap(cluster -> {
+          log.debug("Start getting metrics for kafkaCluster: {}", cluster.getName());
+          return metricsService.updateClusterMetrics(cluster);
+        })
         .doOnNext(s -> clustersStorage.setKafkaCluster(s.getName(), s))
         .then()
         .block();
diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/ClustersStorage.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/ClustersStorage.java
index 638af22775..a1fb3bea01 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/ClustersStorage.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/ClustersStorage.java
@@ -2,6 +2,7 @@ package com.provectus.kafka.ui.service;
 
 import com.provectus.kafka.ui.config.ClustersProperties;
 import com.provectus.kafka.ui.mapper.ClusterMapper;
+import com.provectus.kafka.ui.model.InternalTopic;
 import com.provectus.kafka.ui.model.KafkaCluster;
 import java.util.Collection;
 import java.util.HashMap;
@@ -48,8 +49,27 @@ public class ClustersStorage {
     return Optional.ofNullable(kafkaClusters.get(clusterName));
   }
 
-  public void setKafkaCluster(String key, KafkaCluster kafkaCluster) {
+  public KafkaCluster setKafkaCluster(String key, KafkaCluster kafkaCluster) {
     this.kafkaClusters.put(key, kafkaCluster);
+    return kafkaCluster;
+  }
+
+  public void onTopicDeleted(KafkaCluster cluster, String topicToDelete) {
+    var topics = Optional.ofNullable(cluster.getTopics())
+        .map(HashMap::new)
+        .orElseGet(HashMap::new);
+    topics.remove(topicToDelete);
+    var updatedCluster = cluster.toBuilder().topics(topics).build();
+    setKafkaCluster(cluster.getName(), updatedCluster);
+  }
+
+  public void onTopicUpdated(KafkaCluster cluster, InternalTopic updatedTopic) {
+    var topics = Optional.ofNullable(cluster.getTopics())
+        .map(HashMap::new)
+        .orElseGet(HashMap::new);
+    topics.put(updatedTopic.getName(), updatedTopic);
+    var updatedCluster = cluster.toBuilder().topics(topics).build();
+    setKafkaCluster(cluster.getName(), updatedCluster);
   }
 
   public Map<String, KafkaCluster> getKafkaClustersMap() {
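onTopicDeleted/onTopicUpdated follow a copy-on-write pattern: the topics map is copied, mutated, and published inside a rebuilt KafkaCluster, so readers never see a half-updated map. A condensed, self-contained sketch of the idea (stand-in types; the real code uses Lombok's toBuilder()):

```java
import java.util.HashMap;
import java.util.Map;

class CopyOnWriteSketch {
  // Mirrors the pattern above with a plain map standing in for the
  // immutable KafkaCluster's topics.
  static Map<String, String> withoutTopic(Map<String, String> topics, String topicToDelete) {
    var copy = new HashMap<>(topics); // copy; never mutate the published map
    copy.remove(topicToDelete);
    return copy;                      // callers swap in the new snapshot atomically
  }
}
```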
diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/ConsumerGroupService.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/ConsumerGroupService.java
new file mode 100644
index 0000000000..f3e60af306
--- /dev/null
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/ConsumerGroupService.java
@@ -0,0 +1,133 @@
+package com.provectus.kafka.ui.service;
+
+import com.provectus.kafka.ui.model.ConsumerGroupDTO;
+import com.provectus.kafka.ui.model.ConsumerGroupDetailsDTO;
+import com.provectus.kafka.ui.model.InternalConsumerGroup;
+import com.provectus.kafka.ui.model.KafkaCluster;
+import com.provectus.kafka.ui.util.ClusterUtil;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import java.util.Properties;
+import java.util.UUID;
+import java.util.stream.Collectors;
+import lombok.RequiredArgsConstructor;
+import org.apache.kafka.clients.consumer.ConsumerConfig;
+import org.apache.kafka.clients.consumer.KafkaConsumer;
+import org.apache.kafka.common.TopicPartition;
+import org.apache.kafka.common.serialization.BytesDeserializer;
+import org.apache.kafka.common.utils.Bytes;
+import org.springframework.stereotype.Service;
+import reactor.core.publisher.Flux;
+import reactor.core.publisher.Mono;
+
+
+@Service
+@RequiredArgsConstructor
+public class ConsumerGroupService {
+
+  private final AdminClientService adminClientService;
+
+  private Mono<List<InternalConsumerGroup>> getConsumerGroupsInternal(KafkaCluster cluster) {
+    return adminClientService.get(cluster).flatMap(ac ->
+        ac.listConsumerGroups()
+            .flatMap(groupIds -> getConsumerGroupsInternal(cluster, groupIds)));
+  }
+
+  private Mono<List<InternalConsumerGroup>> getConsumerGroupsInternal(KafkaCluster cluster,
+                                                                      List<String> groupIds) {
+    return adminClientService.get(cluster).flatMap(ac ->
+        ac.describeConsumerGroups(groupIds)
+            .map(Map::values)
+            .flatMap(descriptions ->
+                Flux.fromIterable(descriptions)
+                    .parallel()
+                    .flatMap(d ->
+                        ac.listConsumerGroupOffsets(d.groupId())
+                            .map(offsets -> ClusterUtil.convertToInternalConsumerGroup(d, offsets))
+                    )
+                    .sequential()
+                    .collectList()));
+  }
+
+  public Mono<List<InternalConsumerGroup>> getConsumerGroups(
+      KafkaCluster cluster, Optional<String> topic, List<String> groupIds) {
+    final Mono<List<InternalConsumerGroup>> consumerGroups;
+
+    if (groupIds.isEmpty()) {
+      consumerGroups = getConsumerGroupsInternal(cluster);
+    } else {
+      consumerGroups = getConsumerGroupsInternal(cluster, groupIds);
+    }
+
+    return consumerGroups.map(c ->
+        c.stream()
+            .map(d -> ClusterUtil.filterConsumerGroupTopic(d, topic))
+            .filter(Optional::isPresent)
+            .map(Optional::get)
+            .map(g ->
+                g.toBuilder().endOffsets(
+                    topicPartitionsEndOffsets(cluster, g.getOffsets().keySet())
+                ).build()
+            )
+            .collect(Collectors.toList())
+    );
+  }
+
+  public Mono<List<ConsumerGroupDTO>> getConsumerGroups(KafkaCluster cluster) {
+    return getConsumerGroups(cluster, Optional.empty());
+  }
+
+  public Mono<List<ConsumerGroupDTO>> getConsumerGroups(KafkaCluster cluster,
+                                                        Optional<String> topic) {
+    return getConsumerGroups(cluster, topic, Collections.emptyList())
+        .map(c ->
+            c.stream().map(ClusterUtil::convertToConsumerGroup).collect(Collectors.toList())
+        );
+  }
+
+  private Map<TopicPartition, Long> topicPartitionsEndOffsets(
+      KafkaCluster cluster, Collection<TopicPartition> topicPartitions) {
+    try (KafkaConsumer<Bytes, Bytes> consumer = createConsumer(cluster)) {
+      return consumer.endOffsets(topicPartitions);
+    }
+  }
+
+  public Mono<ConsumerGroupDetailsDTO> getConsumerGroupDetail(KafkaCluster cluster,
+                                                              String consumerGroupId) {
+    return getConsumerGroups(
+        cluster,
+        Optional.empty(),
+        Collections.singletonList(consumerGroupId)
+    ).filter(groups -> !groups.isEmpty()).map(groups -> groups.get(0)).map(
+        ClusterUtil::convertToConsumerGroupDetails
+    );
+  }
+
+  public Mono<Void> deleteConsumerGroupById(KafkaCluster cluster,
+                                            String groupId) {
+    return adminClientService.get(cluster)
+        .flatMap(adminClient -> adminClient.deleteConsumerGroups(List.of(groupId)));
+  }
+
+  public KafkaConsumer<Bytes, Bytes> createConsumer(KafkaCluster cluster) {
+    return createConsumer(cluster, Map.of());
+  }
+
+  public KafkaConsumer<Bytes, Bytes> createConsumer(KafkaCluster cluster,
+                                                    Map<String, Object> properties) {
+    Properties props = new Properties();
+    props.putAll(cluster.getProperties());
+    props.put(ConsumerConfig.CLIENT_ID_CONFIG, "kafka-ui-" + UUID.randomUUID());
+    props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, cluster.getBootstrapServers());
+    props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, BytesDeserializer.class);
+    props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, BytesDeserializer.class);
+    props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
+    props.putAll(properties);
+
+    return new KafkaConsumer<>(props);
+  }
+
+}
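Two reviewer notes on createConsumer: the returned KafkaConsumer must be closed by the caller (topicPartitionsEndOffsets does this via try-with-resources), and because props.putAll(properties) runs last, per-call overrides win over cluster defaults. A usage sketch with a hypothetical override and topic name:

```java
import java.util.List;
import java.util.Map;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.common.TopicPartition;

// Hypothetical caller; "orders" and the override value are made up.
try (var consumer = consumerGroupService.createConsumer(cluster,
    Map.of(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, 500))) {
  Map<TopicPartition, Long> endOffsets =
      consumer.endOffsets(List.of(new TopicPartition("orders", 0)));
}
```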
diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/ConsumingService.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/ConsumingService.java
deleted file mode 100644
index 1b76f1f0d7..0000000000
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/ConsumingService.java
+++ /dev/null
@@ -1,119 +0,0 @@
-package com.provectus.kafka.ui.service;
-
-import com.fasterxml.jackson.databind.ObjectMapper;
-import com.provectus.kafka.ui.emitter.BackwardRecordEmitter;
-import com.provectus.kafka.ui.emitter.ForwardRecordEmitter;
-import com.provectus.kafka.ui.model.ConsumerPosition;
-import com.provectus.kafka.ui.model.KafkaCluster;
-import com.provectus.kafka.ui.model.SeekDirectionDTO;
-import com.provectus.kafka.ui.model.TopicMessageDTO;
-import com.provectus.kafka.ui.model.TopicMessageEventDTO;
-import com.provectus.kafka.ui.serde.DeserializationService;
-import com.provectus.kafka.ui.serde.RecordSerDe;
-import com.provectus.kafka.ui.util.FilterTopicMessageEvents;
-import com.provectus.kafka.ui.util.OffsetsSeekBackward;
-import com.provectus.kafka.ui.util.OffsetsSeekForward;
-import java.util.Collection;
-import java.util.List;
-import java.util.Map;
-import java.util.Optional;
-import java.util.stream.Collectors;
-import lombok.RequiredArgsConstructor;
-import lombok.extern.log4j.Log4j2;
-import org.apache.kafka.clients.consumer.Consumer;
-import org.apache.kafka.clients.consumer.KafkaConsumer;
-import org.apache.kafka.common.TopicPartition;
-import org.apache.kafka.common.utils.Bytes;
-import org.springframework.stereotype.Service;
-import org.springframework.util.StringUtils;
-import reactor.core.publisher.Flux;
-import reactor.core.publisher.FluxSink;
-import reactor.core.publisher.Mono;
-import reactor.core.scheduler.Schedulers;
-
-@Service
-@Log4j2
-@RequiredArgsConstructor
-public class ConsumingService {
-
-  private static final int MAX_RECORD_LIMIT = 100;
-  private static final int DEFAULT_RECORD_LIMIT = 20;
-
-  private final KafkaService kafkaService;
-  private final DeserializationService deserializationService;
-  private final ObjectMapper objectMapper = new ObjectMapper();
-
-  public Flux<TopicMessageEventDTO> loadMessages(KafkaCluster cluster, String topic,
-                                                 ConsumerPosition consumerPosition, String query,
-                                                 Integer limit) {
-    int recordsLimit = Optional.ofNullable(limit)
-        .map(s -> Math.min(s, MAX_RECORD_LIMIT))
-        .orElse(DEFAULT_RECORD_LIMIT);
-
-    java.util.function.Consumer<? super FluxSink<TopicMessageEventDTO>> emitter;
-    RecordSerDe recordDeserializer =
-        deserializationService.getRecordDeserializerForCluster(cluster);
-    if (consumerPosition.getSeekDirection().equals(SeekDirectionDTO.FORWARD)) {
-      emitter = new ForwardRecordEmitter(
-          () -> kafkaService.createConsumer(cluster),
-          new OffsetsSeekForward(topic, consumerPosition),
-          recordDeserializer
-      );
-    } else {
-      emitter = new BackwardRecordEmitter(
-          (Map<String, Object> props) -> kafkaService.createConsumer(cluster, props),
-          new OffsetsSeekBackward(topic, consumerPosition, recordsLimit),
-          recordDeserializer
-      );
-    }
-    return Flux.create(emitter)
-        .filter(m -> filterTopicMessage(m, query))
-        .takeWhile(new FilterTopicMessageEvents(recordsLimit))
-        .subscribeOn(Schedulers.elastic())
-        .share();
-  }
-
-  public Mono<Map<TopicPartition, Long>> offsetsForDeletion(KafkaCluster cluster, String topicName,
-                                                            List<Integer> partitionsToInclude) {
-    return Mono.fromSupplier(() -> {
-      try (KafkaConsumer<Bytes, Bytes> consumer = kafkaService.createConsumer(cluster)) {
-        return significantOffsets(consumer, topicName, partitionsToInclude);
-      } catch (Exception e) {
-        log.error("Error occurred while consuming records", e);
-        throw new RuntimeException(e);
-      }
-    });
-  }
-
-  /**
-   * returns end offsets for partitions where start offset != end offsets.
-   * This is useful when we need to verify that partition is not empty.
- */ - public static Map significantOffsets(Consumer consumer, - String topicName, - Collection - partitionsToInclude) { - var partitions = consumer.partitionsFor(topicName).stream() - .filter(p -> partitionsToInclude.isEmpty() || partitionsToInclude.contains(p.partition())) - .map(p -> new TopicPartition(topicName, p.partition())) - .collect(Collectors.toList()); - var beginningOffsets = consumer.beginningOffsets(partitions); - var endOffsets = consumer.endOffsets(partitions); - return endOffsets.entrySet().stream() - .filter(entry -> !beginningOffsets.get(entry.getKey()).equals(entry.getValue())) - .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); - } - - private boolean filterTopicMessage(TopicMessageEventDTO message, String query) { - log.info("filter"); - if (StringUtils.isEmpty(query) - || !message.getType().equals(TopicMessageEventDTO.TypeEnum.MESSAGE)) { - return true; - } - - final TopicMessageDTO msg = message.getMessage(); - return (!StringUtils.isEmpty(msg.getKey()) && msg.getKey().contains(query)) - || (!StringUtils.isEmpty(msg.getContent()) && msg.getContent().contains(query)); - } - -} diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/FeatureService.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/FeatureService.java index 58610259da..eb26369a63 100644 --- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/FeatureService.java +++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/FeatureService.java @@ -1,15 +1,61 @@ package com.provectus.kafka.ui.service; +import static com.provectus.kafka.ui.util.Constants.DELETE_TOPIC_ENABLE; + import com.provectus.kafka.ui.model.Feature; import com.provectus.kafka.ui.model.KafkaCluster; +import java.util.ArrayList; +import java.util.List; +import java.util.Optional; +import java.util.function.Predicate; +import lombok.RequiredArgsConstructor; +import lombok.extern.log4j.Log4j2; +import org.apache.kafka.common.Node; +import org.springframework.stereotype.Service; import reactor.core.publisher.Flux; +import reactor.core.publisher.Mono; -public interface FeatureService { - /** - * Get available features. - * - * @param cluster - cluster - * @return List of Feature - */ - Flux getAvailableFeatures(KafkaCluster cluster); +@Service +@RequiredArgsConstructor +@Log4j2 +public class FeatureService { + + private final BrokerService brokerService; + + public Flux getAvailableFeatures(KafkaCluster cluster) { + List> features = new ArrayList<>(); + + if (Optional.ofNullable(cluster.getKafkaConnect()) + .filter(Predicate.not(List::isEmpty)) + .isPresent()) { + features.add(Mono.just(Feature.KAFKA_CONNECT)); + } + + if (cluster.getKsqldbServer() != null) { + features.add(Mono.just(Feature.KSQL_DB)); + } + + if (cluster.getSchemaRegistry() != null) { + features.add(Mono.just(Feature.SCHEMA_REGISTRY)); + } + + features.add( + isTopicDeletionEnabled(cluster) + .flatMap(r -> r ? 
Mono.just(Feature.TOPIC_DELETION) : Mono.empty())
+    );
+
+    return Flux.fromIterable(features).flatMap(m -> m);
+  }
+
+  private Mono<Boolean> isTopicDeletionEnabled(KafkaCluster cluster) {
+    return brokerService.getController(cluster)
+        .map(Node::id)
+        .flatMap(broker -> brokerService.getBrokerConfigMap(cluster, broker))
+        .map(config -> {
+          if (config != null && config.get(DELETE_TOPIC_ENABLE) != null) {
+            return Boolean.parseBoolean(config.get(DELETE_TOPIC_ENABLE).getValue());
+          }
+          return false;
+        });
+  }
 }
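The topic-deletion feature flag above is detected by reading `delete.topic.enable` from the controller broker's config through the reactive `BrokerService` wrappers. For readers who want the same check against a plain `AdminClient`, a self-contained sketch (the `controllerId` lookup is assumed to have happened already, e.g. via `describeCluster()`):

```java
import java.util.List;
import java.util.Map;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.Config;
import org.apache.kafka.clients.admin.ConfigEntry;
import org.apache.kafka.common.config.ConfigResource;

public class TopicDeletionCheck {
  // Reads delete.topic.enable from one broker; falls back to false when absent,
  // mirroring the defensive null checks in isTopicDeletionEnabled above.
  static boolean isTopicDeletionEnabled(Admin admin, int controllerId) throws Exception {
    ConfigResource broker =
        new ConfigResource(ConfigResource.Type.BROKER, String.valueOf(controllerId));
    Map<ConfigResource, Config> configs = admin.describeConfigs(List.of(broker)).all().get();
    ConfigEntry entry = configs.get(broker).get("delete.topic.enable");
    return entry != null && Boolean.parseBoolean(entry.value());
  }
}
```

diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/FeatureServiceImpl.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/FeatureServiceImpl.java
deleted file mode 100644
index 413c237d9c..0000000000
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/FeatureServiceImpl.java
+++ /dev/null
@@ -1,62 +0,0 @@
-package com.provectus.kafka.ui.service;
-
-import static com.provectus.kafka.ui.util.Constants.DELETE_TOPIC_ENABLE;
-
-import com.provectus.kafka.ui.model.Feature;
-import com.provectus.kafka.ui.model.KafkaCluster;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Optional;
-import java.util.function.Predicate;
-import lombok.RequiredArgsConstructor;
-import lombok.extern.log4j.Log4j2;
-import org.apache.kafka.common.Node;
-import org.springframework.stereotype.Service;
-import reactor.core.publisher.Flux;
-import reactor.core.publisher.Mono;
-
-@Service
-@RequiredArgsConstructor
-@Log4j2
-public class FeatureServiceImpl implements FeatureService {
-
-  private final BrokerService brokerService;
-
-  @Override
-  public Flux<Feature> getAvailableFeatures(KafkaCluster cluster) {
-    List<Mono<Feature>> features = new ArrayList<>();
-
-    if (Optional.ofNullable(cluster.getKafkaConnect())
-        .filter(Predicate.not(List::isEmpty))
-        .isPresent()) {
-      features.add(Mono.just(Feature.KAFKA_CONNECT));
-    }
-
-    if (cluster.getKsqldbServer() != null) {
-      features.add(Mono.just(Feature.KSQL_DB));
-    }
-
-    if (cluster.getSchemaRegistry() != null) {
-      features.add(Mono.just(Feature.SCHEMA_REGISTRY));
-    }
-
-    features.add(
-        topicDeletionCheck(cluster)
-            .flatMap(r -> r ?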
Mono.just(Feature.TOPIC_DELETION) : Mono.empty()) - ); - - return Flux.fromIterable(features).flatMap(m -> m); - } - - private Mono topicDeletionCheck(KafkaCluster cluster) { - return brokerService.getController(cluster) - .map(Node::id) - .flatMap(broker -> brokerService.getBrokerConfigMap(cluster, broker)) - .map(config -> { - if (config != null && config.get(DELETE_TOPIC_ENABLE) != null) { - return Boolean.parseBoolean(config.get(DELETE_TOPIC_ENABLE).getValue()); - } - return false; - }); - } -} diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/KafkaConnectService.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/KafkaConnectService.java index d05655ca13..1b6eb30773 100644 --- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/KafkaConnectService.java +++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/KafkaConnectService.java @@ -21,7 +21,6 @@ import com.provectus.kafka.ui.model.KafkaConnectCluster; import com.provectus.kafka.ui.model.NewConnectorDTO; import com.provectus.kafka.ui.model.TaskDTO; import com.provectus.kafka.ui.model.connect.InternalConnectInfo; -import java.util.Collection; import java.util.List; import java.util.Map; import java.util.function.Function; @@ -47,25 +46,24 @@ public class KafkaConnectService { private final KafkaConnectMapper kafkaConnectMapper; private final ObjectMapper objectMapper; - public Mono> getConnects(String clusterName) { + public Mono> getConnects(KafkaCluster cluster) { return Mono.just( - Flux.fromIterable(clustersStorage.getClusterByName(clusterName) - .map(KafkaCluster::getKafkaConnect).stream() - .flatMap(Collection::stream) - .map(clusterMapper::toKafkaConnect) - .collect(Collectors.toList()) + Flux.fromIterable( + cluster.getKafkaConnect().stream() + .map(clusterMapper::toKafkaConnect) + .collect(Collectors.toList()) ) ); } - public Flux getAllConnectors(final String clusterName, + public Flux getAllConnectors(final KafkaCluster cluster, final String search) { - return getConnects(clusterName) + return getConnects(cluster) .flatMapMany(Function.identity()) - .flatMap(connect -> getConnectorNames(clusterName, connect)) - .flatMap(pair -> getConnector(clusterName, pair.getT1(), pair.getT2())) + .flatMap(connect -> getConnectorNames(cluster, connect)) + .flatMap(pair -> getConnector(cluster, pair.getT1(), pair.getT2())) .flatMap(connector -> - getConnectorConfig(clusterName, connector.getConnect(), connector.getName()) + getConnectorConfig(cluster, connector.getConnect(), connector.getName()) .map(config -> InternalConnectInfo.builder() .connector(connector) .config(config) @@ -74,7 +72,7 @@ public class KafkaConnectService { ) .flatMap(connectInfo -> { ConnectorDTO connector = connectInfo.getConnector(); - return getConnectorTasks(clusterName, connector.getConnect(), connector.getName()) + return getConnectorTasks(cluster, connector.getConnect(), connector.getName()) .collectList() .map(tasks -> InternalConnectInfo.builder() .connector(connector) @@ -85,7 +83,7 @@ public class KafkaConnectService { }) .flatMap(connectInfo -> { ConnectorDTO connector = connectInfo.getConnector(); - return getConnectorTopics(clusterName, connector.getConnect(), connector.getName()) + return getConnectorTopics(cluster, connector.getConnect(), connector.getName()) .map(ct -> InternalConnectInfo.builder() .connector(connector) .config(connectInfo.getConfig()) @@ -115,9 +113,9 @@ public class KafkaConnectService { .map(String::toUpperCase); } - private Mono getConnectorTopics(String clusterName, String 
connectClusterName, + private Mono getConnectorTopics(KafkaCluster cluster, String connectClusterName, String connectorName) { - return getConnectAddress(clusterName, connectClusterName) + return getConnectAddress(cluster, connectClusterName) .flatMap(connectUrl -> KafkaConnectClients .withBaseUrl(connectUrl) .getConnectorTopics(connectorName) @@ -125,8 +123,8 @@ public class KafkaConnectService { ); } - private Flux> getConnectorNames(String clusterName, ConnectDTO connect) { - return getConnectors(clusterName, connect.getName()) + private Flux> getConnectorNames(KafkaCluster cluster, ConnectDTO connect) { + return getConnectors(cluster, connect.getName()) .collectList().map(e -> e.get(0)) // for some reason `getConnectors` method returns the response as a single string .map(this::parseToList) @@ -140,30 +138,30 @@ public class KafkaConnectService { }); } - public Flux getConnectors(String clusterName, String connectName) { - return getConnectAddress(clusterName, connectName) + public Flux getConnectors(KafkaCluster cluster, String connectName) { + return getConnectAddress(cluster, connectName) .flatMapMany(connect -> KafkaConnectClients.withBaseUrl(connect).getConnectors(null) .doOnError(log::error) ); } - public Mono createConnector(String clusterName, String connectName, + public Mono createConnector(KafkaCluster cluster, String connectName, Mono connector) { - return getConnectAddress(clusterName, connectName) + return getConnectAddress(cluster, connectName) .flatMap(connect -> connector .map(kafkaConnectMapper::toClient) .flatMap(c -> KafkaConnectClients.withBaseUrl(connect).createConnector(c) ) - .flatMap(c -> getConnector(clusterName, connectName, c.getName())) + .flatMap(c -> getConnector(cluster, connectName, c.getName())) ); } - public Mono getConnector(String clusterName, String connectName, + public Mono getConnector(KafkaCluster cluster, String connectName, String connectorName) { - return getConnectAddress(clusterName, connectName) + return getConnectAddress(cluster, connectName) .flatMap(connect -> KafkaConnectClients.withBaseUrl(connect).getConnector(connectorName) .map(kafkaConnectMapper::fromClient) .flatMap(connector -> @@ -193,17 +191,17 @@ public class KafkaConnectService { ); } - public Mono> getConnectorConfig(String clusterName, String connectName, + public Mono> getConnectorConfig(KafkaCluster cluster, String connectName, String connectorName) { - return getConnectAddress(clusterName, connectName) + return getConnectAddress(cluster, connectName) .flatMap(connect -> KafkaConnectClients.withBaseUrl(connect).getConnectorConfig(connectorName) ); } - public Mono setConnectorConfig(String clusterName, String connectName, + public Mono setConnectorConfig(KafkaCluster cluster, String connectName, String connectorName, Mono requestBody) { - return getConnectAddress(clusterName, connectName) + return getConnectAddress(cluster, connectName) .flatMap(connect -> requestBody.flatMap(body -> KafkaConnectClients.withBaseUrl(connect) @@ -213,14 +211,15 @@ public class KafkaConnectService { ); } - public Mono deleteConnector(String clusterName, String connectName, String connectorName) { - return getConnectAddress(clusterName, connectName) + public Mono deleteConnector( + KafkaCluster cluster, String connectName, String connectorName) { + return getConnectAddress(cluster, connectName) .flatMap(connect -> KafkaConnectClients.withBaseUrl(connect).deleteConnector(connectorName) ); } - public Mono updateConnectorState(String clusterName, String connectName, + public Mono 
updateConnectorState(KafkaCluster cluster, String connectName, String connectorName, ConnectorActionDTO action) { Function> kafkaClientCall; switch (action) { @@ -239,13 +238,13 @@ public class KafkaConnectService { default: throw new IllegalStateException("Unexpected value: " + action); } - return getConnectAddress(clusterName, connectName) + return getConnectAddress(cluster, connectName) .flatMap(kafkaClientCall); } - public Flux getConnectorTasks(String clusterName, String connectName, + public Flux getConnectorTasks(KafkaCluster cluster, String connectName, String connectorName) { - return getConnectAddress(clusterName, connectName) + return getConnectAddress(cluster, connectName) .flatMapMany(connect -> KafkaConnectClients.withBaseUrl(connect).getConnectorTasks(connectorName) .map(kafkaConnectMapper::fromClient) @@ -258,17 +257,17 @@ public class KafkaConnectService { ); } - public Mono restartConnectorTask(String clusterName, String connectName, + public Mono restartConnectorTask(KafkaCluster cluster, String connectName, String connectorName, Integer taskId) { - return getConnectAddress(clusterName, connectName) + return getConnectAddress(cluster, connectName) .flatMap(connect -> KafkaConnectClients.withBaseUrl(connect).restartConnectorTask(connectorName, taskId) ); } - public Mono> getConnectorPlugins(String clusterName, + public Mono> getConnectorPlugins(KafkaCluster cluster, String connectName) { - return Mono.just(getConnectAddress(clusterName, connectName) + return Mono.just(getConnectAddress(cluster, connectName) .flatMapMany(connect -> KafkaConnectClients.withBaseUrl(connect).getConnectorPlugins() .map(kafkaConnectMapper::fromClient) @@ -276,8 +275,8 @@ public class KafkaConnectService { } public Mono validateConnectorPluginConfig( - String clusterName, String connectName, String pluginName, Mono requestBody) { - return getConnectAddress(clusterName, connectName) + KafkaCluster cluster, String connectName, String pluginName, Mono requestBody) { + return getConnectAddress(cluster, connectName) .flatMap(connect -> requestBody.flatMap(body -> KafkaConnectClients.withBaseUrl(connect) @@ -293,17 +292,11 @@ public class KafkaConnectService { .orElse(Mono.error(ClusterNotFoundException::new)); } - private Mono getConnectAddress(String clusterName, String connectName) { - return getCluster(clusterName) - .map(kafkaCluster -> - kafkaCluster.getKafkaConnect().stream() - .filter(connect -> connect.getName().equals(connectName)) - .findFirst() - .map(KafkaConnectCluster::getAddress) - ) - .flatMap(connect -> connect - .map(Mono::just) - .orElse(Mono.error(ConnectNotFoundException::new)) - ); + private Mono getConnectAddress(KafkaCluster cluster, String connectName) { + return Mono.justOrEmpty(cluster.getKafkaConnect().stream() + .filter(connect -> connect.getName().equals(connectName)) + .findFirst() + .map(KafkaConnectCluster::getAddress)) + .switchIfEmpty(Mono.error(ConnectNotFoundException::new)); } } diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/KafkaService.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/KafkaService.java deleted file mode 100644 index 5b9cee3c85..0000000000 --- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/KafkaService.java +++ /dev/null @@ -1,870 +0,0 @@ -package com.provectus.kafka.ui.service; - -import com.provectus.kafka.ui.exception.InvalidRequestApiException; -import com.provectus.kafka.ui.exception.LogDirNotFoundApiException; -import com.provectus.kafka.ui.exception.TopicMetadataException; -import 
com.provectus.kafka.ui.exception.TopicOrPartitionNotFoundException; -import com.provectus.kafka.ui.exception.ValidationException; -import com.provectus.kafka.ui.model.BrokerLogdirUpdateDTO; -import com.provectus.kafka.ui.model.CleanupPolicy; -import com.provectus.kafka.ui.model.CreateTopicMessageDTO; -import com.provectus.kafka.ui.model.InternalBrokerDiskUsage; -import com.provectus.kafka.ui.model.InternalBrokerMetrics; -import com.provectus.kafka.ui.model.InternalClusterMetrics; -import com.provectus.kafka.ui.model.InternalConsumerGroup; -import com.provectus.kafka.ui.model.InternalPartition; -import com.provectus.kafka.ui.model.InternalReplica; -import com.provectus.kafka.ui.model.InternalSegmentSizeDto; -import com.provectus.kafka.ui.model.InternalTopic; -import com.provectus.kafka.ui.model.InternalTopicConfig; -import com.provectus.kafka.ui.model.KafkaCluster; -import com.provectus.kafka.ui.model.MetricDTO; -import com.provectus.kafka.ui.model.PartitionsIncreaseDTO; -import com.provectus.kafka.ui.model.ReplicationFactorChangeDTO; -import com.provectus.kafka.ui.model.ServerStatusDTO; -import com.provectus.kafka.ui.model.TopicCreationDTO; -import com.provectus.kafka.ui.model.TopicUpdateDTO; -import com.provectus.kafka.ui.serde.DeserializationService; -import com.provectus.kafka.ui.serde.RecordSerDe; -import com.provectus.kafka.ui.util.ClusterUtil; -import com.provectus.kafka.ui.util.JmxClusterUtil; -import com.provectus.kafka.ui.util.JmxMetricsName; -import com.provectus.kafka.ui.util.JmxMetricsValueName; -import java.math.BigDecimal; -import java.util.ArrayList; -import java.util.Collection; -import java.util.Collections; -import java.util.Comparator; -import java.util.HashMap; -import java.util.List; -import java.util.LongSummaryStatistics; -import java.util.Map; -import java.util.Optional; -import java.util.Properties; -import java.util.UUID; -import java.util.concurrent.CompletableFuture; -import java.util.stream.Collectors; -import lombok.RequiredArgsConstructor; -import lombok.SneakyThrows; -import lombok.extern.log4j.Log4j2; -import org.apache.kafka.clients.admin.NewPartitionReassignment; -import org.apache.kafka.clients.admin.NewPartitions; -import org.apache.kafka.clients.consumer.ConsumerConfig; -import org.apache.kafka.clients.consumer.KafkaConsumer; -import org.apache.kafka.clients.producer.KafkaProducer; -import org.apache.kafka.clients.producer.ProducerConfig; -import org.apache.kafka.clients.producer.ProducerRecord; -import org.apache.kafka.clients.producer.RecordMetadata; -import org.apache.kafka.common.Node; -import org.apache.kafka.common.TopicPartition; -import org.apache.kafka.common.TopicPartitionReplica; -import org.apache.kafka.common.errors.InvalidRequestException; -import org.apache.kafka.common.errors.LogDirNotFoundException; -import org.apache.kafka.common.errors.TimeoutException; -import org.apache.kafka.common.errors.UnknownTopicOrPartitionException; -import org.apache.kafka.common.header.Header; -import org.apache.kafka.common.header.internals.RecordHeader; -import org.apache.kafka.common.header.internals.RecordHeaders; -import org.apache.kafka.common.requests.DescribeLogDirsResponse; -import org.apache.kafka.common.serialization.ByteArraySerializer; -import org.apache.kafka.common.serialization.BytesDeserializer; -import org.apache.kafka.common.utils.Bytes; -import org.springframework.stereotype.Service; -import reactor.core.publisher.Flux; -import reactor.core.publisher.Mono; -import reactor.util.function.Tuple2; -import reactor.util.function.Tuple3; 
-import reactor.util.function.Tuples; - -@Service -@RequiredArgsConstructor -@Log4j2 -public class KafkaService { - - private final ZookeeperService zookeeperService; - private final JmxClusterUtil jmxClusterUtil; - private final ClustersStorage clustersStorage; - private final DeserializationService deserializationService; - private final AdminClientService adminClientService; - private final FeatureService featureService; - - - public KafkaCluster getUpdatedCluster(KafkaCluster cluster, InternalTopic updatedTopic) { - final Map topics = - Optional.ofNullable(cluster.getTopics()).map( - t -> new HashMap<>(cluster.getTopics()) - ).orElse(new HashMap<>()); - topics.put(updatedTopic.getName(), updatedTopic); - return cluster.toBuilder().topics(topics).build(); - } - - public KafkaCluster getUpdatedCluster(KafkaCluster cluster, String topicToDelete) { - final Map topics = new HashMap<>(cluster.getTopics()); - topics.remove(topicToDelete); - return cluster.toBuilder().topics(topics).build(); - } - - @SneakyThrows - public Mono getUpdatedCluster(KafkaCluster cluster) { - return adminClientService.get(cluster) - .flatMap( - ac -> ac.getClusterVersion().flatMap( - version -> - getClusterMetrics(ac) - .flatMap(i -> fillJmxMetrics(i, cluster.getName(), ac)) - .flatMap(clusterMetrics -> - getTopicsData(ac).flatMap(it -> { - if (cluster.getDisableLogDirsCollection() == null - || !cluster.getDisableLogDirsCollection()) { - return updateSegmentMetrics(ac, clusterMetrics, it - ); - } else { - return emptySegmentMetrics(clusterMetrics, it); - } - } - ).map(segmentSizeDto -> buildFromData(cluster, version, segmentSizeDto)) - ) - ) - ).flatMap( - nc -> featureService.getAvailableFeatures(cluster).collectList() - .map(f -> nc.toBuilder().features(f).build()) - ).doOnError(e -> - log.error("Failed to collect cluster {} info", cluster.getName(), e) - ).onErrorResume( - e -> Mono.just(cluster.toBuilder() - .status(ServerStatusDTO.OFFLINE) - .lastKafkaException(e) - .build()) - ); - } - - private KafkaCluster buildFromData(KafkaCluster currentCluster, - String version, - InternalSegmentSizeDto segmentSizeDto) { - - var topics = segmentSizeDto.getInternalTopicWithSegmentSize(); - var brokersMetrics = segmentSizeDto.getClusterMetricsWithSegmentSize(); - var brokersIds = new ArrayList<>(brokersMetrics.getInternalBrokerMetrics().keySet()); - - InternalClusterMetrics.InternalClusterMetricsBuilder metricsBuilder = - brokersMetrics.toBuilder(); - - InternalClusterMetrics topicsMetrics = collectTopicsMetrics(topics); - - ServerStatusDTO zookeeperStatus = ServerStatusDTO.OFFLINE; - Throwable zookeeperException = null; - try { - zookeeperStatus = zookeeperService.isZookeeperOnline(currentCluster) - ? 
ServerStatusDTO.ONLINE - : ServerStatusDTO.OFFLINE; - } catch (Throwable e) { - zookeeperException = e; - } - - InternalClusterMetrics clusterMetrics = metricsBuilder - .activeControllers(brokersMetrics.getActiveControllers()) - .topicCount(topicsMetrics.getTopicCount()) - .brokerCount(brokersMetrics.getBrokerCount()) - .underReplicatedPartitionCount(topicsMetrics.getUnderReplicatedPartitionCount()) - .inSyncReplicasCount(topicsMetrics.getInSyncReplicasCount()) - .outOfSyncReplicasCount(topicsMetrics.getOutOfSyncReplicasCount()) - .onlinePartitionCount(topicsMetrics.getOnlinePartitionCount()) - .offlinePartitionCount(topicsMetrics.getOfflinePartitionCount()) - .zooKeeperStatus(ClusterUtil.convertToIntServerStatus(zookeeperStatus)) - .version(version) - .build(); - - return currentCluster.toBuilder() - .version(version) - .status(ServerStatusDTO.ONLINE) - .zookeeperStatus(zookeeperStatus) - .lastZookeeperException(zookeeperException) - .lastKafkaException(null) - .metrics(clusterMetrics) - .topics(topics) - .brokers(brokersIds) - .build(); - } - - private InternalClusterMetrics collectTopicsMetrics(Map topics) { - - int underReplicatedPartitions = 0; - int inSyncReplicasCount = 0; - int outOfSyncReplicasCount = 0; - int onlinePartitionCount = 0; - int offlinePartitionCount = 0; - - for (InternalTopic topic : topics.values()) { - underReplicatedPartitions += topic.getUnderReplicatedPartitions(); - inSyncReplicasCount += topic.getInSyncReplicas(); - outOfSyncReplicasCount += (topic.getReplicas() - topic.getInSyncReplicas()); - onlinePartitionCount += - topic.getPartitions().values().stream().mapToInt(s -> s.getLeader() == null ? 0 : 1) - .sum(); - offlinePartitionCount += - topic.getPartitions().values().stream().mapToInt(s -> s.getLeader() != null ? 0 : 1) - .sum(); - } - - return InternalClusterMetrics.builder() - .underReplicatedPartitionCount(underReplicatedPartitions) - .inSyncReplicasCount(inSyncReplicasCount) - .outOfSyncReplicasCount(outOfSyncReplicasCount) - .onlinePartitionCount(onlinePartitionCount) - .offlinePartitionCount(offlinePartitionCount) - .topicCount(topics.size()) - .build(); - } - - private Map mergeWithConfigs( - List topics, Map> configs) { - return topics.stream() - .map(t -> t.toBuilder().topicConfigs(configs.get(t.getName())).build()) - .map(t -> t.toBuilder().cleanUpPolicy( - CleanupPolicy.fromString(t.getTopicConfigs().stream() - .filter(config -> config.getName().equals("cleanup.policy")) - .findFirst() - .orElseGet(() -> InternalTopicConfig.builder().value("unknown").build()) - .getValue())).build()) - .collect(Collectors.toMap( - InternalTopic::getName, - e -> e - )); - } - - @SneakyThrows - private Mono> getTopicsData(ReactiveAdminClient client) { - return client.listTopics(true) - .flatMap(topics -> getTopicsData(client, topics).collectList()); - } - - private Flux getTopicsData(ReactiveAdminClient client, Collection topics) { - final Mono>> configsMono = - loadTopicsConfig(client, topics); - - return client.describeTopics(topics) - .map(m -> m.values().stream() - .map(ClusterUtil::mapToInternalTopic).collect(Collectors.toList())) - .flatMap(internalTopics -> configsMono - .map(configs -> mergeWithConfigs(internalTopics, configs).values())) - .flatMapMany(Flux::fromIterable); - } - - - private Mono getClusterMetrics(ReactiveAdminClient client) { - return client.describeCluster().map(desc -> - InternalClusterMetrics.builder() - .brokerCount(desc.getNodes().size()) - .activeControllers(desc.getController() != null ? 
1 : 0) - .build() - ); - } - - @SneakyThrows - public Mono createTopic(ReactiveAdminClient adminClient, - Mono topicCreation) { - return topicCreation.flatMap(topicData -> - adminClient.createTopic( - topicData.getName(), - topicData.getPartitions(), - topicData.getReplicationFactor().shortValue(), - topicData.getConfigs() - ).thenReturn(topicData) - ) - .onErrorResume(t -> Mono.error(new TopicMetadataException(t.getMessage()))) - .flatMap(topicData -> getUpdatedTopic(adminClient, topicData.getName())) - .switchIfEmpty(Mono.error(new RuntimeException("Can't find created topic"))); - } - - public Mono createTopic( - KafkaCluster cluster, Mono topicCreation) { - return adminClientService.get(cluster).flatMap(ac -> createTopic(ac, topicCreation)); - } - - public Mono deleteTopic(KafkaCluster cluster, String topicName) { - return adminClientService.get(cluster).flatMap(c -> c.deleteTopic(topicName)); - } - - @SneakyThrows - private Mono>> loadTopicsConfig( - ReactiveAdminClient client, Collection topicNames) { - return client.getTopicsConfig(topicNames) - .map(configs -> - configs.entrySet().stream().collect(Collectors.toMap( - Map.Entry::getKey, - c -> c.getValue().stream() - .map(ClusterUtil::mapToInternalTopicConfig) - .collect(Collectors.toList())))); - } - - public Mono> getConsumerGroupsInternal(KafkaCluster cluster) { - return adminClientService.get(cluster).flatMap(ac -> - ac.listConsumerGroups() - .flatMap(groupIds -> getConsumerGroupsInternal(cluster, groupIds))); - } - - public Mono> getConsumerGroupsInternal(KafkaCluster cluster, - List groupIds) { - return adminClientService.get(cluster).flatMap(ac -> - ac.describeConsumerGroups(groupIds) - .map(Map::values) - .flatMap(descriptions -> - Flux.fromIterable(descriptions) - .parallel() - .flatMap(d -> - ac.listConsumerGroupOffsets(d.groupId()) - .map(offsets -> ClusterUtil.convertToInternalConsumerGroup(d, offsets)) - ) - .sequential() - .collectList())); - } - - public Mono> getConsumerGroups( - KafkaCluster cluster, Optional topic, List groupIds) { - final Mono> consumerGroups; - - if (groupIds.isEmpty()) { - consumerGroups = getConsumerGroupsInternal(cluster); - } else { - consumerGroups = getConsumerGroupsInternal(cluster, groupIds); - } - - return consumerGroups.map(c -> - c.stream() - .map(d -> ClusterUtil.filterConsumerGroupTopic(d, topic)) - .filter(Optional::isPresent) - .map(Optional::get) - .map(g -> - g.toBuilder().endOffsets( - topicPartitionsEndOffsets(cluster, g.getOffsets().keySet()) - ).build() - ) - .collect(Collectors.toList()) - ); - } - - public Map topicPartitionsEndOffsets( - KafkaCluster cluster, Collection topicPartitions) { - try (KafkaConsumer consumer = createConsumer(cluster)) { - return consumer.endOffsets(topicPartitions); - } - } - - public KafkaConsumer createConsumer(KafkaCluster cluster) { - return createConsumer(cluster, Map.of()); - } - - public KafkaConsumer createConsumer(KafkaCluster cluster, - Map properties) { - Properties props = new Properties(); - props.putAll(cluster.getProperties()); - props.put(ConsumerConfig.CLIENT_ID_CONFIG, "kafka-ui-" + UUID.randomUUID()); - props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, cluster.getBootstrapServers()); - props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, BytesDeserializer.class); - props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, BytesDeserializer.class); - props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest"); - props.putAll(properties); - - return new KafkaConsumer<>(props); - } - - @SneakyThrows - public Mono 
updateTopic(KafkaCluster cluster, - String topicName, - TopicUpdateDTO topicUpdate) { - return adminClientService.get(cluster) - .flatMap(ac -> - ac.updateTopicConfig(topicName, - topicUpdate.getConfigs()).then(getUpdatedTopic(ac, topicName))); - } - - private Mono getUpdatedTopic(ReactiveAdminClient ac, String topicName) { - return getTopicsData(ac, List.of(topicName)).next(); - } - - private InternalTopic mergeWithStats(InternalTopic topic, - Map topics, - Map partitions) { - final LongSummaryStatistics stats = topics.get(topic.getName()); - - return topic.toBuilder() - .segmentSize(stats.getSum()) - .segmentCount(stats.getCount()) - .partitions( - topic.getPartitions().entrySet().stream().map(e -> - Tuples.of(e.getKey(), mergeWithStats(topic.getName(), e.getValue(), partitions)) - ).collect(Collectors.toMap( - Tuple2::getT1, - Tuple2::getT2 - )) - ).build(); - } - - private InternalPartition mergeWithStats(String topic, InternalPartition partition, - Map partitions) { - final LongSummaryStatistics stats = - partitions.get(new TopicPartition(topic, partition.getPartition())); - return partition.toBuilder() - .segmentSize(stats.getSum()) - .segmentCount(stats.getCount()) - .build(); - } - - private Mono emptySegmentMetrics(InternalClusterMetrics clusterMetrics, - List internalTopics) { - return Mono.just( - InternalSegmentSizeDto.builder() - .clusterMetricsWithSegmentSize( - clusterMetrics.toBuilder() - .segmentSize(0) - .segmentCount(0) - .internalBrokerDiskUsage(Collections.emptyMap()) - .build() - ) - .internalTopicWithSegmentSize( - internalTopics.stream().collect( - Collectors.toMap( - InternalTopic::getName, - i -> i - ) - ) - ).build() - ); - } - - private Mono updateSegmentMetrics(ReactiveAdminClient ac, - InternalClusterMetrics clusterMetrics, - List internalTopics) { - return ac.describeCluster().flatMap( - clusterDescription -> - ac.describeLogDirs().map(log -> { - final List> topicPartitions = - log.entrySet().stream().flatMap(b -> - b.getValue().entrySet().stream().flatMap(topicMap -> - topicMap.getValue().replicaInfos.entrySet().stream() - .map(e -> Tuples.of(b.getKey(), e.getKey(), e.getValue().size)) - ) - ).collect(Collectors.toList()); - - final Map partitionStats = - topicPartitions.stream().collect( - Collectors.groupingBy( - Tuple2::getT2, - Collectors.summarizingLong(Tuple3::getT3) - ) - ); - - final Map topicStats = - topicPartitions.stream().collect( - Collectors.groupingBy( - t -> t.getT2().topic(), - Collectors.summarizingLong(Tuple3::getT3) - ) - ); - - final Map brokerStats = - topicPartitions.stream().collect( - Collectors.groupingBy( - Tuple2::getT1, - Collectors.summarizingLong(Tuple3::getT3) - ) - ); - - - final LongSummaryStatistics summary = - topicPartitions.stream().collect(Collectors.summarizingLong(Tuple3::getT3)); - - - final Map resultTopics = internalTopics.stream().map(e -> - Tuples.of(e.getName(), mergeWithStats(e, topicStats, partitionStats)) - ).collect(Collectors.toMap( - Tuple2::getT1, - Tuple2::getT2 - )); - - final Map resultBrokers = - brokerStats.entrySet().stream().map(e -> - Tuples.of(e.getKey(), InternalBrokerDiskUsage.builder() - .segmentSize(e.getValue().getSum()) - .segmentCount(e.getValue().getCount()) - .build() - ) - ).collect(Collectors.toMap( - Tuple2::getT1, - Tuple2::getT2 - )); - - return InternalSegmentSizeDto.builder() - .clusterMetricsWithSegmentSize( - clusterMetrics.toBuilder() - .segmentSize(summary.getSum()) - .segmentCount(summary.getCount()) - .internalBrokerDiskUsage(resultBrokers) - .build() - ) - 
.internalTopicWithSegmentSize(resultTopics).build(); - }) - ); - } - - public List getJmxMetric(String clusterName, Node node) { - return clustersStorage.getClusterByName(clusterName) - .filter(c -> c.getJmxPort() != null) - .filter(c -> c.getJmxPort() > 0) - .map(c -> jmxClusterUtil.getJmxMetrics(node.host(), c.getJmxPort(), c.isJmxSsl(), - c.getJmxUsername(), c.getJmxPassword())) - .orElse(Collections.emptyList()); - } - - private Mono fillJmxMetrics(InternalClusterMetrics internalClusterMetrics, - String clusterName, ReactiveAdminClient ac) { - return fillBrokerMetrics(internalClusterMetrics, clusterName, ac) - .map(this::calculateClusterMetrics); - } - - private Mono fillBrokerMetrics( - InternalClusterMetrics internalClusterMetrics, String clusterName, ReactiveAdminClient ac) { - return ac.describeCluster() - .flatMapIterable(desc -> desc.getNodes()) - .map(broker -> - Map.of(broker.id(), InternalBrokerMetrics.builder() - .metrics(getJmxMetric(clusterName, broker)).build()) - ) - .collectList() - .map(s -> internalClusterMetrics.toBuilder() - .internalBrokerMetrics(ClusterUtil.toSingleMap(s.stream())).build()); - } - - private InternalClusterMetrics calculateClusterMetrics( - InternalClusterMetrics internalClusterMetrics) { - final List metrics = internalClusterMetrics.getInternalBrokerMetrics().values() - .stream() - .flatMap(b -> b.getMetrics().stream()) - .collect( - Collectors.groupingBy( - MetricDTO::getCanonicalName, - Collectors.reducing(jmxClusterUtil::reduceJmxMetrics) - ) - ).values().stream() - .filter(Optional::isPresent) - .map(Optional::get) - .collect(Collectors.toList()); - final InternalClusterMetrics.InternalClusterMetricsBuilder metricsBuilder = - internalClusterMetrics.toBuilder().metrics(metrics); - metricsBuilder.bytesInPerSec(findTopicMetrics( - metrics, JmxMetricsName.BytesInPerSec, JmxMetricsValueName.FiveMinuteRate - )); - metricsBuilder.bytesOutPerSec(findTopicMetrics( - metrics, JmxMetricsName.BytesOutPerSec, JmxMetricsValueName.FiveMinuteRate - )); - return metricsBuilder.build(); - } - - private Map findTopicMetrics(List metrics, - JmxMetricsName metricsName, - JmxMetricsValueName valueName) { - return metrics.stream().filter(m -> metricsName.name().equals(m.getName())) - .filter(m -> m.getParams().containsKey("topic")) - .filter(m -> m.getValue().containsKey(valueName.name())) - .map(m -> Tuples.of( - m.getParams().get("topic"), - m.getValue().get(valueName.name()) - )).collect(Collectors.groupingBy( - Tuple2::getT1, - Collectors.reducing(BigDecimal.ZERO, Tuple2::getT2, BigDecimal::add) - )); - } - - public Map getTopicPartitions(KafkaCluster c, InternalTopic topic) { - var tps = topic.getPartitions().values().stream() - .map(t -> new TopicPartition(topic.getName(), t.getPartition())) - .collect(Collectors.toList()); - Map partitions = - topic.getPartitions().values().stream().collect(Collectors.toMap( - InternalPartition::getPartition, - tp -> tp - )); - - try (var consumer = createConsumer(c)) { - final Map earliest = consumer.beginningOffsets(tps); - final Map latest = consumer.endOffsets(tps); - - return tps.stream() - .map(tp -> partitions.get(tp.partition()).toBuilder() - .offsetMin(Optional.ofNullable(earliest.get(tp)).orElse(0L)) - .offsetMax(Optional.ofNullable(latest.get(tp)).orElse(0L)) - .build() - ).collect(Collectors.toMap( - InternalPartition::getPartition, - tp -> tp - )); - } catch (Exception e) { - return Collections.emptyMap(); - } - } - - public Mono deleteTopicMessages(KafkaCluster cluster, Map offsets) { - return 
adminClientService.get(cluster).flatMap(ac -> ac.deleteRecords(offsets)); - } - - public Mono sendMessage(KafkaCluster cluster, String topic, - CreateTopicMessageDTO msg) { - RecordSerDe serde = - deserializationService.getRecordDeserializerForCluster(cluster); - - Properties properties = new Properties(); - properties.putAll(cluster.getProperties()); - properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, cluster.getBootstrapServers()); - properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class); - properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class); - try (KafkaProducer producer = new KafkaProducer<>(properties)) { - ProducerRecord producerRecord = serde.serialize( - topic, - msg.getKey(), - msg.getContent(), - msg.getPartition() - ); - producerRecord = new ProducerRecord<>( - producerRecord.topic(), - producerRecord.partition(), - producerRecord.key(), - producerRecord.value(), - createHeaders(msg.getHeaders())); - - CompletableFuture cf = new CompletableFuture<>(); - producer.send(producerRecord, (metadata, exception) -> { - if (exception != null) { - cf.completeExceptionally(exception); - } else { - cf.complete(metadata); - } - }); - return Mono.fromFuture(cf); - } - } - - private Iterable
createHeaders(Map clientHeaders) { - if (clientHeaders == null) { - return null; - } - RecordHeaders headers = new RecordHeaders(); - clientHeaders.forEach((k, v) -> headers.add(new RecordHeader(k, v.getBytes()))); - return headers; - } - - public Mono increaseTopicPartitions( - KafkaCluster cluster, - String topicName, - PartitionsIncreaseDTO partitionsIncrease) { - return adminClientService.get(cluster) - .flatMap(ac -> { - Integer actualCount = cluster.getTopics().get(topicName).getPartitionCount(); - Integer requestedCount = partitionsIncrease.getTotalPartitionsCount(); - - if (requestedCount < actualCount) { - return Mono.error( - new ValidationException(String.format( - "Topic currently has %s partitions, which is higher than the requested %s.", - actualCount, requestedCount))); - } - if (requestedCount.equals(actualCount)) { - return Mono.error( - new ValidationException( - String.format("Topic already has %s partitions.", actualCount))); - } - - Map newPartitionsMap = Collections.singletonMap( - topicName, - NewPartitions.increaseTo(partitionsIncrease.getTotalPartitionsCount()) - ); - return ac.createPartitions(newPartitionsMap) - .then(getUpdatedTopic(ac, topicName)); - }); - } - - private Mono changeReplicationFactor( - ReactiveAdminClient adminClient, - String topicName, - Map> reassignments - ) { - return adminClient.alterPartitionReassignments(reassignments) - .then(getUpdatedTopic(adminClient, topicName)); - } - - /** - * Change topic replication factor, works on brokers versions 5.4.x and higher - */ - public Mono changeReplicationFactor( - KafkaCluster cluster, - String topicName, - ReplicationFactorChangeDTO replicationFactorChange) { - return adminClientService.get(cluster) - .flatMap(ac -> { - Integer actual = cluster.getTopics().get(topicName).getReplicationFactor(); - Integer requested = replicationFactorChange.getTotalReplicationFactor(); - Integer brokersCount = cluster.getMetrics().getBrokerCount(); - - if (requested.equals(actual)) { - return Mono.error( - new ValidationException( - String.format("Topic already has replicationFactor %s.", actual))); - } - if (requested > brokersCount) { - return Mono.error( - new ValidationException( - String.format("Requested replication factor %s more than brokers count %s.", - requested, brokersCount))); - } - return changeReplicationFactor(ac, topicName, - getPartitionsReassignments(cluster, topicName, - replicationFactorChange)); - }); - } - - public Mono>> getClusterLogDirs( - KafkaCluster cluster, List reqBrokers) { - return adminClientService.get(cluster) - .flatMap(admin -> { - List brokers = new ArrayList<>(cluster.getBrokers()); - if (reqBrokers != null && !reqBrokers.isEmpty()) { - brokers.retainAll(reqBrokers); - } - return admin.describeLogDirs(brokers); - }) - .onErrorResume(TimeoutException.class, (TimeoutException e) -> { - log.error("Error during fetching log dirs", e); - return Mono.just(new HashMap<>()); - }); - } - - private Map> getPartitionsReassignments( - KafkaCluster cluster, - String topicName, - ReplicationFactorChangeDTO replicationFactorChange) { - // Current assignment map (Partition number -> List of brokers) - Map> currentAssignment = getCurrentAssignment(cluster, topicName); - // Brokers map (Broker id -> count) - Map brokersUsage = getBrokersMap(cluster, currentAssignment); - int currentReplicationFactor = cluster.getTopics().get(topicName).getReplicationFactor(); - - // If we should to increase Replication factor - if (replicationFactorChange.getTotalReplicationFactor() > 
currentReplicationFactor) { - // For each partition - for (var assignmentList : currentAssignment.values()) { - // Get brokers list sorted by usage - var brokers = brokersUsage.entrySet().stream() - .sorted(Map.Entry.comparingByValue()) - .map(Map.Entry::getKey) - .collect(Collectors.toList()); - - // Iterate brokers and try to add them in assignment - // while (partition replicas count != requested replication factor) - for (Integer broker : brokers) { - if (!assignmentList.contains(broker)) { - assignmentList.add(broker); - brokersUsage.merge(broker, 1, Integer::sum); - } - if (assignmentList.size() == replicationFactorChange.getTotalReplicationFactor()) { - break; - } - } - if (assignmentList.size() != replicationFactorChange.getTotalReplicationFactor()) { - throw new ValidationException("Something went wrong during adding replicas"); - } - } - - // If we should to decrease Replication factor - } else if (replicationFactorChange.getTotalReplicationFactor() < currentReplicationFactor) { - for (Map.Entry> assignmentEntry : currentAssignment.entrySet()) { - var partition = assignmentEntry.getKey(); - var brokers = assignmentEntry.getValue(); - - // Get brokers list sorted by usage in reverse order - var brokersUsageList = brokersUsage.entrySet().stream() - .sorted(Map.Entry.comparingByValue(Comparator.reverseOrder())) - .map(Map.Entry::getKey) - .collect(Collectors.toList()); - - // Iterate brokers and try to remove them from assignment - // while (partition replicas count != requested replication factor) - for (Integer broker : brokersUsageList) { - // Check is the broker the leader of partition - if (!cluster.getTopics().get(topicName).getPartitions().get(partition).getLeader() - .equals(broker)) { - brokers.remove(broker); - brokersUsage.merge(broker, -1, Integer::sum); - } - if (brokers.size() == replicationFactorChange.getTotalReplicationFactor()) { - break; - } - } - if (brokers.size() != replicationFactorChange.getTotalReplicationFactor()) { - throw new ValidationException("Something went wrong during removing replicas"); - } - } - } else { - throw new ValidationException("Replication factor already equals requested"); - } - - // Return result map - return currentAssignment.entrySet().stream().collect(Collectors.toMap( - e -> new TopicPartition(topicName, e.getKey()), - e -> Optional.of(new NewPartitionReassignment(e.getValue())) - )); - } - - private Map> getCurrentAssignment(KafkaCluster cluster, String topicName) { - return cluster.getTopics().get(topicName).getPartitions().values().stream() - .collect(Collectors.toMap( - InternalPartition::getPartition, - p -> p.getReplicas().stream() - .map(InternalReplica::getBroker) - .collect(Collectors.toList()) - )); - } - - private Map getBrokersMap(KafkaCluster cluster, - Map> currentAssignment) { - Map result = cluster.getBrokers().stream() - .collect(Collectors.toMap( - c -> c, - c -> 0 - )); - currentAssignment.values().forEach(brokers -> brokers - .forEach(broker -> result.put(broker, result.get(broker) + 1))); - - return result; - } - - public Mono updateBrokerLogDir(KafkaCluster cluster, Integer broker, - BrokerLogdirUpdateDTO brokerLogDir) { - return adminClientService.get(cluster) - .flatMap(ac -> updateBrokerLogDir(ac, brokerLogDir, broker)); - } - - private Mono updateBrokerLogDir(ReactiveAdminClient admin, - BrokerLogdirUpdateDTO b, - Integer broker) { - - Map req = Map.of( - new TopicPartitionReplica(b.getTopic(), b.getPartition(), broker), - b.getLogDir()); - return admin.alterReplicaLogDirs(req) - 
.onErrorResume(UnknownTopicOrPartitionException.class,
-            e -> Mono.error(new TopicOrPartitionNotFoundException()))
-        .onErrorResume(LogDirNotFoundException.class,
-            e -> Mono.error(new LogDirNotFoundApiException()))
-        .doOnError(log::error);
-  }
-
-  public Mono<Void> updateBrokerConfigByName(KafkaCluster cluster,
-                                             Integer broker,
-                                             String name,
-                                             String value) {
-    return adminClientService.get(cluster)
-        .flatMap(ac -> ac.updateBrokerConfigByName(broker, name, value))
-        .onErrorResume(InvalidRequestException.class,
-            e -> Mono.error(new InvalidRequestApiException(e.getMessage())))
-        .doOnError(log::error);
-  }
-}
diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/KsqlService.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/KsqlService.java
index f7ff9b3ebb..7fe54bf03b 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/KsqlService.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/KsqlService.java
@@ -17,13 +17,11 @@ import reactor.core.publisher.Mono;
 @RequiredArgsConstructor
 public class KsqlService {
   private final KsqlClient ksqlClient;
-  private final ClustersStorage clustersStorage;
   private final List<BaseStrategy> ksqlStatementStrategies;
 
-  public Mono<KsqlCommandResponseDTO> executeKsqlCommand(String clusterName,
+  public Mono<KsqlCommandResponseDTO> executeKsqlCommand(KafkaCluster cluster,
                                                          Mono<KsqlCommandDTO> ksqlCommand) {
-    return Mono.justOrEmpty(clustersStorage.getClusterByName(clusterName))
-        .switchIfEmpty(Mono.error(ClusterNotFoundException::new))
+    return Mono.justOrEmpty(cluster)
         .map(KafkaCluster::getKsqldbServer)
         .onErrorResume(e -> {
           Throwable throwable =
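With `ClustersStorage` gone from `KsqlService`, the name-to-cluster lookup now happens once at the controller layer and the service only ever sees a resolved `KafkaCluster`. A hypothetical controller method in the new style (the signature and field names are illustrative of the pattern, not copied from this PR; `getCluster` stands for the shared lookup helper that resolves the name or throws `ClusterNotFoundException` first):

```java
// Assumed: an injected ksqlService field and a getCluster(name) helper.
public Mono<ResponseEntity<KsqlCommandResponseDTO>> executeKsqlCommand(
    String clusterName, Mono<KsqlCommandDTO> ksqlCommand, ServerWebExchange exchange) {
  return ksqlService.executeKsqlCommand(getCluster(clusterName), ksqlCommand)
      .map(ResponseEntity::ok);
}
```

One behavioral nit worth a look: `Mono.justOrEmpty(cluster)` completes empty for a null cluster, whereas the removed `switchIfEmpty` raised `ClusterNotFoundException`; the error is now expected to come from the caller's lookup instead.

diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/MessagesService.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/MessagesService.java
new file mode 100644
index 0000000000..038f5d6c36
--- /dev/null
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/MessagesService.java
@@ -0,0 +1,193 @@
+package com.provectus.kafka.ui.service;
+
+import com.provectus.kafka.ui.emitter.BackwardRecordEmitter;
+import com.provectus.kafka.ui.emitter.ForwardRecordEmitter;
+import com.provectus.kafka.ui.exception.TopicNotFoundException;
+import com.provectus.kafka.ui.exception.ValidationException;
+import com.provectus.kafka.ui.model.ConsumerPosition;
+import com.provectus.kafka.ui.model.CreateTopicMessageDTO;
+import com.provectus.kafka.ui.model.KafkaCluster;
+import com.provectus.kafka.ui.model.SeekDirectionDTO;
+import com.provectus.kafka.ui.model.TopicMessageDTO;
+import com.provectus.kafka.ui.model.TopicMessageEventDTO;
+import com.provectus.kafka.ui.serde.DeserializationService;
+import com.provectus.kafka.ui.serde.RecordSerDe;
+import com.provectus.kafka.ui.util.FilterTopicMessageEvents;
+import com.provectus.kafka.ui.util.OffsetsSeekBackward;
+import com.provectus.kafka.ui.util.OffsetsSeekForward;
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import java.util.Properties;
+import java.util.concurrent.CompletableFuture;
+import java.util.stream.Collectors;
+import javax.annotation.Nullable;
+import lombok.RequiredArgsConstructor;
+import lombok.extern.log4j.Log4j2;
+import org.apache.kafka.clients.consumer.Consumer;
+import org.apache.kafka.clients.consumer.KafkaConsumer;
+import org.apache.kafka.clients.producer.KafkaProducer;
+import org.apache.kafka.clients.producer.ProducerConfig;
+import org.apache.kafka.clients.producer.ProducerRecord;
+import org.apache.kafka.clients.producer.RecordMetadata;
+import org.apache.kafka.common.TopicPartition;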
+import org.apache.kafka.common.header.Header;
+import org.apache.kafka.common.header.internals.RecordHeader;
+import org.apache.kafka.common.header.internals.RecordHeaders;
+import org.apache.kafka.common.serialization.ByteArraySerializer;
+import org.apache.kafka.common.utils.Bytes;
+import org.springframework.stereotype.Service;
+import org.springframework.util.StringUtils;
+import reactor.core.publisher.Flux;
+import reactor.core.publisher.FluxSink;
+import reactor.core.publisher.Mono;
+import reactor.core.scheduler.Schedulers;
+
+@Service
+@RequiredArgsConstructor
+@Log4j2
+public class MessagesService {
+
+  private static final int MAX_LOAD_RECORD_LIMIT = 100;
+  private static final int DEFAULT_LOAD_RECORD_LIMIT = 20;
+
+  private final AdminClientService adminClientService;
+  private final DeserializationService deserializationService;
+  private final ConsumerGroupService consumerGroupService;
+
+  public Mono<Void> deleteTopicMessages(KafkaCluster cluster, String topicName,
+                                        List<Integer> partitionsToInclude) {
+    if (!cluster.getTopics().containsKey(topicName)) {
+      throw new TopicNotFoundException();
+    }
+    return offsetsForDeletion(cluster, topicName, partitionsToInclude)
+        .flatMap(offsets ->
+            adminClientService.get(cluster).flatMap(ac -> ac.deleteRecords(offsets)));
+  }
+
+  private Mono<Map<TopicPartition, Long>> offsetsForDeletion(KafkaCluster cluster,
+                                                             String topicName,
+                                                             List<Integer> partitionsToInclude) {
+    return Mono.fromSupplier(() -> {
+      try (KafkaConsumer<Bytes, Bytes> consumer = consumerGroupService.createConsumer(cluster)) {
+        return significantOffsets(consumer, topicName, partitionsToInclude);
+      } catch (Exception e) {
+        log.error("Error occurred while consuming records", e);
+        throw new RuntimeException(e);
+      }
+    });
+  }
+
+  public Mono<RecordMetadata> sendMessage(KafkaCluster cluster, String topic,
+                                          CreateTopicMessageDTO msg) {
+    if (msg.getKey() == null && msg.getContent() == null) {
+      throw new ValidationException("Invalid message: both key and value can't be null");
+    }
+    if (msg.getPartition() != null
+        && msg.getPartition() > cluster.getTopics().get(topic).getPartitionCount() - 1) {
+      throw new ValidationException("Invalid partition");
+    }
+    RecordSerDe serde =
+        deserializationService.getRecordDeserializerForCluster(cluster);
+
+    Properties properties = new Properties();
+    properties.putAll(cluster.getProperties());
+    properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, cluster.getBootstrapServers());
+    properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class);
+    properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class);
+    try (KafkaProducer<byte[], byte[]> producer = new KafkaProducer<>(properties)) {
+      ProducerRecord<byte[], byte[]> producerRecord = serde.serialize(
+          topic,
+          msg.getKey(),
+          msg.getContent(),
+          msg.getPartition()
+      );
+      producerRecord = new ProducerRecord<>(
+          producerRecord.topic(),
+          producerRecord.partition(),
+          producerRecord.key(),
+          producerRecord.value(),
+          createHeaders(msg.getHeaders()));
+
+      CompletableFuture<RecordMetadata> cf = new CompletableFuture<>();
+      producer.send(producerRecord, (metadata, exception) -> {
+        if (exception != null) {
+          cf.completeExceptionally(exception);
+        } else {
+          cf.complete(metadata);
+        }
+      });
+      return Mono.fromFuture(cf);
+    }
+  }
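Note for reviewers on `sendMessage` above: returning `Mono.fromFuture(cf)` from inside try-with-resources works because `KafkaProducer.close()` flushes and blocks until in-flight sends complete, so the future is already settled when the method returns. A standalone sketch of that lifecycle (`props` and `record` are assumed to exist):

```java
// close(), implicit at the end of try-with-resources, waits for outstanding
// sends, so the callback below has run by the time the block exits.
CompletableFuture<RecordMetadata> cf = new CompletableFuture<>();
try (KafkaProducer<byte[], byte[]> producer = new KafkaProducer<>(props)) {
  producer.send(record, (metadata, exception) -> {
    if (exception != null) {
      cf.completeExceptionally(exception);
    } else {
      cf.complete(metadata);
    }
  });
}
assert cf.isDone(); // holds: close() does not return with the send still in flight
```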
+  private Iterable<Header> createHeaders(@Nullable Map<String, String> clientHeaders) {
+    if (clientHeaders == null) {
+      return new RecordHeaders();
+    }
+    RecordHeaders headers = new RecordHeaders();
+    clientHeaders.forEach((k, v) -> headers.add(new RecordHeader(k, v.getBytes())));
+    return headers;
+  }
+
+  public Flux<TopicMessageEventDTO> loadMessages(KafkaCluster cluster, String topic,
+                                                 ConsumerPosition consumerPosition, String query,
+                                                 Integer limit) {
+    int recordsLimit = Optional.ofNullable(limit)
+        .map(s -> Math.min(s, MAX_LOAD_RECORD_LIMIT))
+        .orElse(DEFAULT_LOAD_RECORD_LIMIT);
+
+    java.util.function.Consumer<? super FluxSink<TopicMessageEventDTO>> emitter;
+    RecordSerDe recordDeserializer =
+        deserializationService.getRecordDeserializerForCluster(cluster);
+    if (consumerPosition.getSeekDirection().equals(SeekDirectionDTO.FORWARD)) {
+      emitter = new ForwardRecordEmitter(
+          () -> consumerGroupService.createConsumer(cluster),
+          new OffsetsSeekForward(topic, consumerPosition),
+          recordDeserializer
+      );
+    } else {
+      emitter = new BackwardRecordEmitter(
+          (Map<String, Object> props) -> consumerGroupService.createConsumer(cluster, props),
+          new OffsetsSeekBackward(topic, consumerPosition, recordsLimit),
+          recordDeserializer
+      );
+    }
+    return Flux.create(emitter)
+        .filter(m -> filterTopicMessage(m, query))
+        .takeWhile(new FilterTopicMessageEvents(recordsLimit))
+        .subscribeOn(Schedulers.elastic())
+        .share();
+  }
+
+  /**
+   * Returns end offsets for partitions where start offset != end offset.
+   * This is useful when we need to verify that a partition is not empty.
+   */
+  public static Map<TopicPartition, Long> significantOffsets(Consumer<?, ?> consumer,
+                                                             String topicName,
+                                                             Collection<Integer>
+                                                                 partitionsToInclude) {
+    var partitions = consumer.partitionsFor(topicName).stream()
+        .filter(p -> partitionsToInclude.isEmpty() || partitionsToInclude.contains(p.partition()))
+        .map(p -> new TopicPartition(topicName, p.partition()))
+        .collect(Collectors.toList());
+    var beginningOffsets = consumer.beginningOffsets(partitions);
+    var endOffsets = consumer.endOffsets(partitions);
+    return endOffsets.entrySet().stream()
+        .filter(entry -> !beginningOffsets.get(entry.getKey()).equals(entry.getValue()))
+        .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
+  }
+
+  private boolean filterTopicMessage(TopicMessageEventDTO message, String query) {
+    if (StringUtils.isEmpty(query)
+        || !message.getType().equals(TopicMessageEventDTO.TypeEnum.MESSAGE)) {
+      return true;
+    }
+
+    final TopicMessageDTO msg = message.getMessage();
+    return (!StringUtils.isEmpty(msg.getKey()) && msg.getKey().contains(query))
+        || (!StringUtils.isEmpty(msg.getContent()) && msg.getContent().contains(query));
+  }
+
+}
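The `significantOffsets` contract above is easiest to see with concrete numbers; a self-contained micro-example (all values made up):

```java
import java.util.Map;
import java.util.stream.Collectors;
import org.apache.kafka.common.TopicPartition;

public class SignificantOffsetsExample {
  public static void main(String[] args) {
    TopicPartition p0 = new TopicPartition("t", 0);
    TopicPartition p1 = new TopicPartition("t", 1);
    // p0 is empty (begin == end) and gets dropped; only p1 survives.
    Map<TopicPartition, Long> begin = Map.of(p0, 0L, p1, 5L);
    Map<TopicPartition, Long> end = Map.of(p0, 0L, p1, 9L);
    Map<TopicPartition, Long> significant = end.entrySet().stream()
        .filter(e -> !begin.get(e.getKey()).equals(e.getValue()))
        .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
    System.out.println(significant); // {t-1=9}
  }
}
```

This is what lets `deleteTopicMessages` avoid issuing deleteRecords calls for partitions that hold no data.

diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/MetricsService.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/MetricsService.java
new file mode 100644
index 0000000000..78fe5073af
--- /dev/null
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/MetricsService.java
@@ -0,0 +1,363 @@
+package com.provectus.kafka.ui.service;
+
+import com.provectus.kafka.ui.model.InternalBrokerDiskUsage;
+import com.provectus.kafka.ui.model.InternalBrokerMetrics;
+import com.provectus.kafka.ui.model.InternalClusterMetrics;
+import com.provectus.kafka.ui.model.InternalPartition;
+import com.provectus.kafka.ui.model.InternalSegmentSizeDto;
+import com.provectus.kafka.ui.model.InternalTopic;
+import com.provectus.kafka.ui.model.KafkaCluster;
+import com.provectus.kafka.ui.model.MetricDTO;
+import com.provectus.kafka.ui.model.ServerStatusDTO;
+import com.provectus.kafka.ui.util.ClusterUtil;
+import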
com.provectus.kafka.ui.util.JmxClusterUtil; +import com.provectus.kafka.ui.util.JmxMetricsName; +import com.provectus.kafka.ui.util.JmxMetricsValueName; +import java.math.BigDecimal; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.LongSummaryStatistics; +import java.util.Map; +import java.util.Optional; +import java.util.stream.Collectors; +import lombok.RequiredArgsConstructor; +import lombok.extern.log4j.Log4j2; +import org.apache.kafka.common.Node; +import org.apache.kafka.common.TopicPartition; +import org.springframework.stereotype.Service; +import reactor.core.publisher.Mono; +import reactor.util.function.Tuple2; +import reactor.util.function.Tuple3; +import reactor.util.function.Tuples; + +@Service +@RequiredArgsConstructor +@Log4j2 +public class MetricsService { + + private final ZookeeperService zookeeperService; + private final JmxClusterUtil jmxClusterUtil; + private final AdminClientService adminClientService; + private final FeatureService featureService; + private final TopicsService topicsService; + + /** + * Updates cluster's metrics and topics structure. + * @param cluster to be updated + * @return cluster with up-to-date metrics and topics structure + */ + public Mono updateClusterMetrics(KafkaCluster cluster) { + return adminClientService.get(cluster) + .flatMap( + ac -> ac.getClusterVersion().flatMap( + version -> + getClusterMetrics(ac) + .flatMap(i -> fillJmxMetrics(i, cluster, ac)) + .flatMap(clusterMetrics -> + topicsService.getTopicsData(ac).flatMap(it -> { + if (cluster.getDisableLogDirsCollection() == null + || !cluster.getDisableLogDirsCollection()) { + return updateSegmentMetrics(ac, clusterMetrics, it + ); + } else { + return emptySegmentMetrics(clusterMetrics, it); + } + } + ).map(segmentSizeDto -> buildFromData(cluster, version, segmentSizeDto)) + ) + ) + ).flatMap( + nc -> featureService.getAvailableFeatures(cluster).collectList() + .map(f -> nc.toBuilder().features(f).build()) + ).doOnError(e -> + log.error("Failed to collect cluster {} info", cluster.getName(), e) + ).onErrorResume( + e -> Mono.just(cluster.toBuilder() + .status(ServerStatusDTO.OFFLINE) + .lastKafkaException(e) + .build()) + ); + } + + private KafkaCluster buildFromData(KafkaCluster currentCluster, + String version, + InternalSegmentSizeDto segmentSizeDto) { + + var topics = segmentSizeDto.getInternalTopicWithSegmentSize(); + var brokersMetrics = segmentSizeDto.getClusterMetricsWithSegmentSize(); + var brokersIds = new ArrayList<>(brokersMetrics.getInternalBrokerMetrics().keySet()); + + InternalClusterMetrics.InternalClusterMetricsBuilder metricsBuilder = + brokersMetrics.toBuilder(); + + InternalClusterMetrics topicsMetrics = collectTopicsMetrics(topics); + + ServerStatusDTO zookeeperStatus = ServerStatusDTO.OFFLINE; + Throwable zookeeperException = null; + try { + zookeeperStatus = zookeeperService.isZookeeperOnline(currentCluster) + ? 
ServerStatusDTO.ONLINE + : ServerStatusDTO.OFFLINE; + } catch (Throwable e) { + zookeeperException = e; + } + + InternalClusterMetrics clusterMetrics = metricsBuilder + .activeControllers(brokersMetrics.getActiveControllers()) + .topicCount(topicsMetrics.getTopicCount()) + .brokerCount(brokersMetrics.getBrokerCount()) + .underReplicatedPartitionCount(topicsMetrics.getUnderReplicatedPartitionCount()) + .inSyncReplicasCount(topicsMetrics.getInSyncReplicasCount()) + .outOfSyncReplicasCount(topicsMetrics.getOutOfSyncReplicasCount()) + .onlinePartitionCount(topicsMetrics.getOnlinePartitionCount()) + .offlinePartitionCount(topicsMetrics.getOfflinePartitionCount()) + .zooKeeperStatus(ClusterUtil.convertToIntServerStatus(zookeeperStatus)) + .version(version) + .build(); + + return currentCluster.toBuilder() + .version(version) + .status(ServerStatusDTO.ONLINE) + .zookeeperStatus(zookeeperStatus) + .lastZookeeperException(zookeeperException) + .lastKafkaException(null) + .metrics(clusterMetrics) + .topics(topics) + .brokers(brokersIds) + .build(); + } + + private InternalClusterMetrics collectTopicsMetrics(Map topics) { + + int underReplicatedPartitions = 0; + int inSyncReplicasCount = 0; + int outOfSyncReplicasCount = 0; + int onlinePartitionCount = 0; + int offlinePartitionCount = 0; + + for (InternalTopic topic : topics.values()) { + underReplicatedPartitions += topic.getUnderReplicatedPartitions(); + inSyncReplicasCount += topic.getInSyncReplicas(); + outOfSyncReplicasCount += (topic.getReplicas() - topic.getInSyncReplicas()); + onlinePartitionCount += + topic.getPartitions().values().stream().mapToInt(s -> s.getLeader() == null ? 0 : 1) + .sum(); + offlinePartitionCount += + topic.getPartitions().values().stream().mapToInt(s -> s.getLeader() != null ? 0 : 1) + .sum(); + } + + return InternalClusterMetrics.builder() + .underReplicatedPartitionCount(underReplicatedPartitions) + .inSyncReplicasCount(inSyncReplicasCount) + .outOfSyncReplicasCount(outOfSyncReplicasCount) + .onlinePartitionCount(onlinePartitionCount) + .offlinePartitionCount(offlinePartitionCount) + .topicCount(topics.size()) + .build(); + } + + private Mono getClusterMetrics(ReactiveAdminClient client) { + return client.describeCluster().map(desc -> + InternalClusterMetrics.builder() + .brokerCount(desc.getNodes().size()) + .activeControllers(desc.getController() != null ? 
1 : 0) + .build() + ); + } + + private InternalTopic mergeWithStats(InternalTopic topic, + Map topics, + Map partitions) { + final LongSummaryStatistics stats = topics.get(topic.getName()); + + return topic.toBuilder() + .segmentSize(stats.getSum()) + .segmentCount(stats.getCount()) + .partitions( + topic.getPartitions().entrySet().stream().map(e -> + Tuples.of(e.getKey(), mergeWithStats(topic.getName(), e.getValue(), partitions)) + ).collect(Collectors.toMap( + Tuple2::getT1, + Tuple2::getT2 + )) + ).build(); + } + + private InternalPartition mergeWithStats(String topic, InternalPartition partition, + Map partitions) { + final LongSummaryStatistics stats = + partitions.get(new TopicPartition(topic, partition.getPartition())); + return partition.toBuilder() + .segmentSize(stats.getSum()) + .segmentCount(stats.getCount()) + .build(); + } + + private Mono emptySegmentMetrics(InternalClusterMetrics clusterMetrics, + List internalTopics) { + return Mono.just( + InternalSegmentSizeDto.builder() + .clusterMetricsWithSegmentSize( + clusterMetrics.toBuilder() + .segmentSize(0) + .segmentCount(0) + .internalBrokerDiskUsage(Collections.emptyMap()) + .build() + ) + .internalTopicWithSegmentSize( + internalTopics.stream().collect( + Collectors.toMap( + InternalTopic::getName, + i -> i + ) + ) + ).build() + ); + } + + private Mono updateSegmentMetrics(ReactiveAdminClient ac, + InternalClusterMetrics clusterMetrics, + List internalTopics) { + return ac.describeCluster().flatMap( + clusterDescription -> + ac.describeLogDirs().map(log -> { + final List> topicPartitions = + log.entrySet().stream().flatMap(b -> + b.getValue().entrySet().stream().flatMap(topicMap -> + topicMap.getValue().replicaInfos.entrySet().stream() + .map(e -> Tuples.of(b.getKey(), e.getKey(), e.getValue().size)) + ) + ).collect(Collectors.toList()); + + final Map partitionStats = + topicPartitions.stream().collect( + Collectors.groupingBy( + Tuple2::getT2, + Collectors.summarizingLong(Tuple3::getT3) + ) + ); + + final Map topicStats = + topicPartitions.stream().collect( + Collectors.groupingBy( + t -> t.getT2().topic(), + Collectors.summarizingLong(Tuple3::getT3) + ) + ); + + final Map brokerStats = + topicPartitions.stream().collect( + Collectors.groupingBy( + Tuple2::getT1, + Collectors.summarizingLong(Tuple3::getT3) + ) + ); + + + final LongSummaryStatistics summary = + topicPartitions.stream().collect(Collectors.summarizingLong(Tuple3::getT3)); + + + final Map resultTopics = internalTopics.stream().map(e -> + Tuples.of(e.getName(), mergeWithStats(e, topicStats, partitionStats)) + ).collect(Collectors.toMap( + Tuple2::getT1, + Tuple2::getT2 + )); + + final Map resultBrokers = + brokerStats.entrySet().stream().map(e -> + Tuples.of(e.getKey(), InternalBrokerDiskUsage.builder() + .segmentSize(e.getValue().getSum()) + .segmentCount(e.getValue().getCount()) + .build() + ) + ).collect(Collectors.toMap( + Tuple2::getT1, + Tuple2::getT2 + )); + + return InternalSegmentSizeDto.builder() + .clusterMetricsWithSegmentSize( + clusterMetrics.toBuilder() + .segmentSize(summary.getSum()) + .segmentCount(summary.getCount()) + .internalBrokerDiskUsage(resultBrokers) + .build() + ) + .internalTopicWithSegmentSize(resultTopics).build(); + }) + ); + } + + private List getJmxMetric(KafkaCluster cluster, Node node) { + return Optional.of(cluster) + .filter(c -> c.getJmxPort() != null) + .filter(c -> c.getJmxPort() > 0) + .map(c -> jmxClusterUtil.getJmxMetrics(node.host(), c.getJmxPort(), c.isJmxSsl(), + c.getJmxUsername(), c.getJmxPassword())) + 
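+ // Descriptive note (not in the original patch): clusters without a configured
+ // JMX port (null or non-positive) fail both filters above, so the Optional is
+ // empty and the chain falls through to an empty list instead of attempting a
+ // JMX connection.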
.orElse(Collections.emptyList()); + } + + private Mono fillJmxMetrics(InternalClusterMetrics internalClusterMetrics, + KafkaCluster cluster, + ReactiveAdminClient ac) { + return fillBrokerMetrics(internalClusterMetrics, cluster, ac) + .map(this::calculateClusterMetrics); + } + + private Mono fillBrokerMetrics( + InternalClusterMetrics internalClusterMetrics, KafkaCluster cluster, ReactiveAdminClient ac) { + return ac.describeCluster() + .flatMapIterable(ReactiveAdminClient.ClusterDescription::getNodes) + .map(broker -> + Map.of(broker.id(), InternalBrokerMetrics.builder() + .metrics(getJmxMetric(cluster, broker)).build()) + ) + .collectList() + .map(s -> internalClusterMetrics.toBuilder() + .internalBrokerMetrics(ClusterUtil.toSingleMap(s.stream())).build()); + } + + private InternalClusterMetrics calculateClusterMetrics( + InternalClusterMetrics internalClusterMetrics) { + final List metrics = internalClusterMetrics.getInternalBrokerMetrics().values() + .stream() + .flatMap(b -> b.getMetrics().stream()) + .collect( + Collectors.groupingBy( + MetricDTO::getCanonicalName, + Collectors.reducing(jmxClusterUtil::reduceJmxMetrics) + ) + ).values().stream() + .filter(Optional::isPresent) + .map(Optional::get) + .collect(Collectors.toList()); + final InternalClusterMetrics.InternalClusterMetricsBuilder metricsBuilder = + internalClusterMetrics.toBuilder().metrics(metrics); + metricsBuilder.bytesInPerSec(findTopicMetrics( + metrics, JmxMetricsName.BytesInPerSec, JmxMetricsValueName.FiveMinuteRate + )); + metricsBuilder.bytesOutPerSec(findTopicMetrics( + metrics, JmxMetricsName.BytesOutPerSec, JmxMetricsValueName.FiveMinuteRate + )); + return metricsBuilder.build(); + } + + private Map findTopicMetrics(List metrics, + JmxMetricsName metricsName, + JmxMetricsValueName valueName) { + return metrics.stream().filter(m -> metricsName.name().equals(m.getName())) + .filter(m -> m.getParams().containsKey("topic")) + .filter(m -> m.getValue().containsKey(valueName.name())) + .map(m -> Tuples.of( + m.getParams().get("topic"), + m.getValue().get(valueName.name()) + )).collect(Collectors.groupingBy( + Tuple2::getT1, + Collectors.reducing(BigDecimal.ZERO, Tuple2::getT2, BigDecimal::add) + )); + } +} diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/MetricsUpdateService.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/MetricsUpdateService.java deleted file mode 100644 index c4462aac9b..0000000000 --- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/MetricsUpdateService.java +++ /dev/null @@ -1,20 +0,0 @@ -package com.provectus.kafka.ui.service; - -import com.provectus.kafka.ui.model.KafkaCluster; -import lombok.RequiredArgsConstructor; -import lombok.extern.log4j.Log4j2; -import org.springframework.stereotype.Service; -import reactor.core.publisher.Mono; - -@Service -@RequiredArgsConstructor -@Log4j2 -public class MetricsUpdateService { - - private final KafkaService kafkaService; - - public Mono updateMetrics(KafkaCluster kafkaCluster) { - log.debug("Start getting metrics for kafkaCluster: {}", kafkaCluster.getName()); - return kafkaService.getUpdatedCluster(kafkaCluster); - } -} diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/ReactiveAdminClient.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/ReactiveAdminClient.java index 0d1e81e9e0..16b95bdb6f 100644 --- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/ReactiveAdminClient.java +++ 
b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/ReactiveAdminClient.java @@ -4,6 +4,8 @@ import static com.google.common.util.concurrent.Uninterruptibles.getUninterrupti import static java.util.stream.Collectors.toList; import static java.util.stream.Collectors.toMap; +import com.provectus.kafka.ui.exception.IllegalEntityStateException; +import com.provectus.kafka.ui.exception.NotFoundException; import com.provectus.kafka.ui.util.MapUtil; import com.provectus.kafka.ui.util.NumberUtil; import java.io.Closeable; @@ -40,6 +42,8 @@ import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.TopicPartitionReplica; import org.apache.kafka.common.acl.AclOperation; import org.apache.kafka.common.config.ConfigResource; +import org.apache.kafka.common.errors.GroupIdNotFoundException; +import org.apache.kafka.common.errors.GroupNotEmptyException; import org.apache.kafka.common.requests.DescribeLogDirsResponse; import reactor.core.publisher.Mono; @@ -186,7 +190,11 @@ public class ReactiveAdminClient implements Closeable { } public Mono deleteConsumerGroups(Collection groupIds) { - return toMono(client.deleteConsumerGroups(groupIds).all()); + return toMono(client.deleteConsumerGroups(groupIds).all()) + .onErrorResume(GroupIdNotFoundException.class, + th -> Mono.error(new NotFoundException("The group id does not exist"))) + .onErrorResume(GroupNotEmptyException.class, + th -> Mono.error(new IllegalEntityStateException("The group is not empty"))); } public Mono createTopic(String name, diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/TopicsService.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/TopicsService.java new file mode 100644 index 0000000000..2254439339 --- /dev/null +++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/TopicsService.java @@ -0,0 +1,443 @@ +package com.provectus.kafka.ui.service; + +import com.provectus.kafka.ui.exception.TopicMetadataException; +import com.provectus.kafka.ui.exception.TopicNotFoundException; +import com.provectus.kafka.ui.exception.ValidationException; +import com.provectus.kafka.ui.mapper.ClusterMapper; +import com.provectus.kafka.ui.model.CleanupPolicy; +import com.provectus.kafka.ui.model.Feature; +import com.provectus.kafka.ui.model.InternalPartition; +import com.provectus.kafka.ui.model.InternalReplica; +import com.provectus.kafka.ui.model.InternalTopic; +import com.provectus.kafka.ui.model.InternalTopicConfig; +import com.provectus.kafka.ui.model.KafkaCluster; +import com.provectus.kafka.ui.model.PartitionsIncreaseDTO; +import com.provectus.kafka.ui.model.PartitionsIncreaseResponseDTO; +import com.provectus.kafka.ui.model.ReplicationFactorChangeDTO; +import com.provectus.kafka.ui.model.ReplicationFactorChangeResponseDTO; +import com.provectus.kafka.ui.model.TopicColumnsToSortDTO; +import com.provectus.kafka.ui.model.TopicConfigDTO; +import com.provectus.kafka.ui.model.TopicCreationDTO; +import com.provectus.kafka.ui.model.TopicDTO; +import com.provectus.kafka.ui.model.TopicDetailsDTO; +import com.provectus.kafka.ui.model.TopicMessageSchemaDTO; +import com.provectus.kafka.ui.model.TopicUpdateDTO; +import com.provectus.kafka.ui.model.TopicsResponseDTO; +import com.provectus.kafka.ui.serde.DeserializationService; +import com.provectus.kafka.ui.util.ClusterUtil; +import java.util.Collection; +import java.util.Collections; +import java.util.Comparator; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.function.Predicate; +import 
java.util.stream.Collectors; +import lombok.RequiredArgsConstructor; +import lombok.SneakyThrows; +import org.apache.commons.lang3.StringUtils; +import org.apache.kafka.clients.admin.NewPartitionReassignment; +import org.apache.kafka.clients.admin.NewPartitions; +import org.apache.kafka.common.TopicPartition; +import org.springframework.stereotype.Service; +import reactor.core.publisher.Flux; +import reactor.core.publisher.Mono; + +@Service +@RequiredArgsConstructor +public class TopicsService { + + private static final Integer DEFAULT_PAGE_SIZE = 25; + + private final AdminClientService adminClientService; + private final ConsumerGroupService consumerGroupService; + private final ClustersStorage clustersStorage; + private final ClusterMapper clusterMapper; + private final DeserializationService deserializationService; + + public TopicsResponseDTO getTopics(KafkaCluster cluster, + Optional page, + Optional nullablePerPage, + Optional showInternal, + Optional search, + Optional sortBy) { + Predicate positiveInt = i -> i > 0; + int perPage = nullablePerPage.filter(positiveInt).orElse(DEFAULT_PAGE_SIZE); + var topicsToSkip = (page.filter(positiveInt).orElse(1) - 1) * perPage; + List topics = cluster.getTopics().values().stream() + .filter(topic -> !topic.isInternal() + || showInternal + .map(i -> topic.isInternal() == i) + .orElse(true)) + .filter(topic -> + search + .map(s -> StringUtils.containsIgnoreCase(topic.getName(), s)) + .orElse(true)) + .sorted(getComparatorForTopic(sortBy)) + .collect(Collectors.toList()); + var totalPages = (topics.size() / perPage) + + (topics.size() % perPage == 0 ? 0 : 1); + return new TopicsResponseDTO() + .pageCount(totalPages) + .topics( + topics.stream() + .skip(topicsToSkip) + .limit(perPage) + .map(t -> + clusterMapper.toTopic( + t.toBuilder().partitions(getTopicPartitions(cluster, t)).build() + ) + ) + .collect(Collectors.toList()) + ); + } + + private Comparator getComparatorForTopic(Optional sortBy) { + var defaultComparator = Comparator.comparing(InternalTopic::getName); + if (sortBy.isEmpty()) { + return defaultComparator; + } + switch (sortBy.get()) { + case TOTAL_PARTITIONS: + return Comparator.comparing(InternalTopic::getPartitionCount); + case OUT_OF_SYNC_REPLICAS: + return Comparator.comparing(t -> t.getReplicas() - t.getInSyncReplicas()); + case REPLICATION_FACTOR: + return Comparator.comparing(InternalTopic::getReplicationFactor); + case NAME: + default: + return defaultComparator; + } + } + + public Optional getTopicDetails(KafkaCluster cluster, String topicName) { + return Optional.ofNullable(cluster.getTopics()).map(l -> l.get(topicName)).map( + t -> t.toBuilder().partitions(getTopicPartitions(cluster, t) + ).build() + ).map(t -> clusterMapper.toTopicDetails(t, cluster.getMetrics())); + } + + @SneakyThrows + public Mono> getTopicsData(ReactiveAdminClient client) { + return client.listTopics(true) + .flatMap(topics -> getTopicsData(client, topics).collectList()); + } + + private Flux getTopicsData(ReactiveAdminClient client, Collection topics) { + final Mono>> configsMono = + loadTopicsConfig(client, topics); + + return client.describeTopics(topics) + .map(m -> m.values().stream() + .map(ClusterUtil::mapToInternalTopic).collect(Collectors.toList())) + .flatMap(internalTopics -> configsMono + .map(configs -> mergeWithConfigs(internalTopics, configs).values())) + .flatMapMany(Flux::fromIterable); + } + + public Optional> getTopicConfigs(KafkaCluster cluster, String topicName) { + return Optional.of(cluster) + .map(KafkaCluster::getTopics) + 
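+ // Descriptive note (not in the original patch): Map.get returns null for an
+ // unknown topic name, which empties the Optional chain, so callers receive
+ // Optional.empty() rather than an exception.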
.map(t -> t.get(topicName)) + .map(t -> t.getTopicConfigs().stream().map(clusterMapper::toTopicConfig) + .collect(Collectors.toList())); + } + + + @SneakyThrows + private Mono createTopic(ReactiveAdminClient adminClient, + Mono topicCreation) { + return topicCreation.flatMap(topicData -> + adminClient.createTopic( + topicData.getName(), + topicData.getPartitions(), + topicData.getReplicationFactor().shortValue(), + topicData.getConfigs() + ).thenReturn(topicData) + ) + .onErrorResume(t -> Mono.error(new TopicMetadataException(t.getMessage()))) + .flatMap(topicData -> getUpdatedTopic(adminClient, topicData.getName())) + .switchIfEmpty(Mono.error(new RuntimeException("Can't find created topic"))); + } + + public Mono createTopic( + KafkaCluster cluster, Mono topicCreation) { + return adminClientService.get(cluster).flatMap(ac -> createTopic(ac, topicCreation)) + .doOnNext(t -> clustersStorage.onTopicUpdated(cluster, t)) + .map(clusterMapper::toTopic); + } + + private Map mergeWithConfigs( + List topics, Map> configs) { + return topics.stream() + .map(t -> t.toBuilder().topicConfigs(configs.get(t.getName())).build()) + .map(t -> t.toBuilder().cleanUpPolicy( + CleanupPolicy.fromString(t.getTopicConfigs().stream() + .filter(config -> config.getName().equals("cleanup.policy")) + .findFirst() + .orElseGet(() -> InternalTopicConfig.builder().value("unknown").build()) + .getValue())).build()) + .collect(Collectors.toMap( + InternalTopic::getName, + e -> e + )); + } + + public Mono getUpdatedTopic(ReactiveAdminClient ac, String topicName) { + return getTopicsData(ac, List.of(topicName)).next(); + } + + public Mono updateTopic(KafkaCluster cluster, + String topicName, + TopicUpdateDTO topicUpdate) { + return adminClientService.get(cluster) + .flatMap(ac -> + ac.updateTopicConfig(topicName, + topicUpdate.getConfigs()).then(getUpdatedTopic(ac, topicName))); + } + + public Mono updateTopic(KafkaCluster cl, String topicName, + Mono topicUpdate) { + return topicUpdate + .flatMap(t -> updateTopic(cl, topicName, t)) + .doOnNext(t -> clustersStorage.onTopicUpdated(cl, t)) + .map(clusterMapper::toTopic); + } + + @SneakyThrows + private Mono>> loadTopicsConfig( + ReactiveAdminClient client, Collection topicNames) { + return client.getTopicsConfig(topicNames) + .map(configs -> + configs.entrySet().stream().collect(Collectors.toMap( + Map.Entry::getKey, + c -> c.getValue().stream() + .map(ClusterUtil::mapToInternalTopicConfig) + .collect(Collectors.toList())))); + } + + private Mono changeReplicationFactor( + ReactiveAdminClient adminClient, + String topicName, + Map> reassignments + ) { + return adminClient.alterPartitionReassignments(reassignments) + .then(getUpdatedTopic(adminClient, topicName)); + } + + /** + * Change topic replication factor, works on brokers versions 5.4.x and higher + */ + public Mono changeReplicationFactor( + KafkaCluster cluster, + String topicName, + ReplicationFactorChangeDTO replicationFactorChange) { + return adminClientService.get(cluster) + .flatMap(ac -> { + Integer actual = cluster.getTopics().get(topicName).getReplicationFactor(); + Integer requested = replicationFactorChange.getTotalReplicationFactor(); + Integer brokersCount = cluster.getMetrics().getBrokerCount(); + + if (requested.equals(actual)) { + return Mono.error( + new ValidationException( + String.format("Topic already has replicationFactor %s.", actual))); + } + if (requested > brokersCount) { + return Mono.error( + new ValidationException( + String.format("Requested replication factor %s more than brokers count 
%s.", + requested, brokersCount))); + } + return changeReplicationFactor(ac, topicName, + getPartitionsReassignments(cluster, topicName, + replicationFactorChange)); + }) + .doOnNext(topic -> clustersStorage.onTopicUpdated(cluster, topic)) + .map(t -> new ReplicationFactorChangeResponseDTO() + .topicName(t.getName()) + .totalReplicationFactor(t.getReplicationFactor())); + } + + private Map> getPartitionsReassignments( + KafkaCluster cluster, + String topicName, + ReplicationFactorChangeDTO replicationFactorChange) { + // Current assignment map (Partition number -> List of brokers) + Map> currentAssignment = getCurrentAssignment(cluster, topicName); + // Brokers map (Broker id -> count) + Map brokersUsage = getBrokersMap(cluster, currentAssignment); + int currentReplicationFactor = cluster.getTopics().get(topicName).getReplicationFactor(); + + // If we should to increase Replication factor + if (replicationFactorChange.getTotalReplicationFactor() > currentReplicationFactor) { + // For each partition + for (var assignmentList : currentAssignment.values()) { + // Get brokers list sorted by usage + var brokers = brokersUsage.entrySet().stream() + .sorted(Map.Entry.comparingByValue()) + .map(Map.Entry::getKey) + .collect(Collectors.toList()); + + // Iterate brokers and try to add them in assignment + // while (partition replicas count != requested replication factor) + for (Integer broker : brokers) { + if (!assignmentList.contains(broker)) { + assignmentList.add(broker); + brokersUsage.merge(broker, 1, Integer::sum); + } + if (assignmentList.size() == replicationFactorChange.getTotalReplicationFactor()) { + break; + } + } + if (assignmentList.size() != replicationFactorChange.getTotalReplicationFactor()) { + throw new ValidationException("Something went wrong during adding replicas"); + } + } + + // If we should to decrease Replication factor + } else if (replicationFactorChange.getTotalReplicationFactor() < currentReplicationFactor) { + for (Map.Entry> assignmentEntry : currentAssignment.entrySet()) { + var partition = assignmentEntry.getKey(); + var brokers = assignmentEntry.getValue(); + + // Get brokers list sorted by usage in reverse order + var brokersUsageList = brokersUsage.entrySet().stream() + .sorted(Map.Entry.comparingByValue(Comparator.reverseOrder())) + .map(Map.Entry::getKey) + .collect(Collectors.toList()); + + // Iterate brokers and try to remove them from assignment + // while (partition replicas count != requested replication factor) + for (Integer broker : brokersUsageList) { + // Check is the broker the leader of partition + if (!cluster.getTopics().get(topicName).getPartitions().get(partition).getLeader() + .equals(broker)) { + brokers.remove(broker); + brokersUsage.merge(broker, -1, Integer::sum); + } + if (brokers.size() == replicationFactorChange.getTotalReplicationFactor()) { + break; + } + } + if (brokers.size() != replicationFactorChange.getTotalReplicationFactor()) { + throw new ValidationException("Something went wrong during removing replicas"); + } + } + } else { + throw new ValidationException("Replication factor already equals requested"); + } + + // Return result map + return currentAssignment.entrySet().stream().collect(Collectors.toMap( + e -> new TopicPartition(topicName, e.getKey()), + e -> Optional.of(new NewPartitionReassignment(e.getValue())) + )); + } + + private Map> getCurrentAssignment(KafkaCluster cluster, String topicName) { + return cluster.getTopics().get(topicName).getPartitions().values().stream() + .collect(Collectors.toMap( + 
InternalPartition::getPartition, + p -> p.getReplicas().stream() + .map(InternalReplica::getBroker) + .collect(Collectors.toList()) + )); + } + + private Map getBrokersMap(KafkaCluster cluster, + Map> currentAssignment) { + Map result = cluster.getBrokers().stream() + .collect(Collectors.toMap( + c -> c, + c -> 0 + )); + currentAssignment.values().forEach(brokers -> brokers + .forEach(broker -> result.put(broker, result.get(broker) + 1))); + + return result; + } + + public Mono increaseTopicPartitions( + KafkaCluster cluster, + String topicName, + PartitionsIncreaseDTO partitionsIncrease) { + return adminClientService.get(cluster) + .flatMap(ac -> { + Integer actualCount = cluster.getTopics().get(topicName).getPartitionCount(); + Integer requestedCount = partitionsIncrease.getTotalPartitionsCount(); + + if (requestedCount < actualCount) { + return Mono.error( + new ValidationException(String.format( + "Topic currently has %s partitions, which is higher than the requested %s.", + actualCount, requestedCount))); + } + if (requestedCount.equals(actualCount)) { + return Mono.error( + new ValidationException( + String.format("Topic already has %s partitions.", actualCount))); + } + + Map newPartitionsMap = Collections.singletonMap( + topicName, + NewPartitions.increaseTo(partitionsIncrease.getTotalPartitionsCount()) + ); + return ac.createPartitions(newPartitionsMap) + .then(getUpdatedTopic(ac, topicName)); + }) + .doOnNext(t -> clustersStorage.onTopicUpdated(cluster, t)) + .map(t -> new PartitionsIncreaseResponseDTO() + .topicName(t.getName()) + .totalPartitionsCount(t.getPartitionCount())); + } + + private Map getTopicPartitions(KafkaCluster c, InternalTopic topic) { + var tps = topic.getPartitions().values().stream() + .map(t -> new TopicPartition(topic.getName(), t.getPartition())) + .collect(Collectors.toList()); + Map partitions = + topic.getPartitions().values().stream().collect(Collectors.toMap( + InternalPartition::getPartition, + tp -> tp + )); + + try (var consumer = consumerGroupService.createConsumer(c)) { + final Map earliest = consumer.beginningOffsets(tps); + final Map latest = consumer.endOffsets(tps); + + return tps.stream() + .map(tp -> partitions.get(tp.partition()).toBuilder() + .offsetMin(Optional.ofNullable(earliest.get(tp)).orElse(0L)) + .offsetMax(Optional.ofNullable(latest.get(tp)).orElse(0L)) + .build() + ).collect(Collectors.toMap( + InternalPartition::getPartition, + tp -> tp + )); + } catch (Exception e) { + return Collections.emptyMap(); + } + } + + public Mono deleteTopic(KafkaCluster cluster, String topicName) { + var topicDetails = getTopicDetails(cluster, topicName) + .orElseThrow(TopicNotFoundException::new); + if (cluster.getFeatures().contains(Feature.TOPIC_DELETION)) { + return adminClientService.get(cluster).flatMap(c -> c.deleteTopic(topicName)) + .doOnSuccess(t -> clustersStorage.onTopicDeleted(cluster, topicName)); + } else { + return Mono.error(new ValidationException("Topic deletion restricted")); + } + } + + public TopicMessageSchemaDTO getTopicSchema(KafkaCluster cluster, String topicName) { + if (!cluster.getTopics().containsKey(topicName)) { + throw new TopicNotFoundException(); + } + return deserializationService + .getRecordDeserializerForCluster(cluster) + .getTopicSchema(topicName); + } + +} diff --git a/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/ClusterServiceTest.java b/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/ClusterServiceTest.java deleted file mode 100644 index 78417e9be5..0000000000 --- 
a/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/ClusterServiceTest.java +++ /dev/null @@ -1,303 +0,0 @@ -package com.provectus.kafka.ui.service; - -import static org.assertj.core.api.Assertions.assertThat; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.Mockito.when; - -import com.provectus.kafka.ui.mapper.ClusterMapper; -import com.provectus.kafka.ui.model.InternalTopic; -import com.provectus.kafka.ui.model.InternalTopicConfig; -import com.provectus.kafka.ui.model.KafkaCluster; -import com.provectus.kafka.ui.model.TopicColumnsToSortDTO; -import com.provectus.kafka.ui.model.TopicDTO; -import java.util.List; -import java.util.Map; -import java.util.Objects; -import java.util.Optional; -import java.util.UUID; -import java.util.function.Function; -import java.util.stream.Collectors; -import java.util.stream.IntStream; -import org.apache.kafka.clients.admin.ConfigEntry; -import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.extension.ExtendWith; -import org.mapstruct.factory.Mappers; -import org.mockito.InjectMocks; -import org.mockito.Mock; -import org.mockito.Spy; -import org.mockito.junit.jupiter.MockitoExtension; - -@ExtendWith(MockitoExtension.class) -class ClusterServiceTest { - @Spy - private final ClusterMapper clusterMapper = Mappers.getMapper(ClusterMapper.class); - @InjectMocks - private ClusterService clusterService; - @Mock - private ClustersStorage clustersStorage; - @Mock - private KafkaService kafkaService; - - @Test - public void shouldListFirst25Topics() { - var topicName = UUID.randomUUID().toString(); - - final KafkaCluster cluster = KafkaCluster.builder() - .topics( - IntStream.rangeClosed(1, 100).boxed() - .map(Objects::toString) - .collect(Collectors.toMap(Function.identity(), e -> InternalTopic.builder() - .partitions(Map.of()) - .name(e) - .build())) - ) - .build(); - - when(clustersStorage.getClusterByName(topicName)) - .thenReturn(Optional.of(cluster)); - - when( - kafkaService.getTopicPartitions(any(), any()) - ).thenReturn( - Map.of() - ); - - var topics = clusterService.getTopics(topicName, - Optional.empty(), Optional.empty(), Optional.empty(), - Optional.empty(), Optional.empty()); - assertThat(topics.getPageCount()).isEqualTo(4); - assertThat(topics.getTopics()).hasSize(25); - assertThat(topics.getTopics()).map(TopicDTO::getName).isSorted(); - } - - @Test - public void shouldCalculateCorrectPageCountForNonDivisiblePageSize() { - var topicName = UUID.randomUUID().toString(); - - when(clustersStorage.getClusterByName(topicName)) - .thenReturn(Optional.of(KafkaCluster.builder() - .topics( - IntStream.rangeClosed(1, 100).boxed() - .map(Objects::toString) - .collect(Collectors.toMap(Function.identity(), e -> InternalTopic.builder() - .partitions(Map.of()) - .name(e) - .build())) - ) - .build())); - - when( - kafkaService.getTopicPartitions(any(), any()) - ).thenReturn( - Map.of() - ); - - - var topics = clusterService.getTopics(topicName, Optional.of(4), Optional.of(33), - Optional.empty(), Optional.empty(), Optional.empty()); - assertThat(topics.getPageCount()).isEqualTo(4); - assertThat(topics.getTopics()).hasSize(1) - .first().extracting(TopicDTO::getName).isEqualTo("99"); - } - - @Test - public void shouldCorrectlyHandleNonPositivePageNumberAndPageSize() { - var topicName = UUID.randomUUID().toString(); - - when(clustersStorage.getClusterByName(topicName)) - .thenReturn(Optional.of(KafkaCluster.builder() - .topics( - IntStream.rangeClosed(1, 100).boxed() - .map(Objects::toString) - 
.collect(Collectors.toMap(Function.identity(), e -> InternalTopic.builder() - .partitions(Map.of()) - .name(e) - .build())) - ) - .build())); - - when( - kafkaService.getTopicPartitions(any(), any()) - ).thenReturn( - Map.of() - ); - - - var topics = clusterService.getTopics(topicName, Optional.of(0), Optional.of(-1), - Optional.empty(), Optional.empty(), Optional.empty()); - assertThat(topics.getPageCount()).isEqualTo(4); - assertThat(topics.getTopics()).hasSize(25); - assertThat(topics.getTopics()).map(TopicDTO::getName).isSorted(); - } - - @Test - public void shouldListBotInternalAndNonInternalTopics() { - var topicName = UUID.randomUUID().toString(); - - when(clustersStorage.getClusterByName(topicName)) - .thenReturn(Optional.of(KafkaCluster.builder() - .topics( - IntStream.rangeClosed(1, 100).boxed() - .map(Objects::toString) - .collect(Collectors.toMap(Function.identity(), e -> InternalTopic.builder() - .partitions(Map.of()) - .name(e) - .internal(Integer.parseInt(e) % 10 == 0) - .build())) - ) - .build())); - - when( - kafkaService.getTopicPartitions(any(), any()) - ).thenReturn( - Map.of() - ); - - - var topics = clusterService.getTopics(topicName, - Optional.empty(), Optional.empty(), Optional.of(true), - Optional.empty(), Optional.empty()); - assertThat(topics.getPageCount()).isEqualTo(4); - assertThat(topics.getTopics()).hasSize(25); - assertThat(topics.getTopics()).map(TopicDTO::getName).isSorted(); - } - - - @Test - public void shouldListOnlyNonInternalTopics() { - var topicName = UUID.randomUUID().toString(); - - when(clustersStorage.getClusterByName(topicName)) - .thenReturn(Optional.of(KafkaCluster.builder() - .topics( - IntStream.rangeClosed(1, 100).boxed() - .map(Objects::toString) - .collect(Collectors.toMap(Function.identity(), e -> InternalTopic.builder() - .partitions(Map.of()) - .name(e) - .internal(Integer.parseInt(e) % 10 == 0) - .build())) - ) - .build())); - - when( - kafkaService.getTopicPartitions(any(), any()) - ).thenReturn( - Map.of() - ); - - - var topics = clusterService.getTopics(topicName, - Optional.empty(), Optional.empty(), Optional.of(true), - Optional.empty(), Optional.empty()); - assertThat(topics.getPageCount()).isEqualTo(4); - assertThat(topics.getTopics()).hasSize(25); - assertThat(topics.getTopics()).map(TopicDTO::getName).isSorted(); - } - - - @Test - public void shouldListOnlyTopicsContainingOne() { - var topicName = UUID.randomUUID().toString(); - - when(clustersStorage.getClusterByName(topicName)) - .thenReturn(Optional.of(KafkaCluster.builder() - .topics( - IntStream.rangeClosed(1, 100).boxed() - .map(Objects::toString) - .collect(Collectors.toMap(Function.identity(), e -> InternalTopic.builder() - .partitions(Map.of()) - .name(e) - .build())) - ) - .build())); - - when( - kafkaService.getTopicPartitions(any(), any()) - ).thenReturn( - Map.of() - ); - - - var topics = clusterService.getTopics(topicName, - Optional.empty(), Optional.empty(), Optional.empty(), - Optional.of("1"), Optional.empty()); - assertThat(topics.getPageCount()).isEqualTo(1); - assertThat(topics.getTopics()).hasSize(20); - assertThat(topics.getTopics()).map(TopicDTO::getName).isSorted(); - } - - @Test - public void shouldListTopicsOrderedByPartitionsCount() { - var topicName = UUID.randomUUID().toString(); - - when(clustersStorage.getClusterByName(topicName)) - .thenReturn(Optional.of(KafkaCluster.builder() - .topics( - IntStream.rangeClosed(1, 100).boxed() - .map(Objects::toString) - .collect(Collectors.toMap(Function.identity(), e -> InternalTopic.builder() - 
.partitions(Map.of()) - .name(e) - .partitionCount(100 - Integer.parseInt(e)) - .build())) - ) - .build())); - - when( - kafkaService.getTopicPartitions(any(), any()) - ).thenReturn( - Map.of() - ); - - - var topics = clusterService.getTopics(topicName, - Optional.empty(), Optional.empty(), Optional.empty(), - Optional.empty(), Optional.of(TopicColumnsToSortDTO.TOTAL_PARTITIONS)); - assertThat(topics.getPageCount()).isEqualTo(4); - assertThat(topics.getTopics()).hasSize(25); - assertThat(topics.getTopics()).map(TopicDTO::getPartitionCount).isSorted(); - } - - @Test - public void shouldRetrieveTopicConfigs() { - var topicName = UUID.randomUUID().toString(); - - when(clustersStorage.getClusterByName(topicName)) - .thenReturn(Optional.of(KafkaCluster.builder() - .topics( - IntStream.rangeClosed(1, 100).boxed() - .map(Objects::toString) - .collect(Collectors.toMap(Function.identity(), e -> InternalTopic.builder() - .name(e) - .topicConfigs( - List.of(InternalTopicConfig.builder() - .name("testName") - .value("testValue") - .defaultValue("testDefaultValue") - .source(ConfigEntry.ConfigSource.DEFAULT_CONFIG) - .isReadOnly(true) - .isSensitive(true) - .synonyms(List.of()) - .build() - ) - ) - .build())) - ) - .build())); - - var configs = clusterService.getTopicConfigs(topicName, "1"); - var topicConfig = configs.isPresent() ? configs.get().get(0) : null; - - assertThat(configs.isPresent()).isTrue(); - assertThat(topicConfig.getName()).isEqualTo("testName"); - assertThat(topicConfig.getValue()).isEqualTo("testValue"); - assertThat(topicConfig.getDefaultValue()).isEqualTo("testDefaultValue"); - assertThat(topicConfig.getSource().getValue()) - .isEqualTo(ConfigEntry.ConfigSource.DEFAULT_CONFIG.name()); - assertThat(topicConfig.getSynonyms()).isNotNull(); - assertThat(topicConfig.getIsReadOnly()).isTrue(); - assertThat(topicConfig.getIsSensitive()).isTrue(); - } - -} diff --git a/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/KsqlServiceTest.java b/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/KsqlServiceTest.java index b56555a870..f41b595e79 100644 --- a/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/KsqlServiceTest.java +++ b/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/KsqlServiceTest.java @@ -7,7 +7,6 @@ import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; import com.provectus.kafka.ui.client.KsqlClient; -import com.provectus.kafka.ui.exception.ClusterNotFoundException; import com.provectus.kafka.ui.exception.KsqlDbNotFoundException; import com.provectus.kafka.ui.exception.UnprocessableEntityException; import com.provectus.kafka.ui.model.KafkaCluster; @@ -17,7 +16,6 @@ import com.provectus.kafka.ui.strategy.ksql.statement.BaseStrategy; import com.provectus.kafka.ui.strategy.ksql.statement.DescribeStrategy; import com.provectus.kafka.ui.strategy.ksql.statement.ShowStrategy; import java.util.List; -import java.util.Optional; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.extension.ExtendWith; @@ -45,81 +43,58 @@ class KsqlServiceTest { this.alternativeStrategy = new DescribeStrategy(); this.ksqlService = new KsqlService( this.ksqlClient, - this.clustersStorage, List.of(baseStrategy, alternativeStrategy) ); } - @Test - void shouldThrowClusterNotFoundExceptionOnExecuteKsqlCommand() { - String clusterName = "test"; - KsqlCommandDTO command = (new KsqlCommandDTO()).ksql("show streams;"); - 
when(clustersStorage.getClusterByName(clusterName)).thenReturn(Optional.ofNullable(null)); - - StepVerifier.create(ksqlService.executeKsqlCommand(clusterName, Mono.just(command))) - .verifyError(ClusterNotFoundException.class); - } - @Test void shouldThrowKsqlDbNotFoundExceptionOnExecuteKsqlCommand() { - String clusterName = "test"; KsqlCommandDTO command = (new KsqlCommandDTO()).ksql("show streams;"); KafkaCluster kafkaCluster = Mockito.mock(KafkaCluster.class); - when(clustersStorage.getClusterByName(clusterName)) - .thenReturn(Optional.ofNullable(kafkaCluster)); when(kafkaCluster.getKsqldbServer()).thenReturn(null); - StepVerifier.create(ksqlService.executeKsqlCommand(clusterName, Mono.just(command))) + StepVerifier.create(ksqlService.executeKsqlCommand(kafkaCluster, Mono.just(command))) .verifyError(KsqlDbNotFoundException.class); } @Test void shouldThrowUnprocessableEntityExceptionOnExecuteKsqlCommand() { - String clusterName = "test"; KsqlCommandDTO command = (new KsqlCommandDTO()).ksql("CREATE STREAM users WITH (KAFKA_TOPIC='users');"); KafkaCluster kafkaCluster = Mockito.mock(KafkaCluster.class); - when(clustersStorage.getClusterByName(clusterName)) - .thenReturn(Optional.ofNullable(kafkaCluster)); when(kafkaCluster.getKsqldbServer()).thenReturn("localhost:8088"); - StepVerifier.create(ksqlService.executeKsqlCommand(clusterName, Mono.just(command))) + StepVerifier.create(ksqlService.executeKsqlCommand(kafkaCluster, Mono.just(command))) .verifyError(UnprocessableEntityException.class); - StepVerifier.create(ksqlService.executeKsqlCommand(clusterName, Mono.just(command))) + StepVerifier.create(ksqlService.executeKsqlCommand(kafkaCluster, Mono.just(command))) .verifyErrorMessage("Invalid sql"); } @Test void shouldSetHostToStrategy() { - String clusterName = "test"; String host = "localhost:8088"; KsqlCommandDTO command = (new KsqlCommandDTO()).ksql("describe streams;"); KafkaCluster kafkaCluster = Mockito.mock(KafkaCluster.class); - when(clustersStorage.getClusterByName(clusterName)) - .thenReturn(Optional.ofNullable(kafkaCluster)); when(kafkaCluster.getKsqldbServer()).thenReturn(host); when(ksqlClient.execute(any())).thenReturn(Mono.just(new KsqlCommandResponseDTO())); - ksqlService.executeKsqlCommand(clusterName, Mono.just(command)).block(); + ksqlService.executeKsqlCommand(kafkaCluster, Mono.just(command)).block(); assertThat(alternativeStrategy.getUri()).isEqualTo(host + "/ksql"); } @Test void shouldCallClientAndReturnResponse() { - String clusterName = "test"; KsqlCommandDTO command = (new KsqlCommandDTO()).ksql("describe streams;"); KafkaCluster kafkaCluster = Mockito.mock(KafkaCluster.class); KsqlCommandResponseDTO response = new KsqlCommandResponseDTO().message("success"); - when(clustersStorage.getClusterByName(clusterName)) - .thenReturn(Optional.ofNullable(kafkaCluster)); when(kafkaCluster.getKsqldbServer()).thenReturn("host"); when(ksqlClient.execute(any())).thenReturn(Mono.just(response)); KsqlCommandResponseDTO receivedResponse = - ksqlService.executeKsqlCommand(clusterName, Mono.just(command)).block(); + ksqlService.executeKsqlCommand(kafkaCluster, Mono.just(command)).block(); verify(ksqlClient, times(1)).execute(alternativeStrategy); assertThat(receivedResponse).isEqualTo(response); diff --git a/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/SendAndReadTests.java b/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/SendAndReadTests.java index a642364d91..4ddacc6b1a 100644 --- 
a/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/SendAndReadTests.java +++ b/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/SendAndReadTests.java @@ -7,6 +7,7 @@ import com.fasterxml.jackson.databind.ObjectMapper; import com.provectus.kafka.ui.AbstractBaseTest; import com.provectus.kafka.ui.model.ConsumerPosition; import com.provectus.kafka.ui.model.CreateTopicMessageDTO; +import com.provectus.kafka.ui.model.KafkaCluster; import com.provectus.kafka.ui.model.MessageFormatDTO; import com.provectus.kafka.ui.model.SeekDirectionDTO; import com.provectus.kafka.ui.model.SeekTypeDTO; @@ -24,6 +25,7 @@ import java.util.function.Consumer; import lombok.SneakyThrows; import org.apache.kafka.clients.admin.NewTopic; import org.apache.kafka.common.TopicPartition; +import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.test.context.ContextConfiguration; @@ -118,12 +120,22 @@ public class SendAndReadTests extends AbstractBaseTest { private static final String JSON_SCHEMA_RECORD = "{ \"f1\": 12, \"f2\": \"testJsonSchema1\", \"schema\": \"some txt\" }"; + private KafkaCluster targetCluster; + @Autowired - private ClusterService clusterService; + private MessagesService messagesService; + + @Autowired + private ClustersStorage clustersStorage; @Autowired private ClustersMetricsScheduler clustersMetricsScheduler; + @BeforeEach + void init() { + targetCluster = clustersStorage.getClusterByName(LOCAL).get(); + } + @Test void noSchemaStringKeyStringValue() { new SendAndReadSpec() @@ -500,7 +512,8 @@ public class SendAndReadTests extends AbstractBaseTest { public void assertSendThrowsException() { String topic = createTopicAndCreateSchemas(); try { - assertThatThrownBy(() -> clusterService.sendMessage(LOCAL, topic, msgToSend).block()); + assertThatThrownBy(() -> + messagesService.sendMessage(targetCluster, topic, msgToSend).block()); } finally { deleteTopic(topic); } @@ -510,18 +523,18 @@ public class SendAndReadTests extends AbstractBaseTest { public void doAssert(Consumer msgAssert) { String topic = createTopicAndCreateSchemas(); try { - clusterService.sendMessage(LOCAL, topic, msgToSend).block(); - TopicMessageDTO polled = clusterService.getMessages( - LOCAL, - topic, - new ConsumerPosition( - SeekTypeDTO.BEGINNING, - Map.of(new TopicPartition(topic, 0), 0L), - SeekDirectionDTO.FORWARD - ), - null, - 1 - ).filter(e -> e.getType().equals(TopicMessageEventDTO.TypeEnum.MESSAGE)) + messagesService.sendMessage(targetCluster, topic, msgToSend).block(); + TopicMessageDTO polled = messagesService.loadMessages( + targetCluster, + topic, + new ConsumerPosition( + SeekTypeDTO.BEGINNING, + Map.of(new TopicPartition(topic, 0), 0L), + SeekDirectionDTO.FORWARD + ), + null, + 1 + ).filter(e -> e.getType().equals(TopicMessageEventDTO.TypeEnum.MESSAGE)) .map(TopicMessageEventDTO::getMessage) .blockLast(Duration.ofSeconds(5000)); diff --git a/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/TopicsServiceTest.java b/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/TopicsServiceTest.java new file mode 100644 index 0000000000..8d008cd6fc --- /dev/null +++ b/kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/TopicsServiceTest.java @@ -0,0 +1,233 @@ +package com.provectus.kafka.ui.service; + +import static org.assertj.core.api.Assertions.assertThat; + +import com.provectus.kafka.ui.mapper.ClusterMapper; +import com.provectus.kafka.ui.model.InternalTopic; +import 
com.provectus.kafka.ui.model.InternalTopicConfig; +import com.provectus.kafka.ui.model.KafkaCluster; +import com.provectus.kafka.ui.model.TopicColumnsToSortDTO; +import com.provectus.kafka.ui.model.TopicDTO; +import com.provectus.kafka.ui.serde.DeserializationService; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Optional; +import java.util.function.Function; +import java.util.stream.Collectors; +import java.util.stream.IntStream; +import org.apache.kafka.clients.admin.ConfigEntry; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.mapstruct.factory.Mappers; +import org.mockito.InjectMocks; +import org.mockito.Mock; +import org.mockito.Spy; +import org.mockito.junit.jupiter.MockitoExtension; + +@ExtendWith(MockitoExtension.class) +class TopicsServiceTest { + @Spy + private final ClusterMapper clusterMapper = Mappers.getMapper(ClusterMapper.class); + @InjectMocks + private TopicsService topicsService; + @Mock + private AdminClientService adminClientService; + @Mock + private ConsumerGroupService consumerGroupService; + @Mock + private ClustersStorage clustersStorage; + + @Mock + private DeserializationService deserializationService; + + @Test + public void shouldListFirst25Topics() { + final KafkaCluster cluster = KafkaCluster.builder() + .topics( + IntStream.rangeClosed(1, 100).boxed() + .map(Objects::toString) + .collect(Collectors.toMap(Function.identity(), e -> InternalTopic.builder() + .partitions(Map.of()) + .name(e) + .build())) + ) + .build(); + + var topics = topicsService.getTopics(cluster, + Optional.empty(), Optional.empty(), Optional.empty(), + Optional.empty(), Optional.empty()); + assertThat(topics.getPageCount()).isEqualTo(4); + assertThat(topics.getTopics()).hasSize(25); + assertThat(topics.getTopics()).map(TopicDTO::getName).isSorted(); + } + + @Test + public void shouldCalculateCorrectPageCountForNonDivisiblePageSize() { + var cluster = KafkaCluster.builder() + .topics( + IntStream.rangeClosed(1, 100).boxed() + .map(Objects::toString) + .collect(Collectors.toMap(Function.identity(), e -> InternalTopic.builder() + .partitions(Map.of()) + .name(e) + .build())) + ) + .build(); + + var topics = topicsService.getTopics(cluster, Optional.of(4), Optional.of(33), + Optional.empty(), Optional.empty(), Optional.empty()); + assertThat(topics.getPageCount()).isEqualTo(4); + assertThat(topics.getTopics()).hasSize(1) + .first().extracting(TopicDTO::getName).isEqualTo("99"); + } + + @Test + public void shouldCorrectlyHandleNonPositivePageNumberAndPageSize() { + var cluster = KafkaCluster.builder() + .topics( + IntStream.rangeClosed(1, 100).boxed() + .map(Objects::toString) + .collect(Collectors.toMap(Function.identity(), e -> InternalTopic.builder() + .partitions(Map.of()) + .name(e) + .build())) + ) + .build(); + + + var topics = topicsService.getTopics(cluster, Optional.of(0), Optional.of(-1), + Optional.empty(), Optional.empty(), Optional.empty()); + assertThat(topics.getPageCount()).isEqualTo(4); + assertThat(topics.getTopics()).hasSize(25); + assertThat(topics.getTopics()).map(TopicDTO::getName).isSorted(); + } + + @Test + public void shouldListBothInternalAndNonInternalTopics() { + var cluster = KafkaCluster.builder() + .topics( + IntStream.rangeClosed(1, 100).boxed() + .map(Objects::toString) + .collect(Collectors.toMap(Function.identity(), e -> InternalTopic.builder() + .partitions(Map.of()) + .name(e) + .internal(Integer.parseInt(e) % 10 == 0) + .build())) + ) + .build(); + + var
topics = topicsService.getTopics(cluster, + Optional.empty(), Optional.empty(), Optional.of(true), + Optional.empty(), Optional.empty()); + assertThat(topics.getPageCount()).isEqualTo(4); + assertThat(topics.getTopics()).hasSize(25); + assertThat(topics.getTopics()).map(TopicDTO::getName).isSorted(); + } + + + @Test + public void shouldListOnlyNonInternalTopics() { + var cluster = KafkaCluster.builder() + .topics( + IntStream.rangeClosed(1, 100).boxed() + .map(Objects::toString) + .collect(Collectors.toMap(Function.identity(), e -> InternalTopic.builder() + .partitions(Map.of()) + .name(e) + .internal(Integer.parseInt(e) % 10 == 0) + .build())) + ) + .build(); + + var topics = topicsService.getTopics(cluster, + Optional.empty(), Optional.empty(), Optional.of(false), + Optional.empty(), Optional.empty()); + assertThat(topics.getPageCount()).isEqualTo(4); + assertThat(topics.getTopics()).hasSize(25); + assertThat(topics.getTopics()).map(TopicDTO::getName).isSorted(); + } + + + @Test + public void shouldListOnlyTopicsContainingOne() { + var cluster = KafkaCluster.builder() + .topics( + IntStream.rangeClosed(1, 100).boxed() + .map(Objects::toString) + .collect(Collectors.toMap(Function.identity(), e -> InternalTopic.builder() + .partitions(Map.of()) + .name(e) + .build())) + ) + .build(); + + var topics = topicsService.getTopics(cluster, + Optional.empty(), Optional.empty(), Optional.empty(), + Optional.of("1"), Optional.empty()); + assertThat(topics.getPageCount()).isEqualTo(1); + assertThat(topics.getTopics()).hasSize(20); + assertThat(topics.getTopics()).map(TopicDTO::getName).isSorted(); + } + + @Test + public void shouldListTopicsOrderedByPartitionsCount() { + var cluster = KafkaCluster.builder() + .topics( + IntStream.rangeClosed(1, 100).boxed() + .map(Objects::toString) + .collect(Collectors.toMap(Function.identity(), e -> InternalTopic.builder() + .partitions(Map.of()) + .name(e) + .partitionCount(100 - Integer.parseInt(e)) + .build())) + ) + .build(); + + var topics = topicsService.getTopics(cluster, + Optional.empty(), Optional.empty(), Optional.empty(), + Optional.empty(), Optional.of(TopicColumnsToSortDTO.TOTAL_PARTITIONS)); + assertThat(topics.getPageCount()).isEqualTo(4); + assertThat(topics.getTopics()).hasSize(25); + assertThat(topics.getTopics()).map(TopicDTO::getPartitionCount).isSorted(); + } + + @Test + public void shouldRetrieveTopicConfigs() { + var cluster = KafkaCluster.builder() + .topics( + IntStream.rangeClosed(1, 100).boxed() + .map(Objects::toString) + .collect(Collectors.toMap(Function.identity(), e -> InternalTopic.builder() + .name(e) + .topicConfigs( + List.of(InternalTopicConfig.builder() + .name("testName") + .value("testValue") + .defaultValue("testDefaultValue") + .source(ConfigEntry.ConfigSource.DEFAULT_CONFIG) + .isReadOnly(true) + .isSensitive(true) + .synonyms(List.of()) + .build() + ) + ) + .build())) + ) + .build(); + + var configs = topicsService.getTopicConfigs(cluster, "1"); + var topicConfig = configs.isPresent() ?
configs.get().get(0) : null; + + assertThat(configs.isPresent()).isTrue(); + assertThat(topicConfig.getName()).isEqualTo("testName"); + assertThat(topicConfig.getValue()).isEqualTo("testValue"); + assertThat(topicConfig.getDefaultValue()).isEqualTo("testDefaultValue"); + assertThat(topicConfig.getSource().getValue()) + .isEqualTo(ConfigEntry.ConfigSource.DEFAULT_CONFIG.name()); + assertThat(topicConfig.getSynonyms()).isNotNull(); + assertThat(topicConfig.getIsReadOnly()).isTrue(); + assertThat(topicConfig.getIsSensitive()).isTrue(); + } + +}
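Note on the page-count arithmetic exercised by shouldCalculateCorrectPageCountForNonDivisiblePageSize: getTopics in TopicsService computes a ceiling division by adding one extra page whenever the topic count is not a multiple of the page size. A minimal standalone sketch of that arithmetic, with values taken from the test fixture above (nothing here is part of the patch):

int size = 100;   // topics in the cluster fixture
int perPage = 33; // requested page size
int totalPages = (size / perPage) + (size % perPage == 0 ? 0 : 1); // 3 + 1 = 4
int skipped = (4 - 1) * perPage; // requesting page 4 skips 99 name-sorted topics
// leaving exactly one topic on the last page: "99", which sorts last
// lexicographically among "1".."100", as the test asserts.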
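And a hedged sketch of how the deleteConsumerGroups error translation added to ReactiveAdminClient could be exercised, in the StepVerifier style the tests in this patch already use; the reactiveAdminClient instance and the group ids are illustrative assumptions, not part of the patch:

// Hypothetical: assumes a connected ReactiveAdminClient named reactiveAdminClient.
StepVerifier.create(reactiveAdminClient.deleteConsumerGroups(List.of("no-such-group")))
    .verifyError(NotFoundException.class); // GroupIdNotFoundException is remapped
StepVerifier.create(reactiveAdminClient.deleteConsumerGroups(List.of("non-empty-group")))
    .verifyError(IllegalEntityStateException.class); // GroupNotEmptyException is remapped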