BE Services split (#964)
BE Services split:
* Unrelated logic moved from ClusterService to proper services
* KafkaCluster existence check moved to controllers level
* Useless interfaces removed
parent ad19571eca
commit d0f63aeaa0
28 changed files with 1787 additions and 2004 deletions
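The pattern the diff introduces is small: each controller now resolves the KafkaCluster by name once (via AbstractController.getCluster), fails fast with a not-found error if the cluster does not exist, and passes the resolved cluster into a per-domain service instead of handing a bare cluster name to the former ClusterService. The stand-alone sketch below illustrates that flow only; the ClusterRegistry, Cluster, TopicService and TopicsController names and the plain, non-Spring, non-reactive shape are hypothetical simplifications and not part of the actual codebase.

import java.util.List;
import java.util.Map;
import java.util.Optional;

// Simplified sketch of the "existence check in the controller" pattern from this commit.
// ClusterRegistry, Cluster and TopicService are hypothetical stand-ins for
// ClustersStorage, KafkaCluster and the new per-domain services.
public class ControllerPatternSketch {

  record Cluster(String name, List<String> topics) {}

  static class ClusterRegistry {
    private final Map<String, Cluster> byName;

    ClusterRegistry(Map<String, Cluster> byName) {
      this.byName = byName;
    }

    Optional<Cluster> getClusterByName(String name) {
      return Optional.ofNullable(byName.get(name));
    }
  }

  // Analogue of AbstractController: resolve the cluster once, fail fast if it is missing.
  abstract static class AbstractController {
    private final ClusterRegistry registry;

    AbstractController(ClusterRegistry registry) {
      this.registry = registry;
    }

    protected Cluster getCluster(String name) {
      return registry.getClusterByName(name)
          .orElseThrow(() -> new IllegalArgumentException(
              String.format("Cluster with name '%s' not found", name)));
    }
  }

  // Analogue of a per-domain service: it receives an already-resolved cluster,
  // so it never repeats the lookup or depends on the registry.
  static class TopicService {
    List<String> listTopics(Cluster cluster) {
      return cluster.topics();
    }
  }

  // Analogue of TopicsController after the split.
  static class TopicsController extends AbstractController {
    private final TopicService topicService;

    TopicsController(ClusterRegistry registry, TopicService topicService) {
      super(registry);
      this.topicService = topicService;
    }

    List<String> getTopics(String clusterName) {
      return topicService.listTopics(getCluster(clusterName));
    }
  }

  public static void main(String[] args) {
    var registry = new ClusterRegistry(
        Map.of("local", new Cluster("local", List.of("orders", "payments"))));
    var controller = new TopicsController(registry, new TopicService());

    System.out.println(controller.getTopics("local"));   // prints [orders, payments]
    // controller.getTopics("missing") would throw: Cluster with name 'missing' not found
  }
}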
AbstractController.java (new file)

@@ -0,0 +1,22 @@
+package com.provectus.kafka.ui.controller;
+
+import com.provectus.kafka.ui.exception.ClusterNotFoundException;
+import com.provectus.kafka.ui.model.KafkaCluster;
+import com.provectus.kafka.ui.service.ClustersStorage;
+import org.springframework.beans.factory.annotation.Autowired;
+
+public abstract class AbstractController {
+
+  private ClustersStorage clustersStorage;
+
+  protected KafkaCluster getCluster(String name) {
+    return clustersStorage.getClusterByName(name)
+        .orElseThrow(() -> new ClusterNotFoundException(
+            String.format("Cluster with name '%s' not found", name)));
+  }
+
+  @Autowired
+  public void setClustersStorage(ClustersStorage clustersStorage) {
+    this.clustersStorage = clustersStorage;
+  }
+}
BrokersController.java

@@ -7,7 +7,7 @@ import com.provectus.kafka.ui.model.BrokerDTO;
 import com.provectus.kafka.ui.model.BrokerLogdirUpdateDTO;
 import com.provectus.kafka.ui.model.BrokerMetricsDTO;
 import com.provectus.kafka.ui.model.BrokersLogdirsDTO;
-import com.provectus.kafka.ui.service.ClusterService;
+import com.provectus.kafka.ui.service.BrokerService;
 import java.util.List;
 import lombok.RequiredArgsConstructor;
 import lombok.extern.log4j.Log4j2;
@@ -20,13 +20,13 @@ import reactor.core.publisher.Mono;
 @RestController
 @RequiredArgsConstructor
 @Log4j2
-public class BrokersController implements BrokersApi {
-  private final ClusterService clusterService;
+public class BrokersController extends AbstractController implements BrokersApi {
+  private final BrokerService brokerService;

   @Override
   public Mono<ResponseEntity<BrokerMetricsDTO>> getBrokersMetrics(String clusterName, Integer id,
                                                                   ServerWebExchange exchange) {
-    return clusterService.getBrokerMetrics(clusterName, id)
+    return brokerService.getBrokerMetrics(getCluster(clusterName), id)
         .map(ResponseEntity::ok)
         .onErrorReturn(ResponseEntity.notFound().build());
   }
@@ -34,7 +34,7 @@ public class BrokersController implements BrokersApi {
   @Override
   public Mono<ResponseEntity<Flux<BrokerDTO>>> getBrokers(String clusterName,
                                                           ServerWebExchange exchange) {
-    return Mono.just(ResponseEntity.ok(clusterService.getBrokers(clusterName)));
+    return Mono.just(ResponseEntity.ok(brokerService.getBrokers(getCluster(clusterName))));
   }

   @Override
@@ -42,13 +42,15 @@ public class BrokersController implements BrokersApi {
                                                              List<Integer> brokers,
                                                              ServerWebExchange exchange
   ) {
-    return Mono.just(ResponseEntity.ok(clusterService.getAllBrokersLogdirs(clusterName, brokers)));
+    return Mono.just(ResponseEntity.ok(
+        brokerService.getAllBrokersLogdirs(getCluster(clusterName), brokers)));
   }

   @Override
   public Mono<ResponseEntity<Flux<BrokerConfigDTO>>> getBrokerConfig(String clusterName, Integer id,
                                                                      ServerWebExchange exchange) {
-    return Mono.just(ResponseEntity.ok(clusterService.getBrokerConfig(clusterName, id)));
+    return Mono.just(ResponseEntity.ok(
+        brokerService.getBrokerConfig(getCluster(clusterName), id)));
   }

   @Override
@@ -56,7 +58,7 @@ public class BrokersController implements BrokersApi {
       String clusterName, Integer id, Mono<BrokerLogdirUpdateDTO> brokerLogdir,
       ServerWebExchange exchange) {
     return brokerLogdir
-        .flatMap(bld -> clusterService.updateBrokerLogDir(clusterName, id, bld))
+        .flatMap(bld -> brokerService.updateBrokerLogDir(getCluster(clusterName), id, bld))
         .map(ResponseEntity::ok);
   }

@@ -67,8 +69,8 @@ public class BrokersController implements BrokersApi {
       Mono<BrokerConfigItemDTO> brokerConfig,
       ServerWebExchange exchange) {
     return brokerConfig
-        .flatMap(bci -> clusterService.updateBrokerConfigByName(
-            clusterName, id, name, bci.getValue()))
+        .flatMap(bci -> brokerService.updateBrokerConfigByName(
+            getCluster(clusterName), id, name, bci.getValue()))
         .map(ResponseEntity::ok);
   }
 }
ConsumerGroupsController.java

@@ -3,14 +3,12 @@ package com.provectus.kafka.ui.controller;
 import static java.util.stream.Collectors.toMap;

 import com.provectus.kafka.ui.api.ConsumerGroupsApi;
-import com.provectus.kafka.ui.exception.ClusterNotFoundException;
 import com.provectus.kafka.ui.exception.ValidationException;
 import com.provectus.kafka.ui.model.ConsumerGroupDTO;
 import com.provectus.kafka.ui.model.ConsumerGroupDetailsDTO;
 import com.provectus.kafka.ui.model.ConsumerGroupOffsetsResetDTO;
 import com.provectus.kafka.ui.model.PartitionOffsetDTO;
-import com.provectus.kafka.ui.service.ClusterService;
-import com.provectus.kafka.ui.service.ClustersStorage;
+import com.provectus.kafka.ui.service.ConsumerGroupService;
 import com.provectus.kafka.ui.service.OffsetsResetService;
 import java.util.Map;
 import java.util.Optional;
@@ -26,22 +24,22 @@ import reactor.core.publisher.Mono;
 @RestController
 @RequiredArgsConstructor
 @Log4j2
-public class ConsumerGroupsController implements ConsumerGroupsApi {
-  private final ClusterService clusterService;
+public class ConsumerGroupsController extends AbstractController implements ConsumerGroupsApi {
+  private final ConsumerGroupService consumerGroupService;
   private final OffsetsResetService offsetsResetService;
-  private final ClustersStorage clustersStorage;

   @Override
   public Mono<ResponseEntity<Void>> deleteConsumerGroup(String clusterName, String id,
                                                         ServerWebExchange exchange) {
-    return clusterService.deleteConsumerGroupById(clusterName, id)
+    return consumerGroupService.deleteConsumerGroupById(getCluster(clusterName), id)
         .map(ResponseEntity::ok);
   }

   @Override
   public Mono<ResponseEntity<ConsumerGroupDetailsDTO>> getConsumerGroup(
       String clusterName, String consumerGroupId, ServerWebExchange exchange) {
-    return clusterService.getConsumerGroupDetail(clusterName, consumerGroupId)
+    return consumerGroupService.getConsumerGroupDetail(getCluster(clusterName), consumerGroupId)
         .map(ResponseEntity::ok);
   }

@@ -49,7 +47,7 @@ public class ConsumerGroupsController implements ConsumerGroupsApi {
   @Override
   public Mono<ResponseEntity<Flux<ConsumerGroupDTO>>> getConsumerGroups(String clusterName,
                                                                         ServerWebExchange exchange) {
-    return clusterService.getConsumerGroups(clusterName)
+    return consumerGroupService.getConsumerGroups(getCluster(clusterName))
         .map(Flux::fromIterable)
         .map(ResponseEntity::ok)
         .switchIfEmpty(Mono.just(ResponseEntity.notFound().build()));
@@ -58,7 +56,8 @@ public class ConsumerGroupsController implements ConsumerGroupsApi {
   @Override
   public Mono<ResponseEntity<Flux<ConsumerGroupDTO>>> getTopicConsumerGroups(
       String clusterName, String topicName, ServerWebExchange exchange) {
-    return clusterService.getConsumerGroups(clusterName, Optional.of(topicName))
+    return consumerGroupService.getConsumerGroups(
+        getCluster(clusterName), Optional.of(topicName))
         .map(Flux::fromIterable)
         .map(ResponseEntity::ok)
         .switchIfEmpty(Mono.just(ResponseEntity.notFound().build()));
@@ -71,9 +70,7 @@ public class ConsumerGroupsController implements ConsumerGroupsApi {
                                                             consumerGroupOffsetsReset,
                                                         ServerWebExchange exchange) {
     return consumerGroupOffsetsReset.flatMap(reset -> {
-      var cluster =
-          clustersStorage.getClusterByName(clusterName).orElseThrow(ClusterNotFoundException::new);
+      var cluster = getCluster(clusterName);

       switch (reset.getResetType()) {
         case EARLIEST:
           return offsetsResetService
KafkaConnectController.java

@@ -23,19 +23,19 @@ import reactor.core.publisher.Mono;
 @RestController
 @RequiredArgsConstructor
 @Log4j2
-public class KafkaConnectController implements KafkaConnectApi {
+public class KafkaConnectController extends AbstractController implements KafkaConnectApi {
   private final KafkaConnectService kafkaConnectService;

   @Override
   public Mono<ResponseEntity<Flux<ConnectDTO>>> getConnects(String clusterName,
                                                             ServerWebExchange exchange) {
-    return kafkaConnectService.getConnects(clusterName).map(ResponseEntity::ok);
+    return kafkaConnectService.getConnects(getCluster(clusterName)).map(ResponseEntity::ok);
   }

   @Override
   public Mono<ResponseEntity<Flux<String>>> getConnectors(String clusterName, String connectName,
                                                           ServerWebExchange exchange) {
-    Flux<String> connectors = kafkaConnectService.getConnectors(clusterName, connectName);
+    var connectors = kafkaConnectService.getConnectors(getCluster(clusterName), connectName);
     return Mono.just(ResponseEntity.ok(connectors));
   }

@@ -43,7 +43,7 @@ public class KafkaConnectController implements KafkaConnectApi {
   public Mono<ResponseEntity<ConnectorDTO>> createConnector(String clusterName, String connectName,
                                                             @Valid Mono<NewConnectorDTO> connector,
                                                             ServerWebExchange exchange) {
-    return kafkaConnectService.createConnector(clusterName, connectName, connector)
+    return kafkaConnectService.createConnector(getCluster(clusterName), connectName, connector)
         .map(ResponseEntity::ok);
   }

@@ -51,7 +51,7 @@ public class KafkaConnectController implements KafkaConnectApi {
   public Mono<ResponseEntity<ConnectorDTO>> getConnector(String clusterName, String connectName,
                                                          String connectorName,
                                                          ServerWebExchange exchange) {
-    return kafkaConnectService.getConnector(clusterName, connectName, connectorName)
+    return kafkaConnectService.getConnector(getCluster(clusterName), connectName, connectorName)
         .map(ResponseEntity::ok);
   }

@@ -59,7 +59,7 @@ public class KafkaConnectController implements KafkaConnectApi {
   public Mono<ResponseEntity<Void>> deleteConnector(String clusterName, String connectName,
                                                     String connectorName,
                                                     ServerWebExchange exchange) {
-    return kafkaConnectService.deleteConnector(clusterName, connectName, connectorName)
+    return kafkaConnectService.deleteConnector(getCluster(clusterName), connectName, connectorName)
         .map(ResponseEntity::ok);
   }

@@ -70,7 +70,8 @@ public class KafkaConnectController implements KafkaConnectApi {
       String search,
       ServerWebExchange exchange
   ) {
-    return Mono.just(ResponseEntity.ok(kafkaConnectService.getAllConnectors(clusterName, search)));
+    return Mono.just(ResponseEntity.ok(
+        kafkaConnectService.getAllConnectors(getCluster(clusterName), search)));
   }

   @Override
@@ -78,7 +79,8 @@ public class KafkaConnectController implements KafkaConnectApi {
       String connectName,
       String connectorName,
       ServerWebExchange exchange) {
-    return kafkaConnectService.getConnectorConfig(clusterName, connectName, connectorName)
+    return kafkaConnectService
+        .getConnectorConfig(getCluster(clusterName), connectName, connectorName)
         .map(ResponseEntity::ok);
   }

@@ -89,7 +91,7 @@ public class KafkaConnectController implements KafkaConnectApi {
       @Valid Mono<Object> requestBody,
       ServerWebExchange exchange) {
     return kafkaConnectService
-        .setConnectorConfig(clusterName, connectName, connectorName, requestBody)
+        .setConnectorConfig(getCluster(clusterName), connectName, connectorName, requestBody)
         .map(ResponseEntity::ok);
   }

@@ -98,7 +100,8 @@ public class KafkaConnectController implements KafkaConnectApi {
       String connectorName,
       ConnectorActionDTO action,
       ServerWebExchange exchange) {
-    return kafkaConnectService.updateConnectorState(clusterName, connectName, connectorName, action)
+    return kafkaConnectService
+        .updateConnectorState(getCluster(clusterName), connectName, connectorName, action)
         .map(ResponseEntity::ok);
   }

@@ -108,21 +111,24 @@ public class KafkaConnectController implements KafkaConnectApi {
       String connectorName,
       ServerWebExchange exchange) {
     return Mono.just(ResponseEntity
-        .ok(kafkaConnectService.getConnectorTasks(clusterName, connectName, connectorName)));
+        .ok(kafkaConnectService
+            .getConnectorTasks(getCluster(clusterName), connectName, connectorName)));
   }

   @Override
   public Mono<ResponseEntity<Void>> restartConnectorTask(String clusterName, String connectName,
                                                          String connectorName, Integer taskId,
                                                          ServerWebExchange exchange) {
-    return kafkaConnectService.restartConnectorTask(clusterName, connectName, connectorName, taskId)
+    return kafkaConnectService
+        .restartConnectorTask(getCluster(clusterName), connectName, connectorName, taskId)
         .map(ResponseEntity::ok);
   }

   @Override
   public Mono<ResponseEntity<Flux<ConnectorPluginDTO>>> getConnectorPlugins(
       String clusterName, String connectName, ServerWebExchange exchange) {
-    return kafkaConnectService.getConnectorPlugins(clusterName, connectName)
+    return kafkaConnectService
+        .getConnectorPlugins(getCluster(clusterName), connectName)
         .map(ResponseEntity::ok);
   }

@@ -132,7 +138,8 @@ public class KafkaConnectController implements KafkaConnectApi {
       String clusterName, String connectName, String pluginName, @Valid Mono<Object> requestBody,
       ServerWebExchange exchange) {
     return kafkaConnectService
-        .validateConnectorPluginConfig(clusterName, connectName, pluginName, requestBody)
+        .validateConnectorPluginConfig(
+            getCluster(clusterName), connectName, pluginName, requestBody)
         .map(ResponseEntity::ok);
   }
 }
KsqlController.java

@@ -14,7 +14,7 @@ import reactor.core.publisher.Mono;
 @RestController
 @RequiredArgsConstructor
 @Log4j2
-public class KsqlController implements KsqlApi {
+public class KsqlController extends AbstractController implements KsqlApi {
   private final KsqlService ksqlService;

   @Override
@@ -22,6 +22,7 @@ public class KsqlController implements KsqlApi {
                                                              Mono<KsqlCommandDTO>
                                                                  ksqlCommand,
                                                              ServerWebExchange exchange) {
-    return ksqlService.executeKsqlCommand(clusterName, ksqlCommand).map(ResponseEntity::ok);
+    return ksqlService.executeKsqlCommand(getCluster(clusterName), ksqlCommand)
+        .map(ResponseEntity::ok);
   }
 }
MessagesController.java

@@ -7,7 +7,8 @@ import com.provectus.kafka.ui.model.SeekDirectionDTO;
 import com.provectus.kafka.ui.model.SeekTypeDTO;
 import com.provectus.kafka.ui.model.TopicMessageEventDTO;
 import com.provectus.kafka.ui.model.TopicMessageSchemaDTO;
-import com.provectus.kafka.ui.service.ClusterService;
+import com.provectus.kafka.ui.service.MessagesService;
+import com.provectus.kafka.ui.service.TopicsService;
 import java.util.Collections;
 import java.util.List;
 import java.util.Optional;
@@ -26,15 +27,16 @@ import reactor.core.publisher.Mono;
 @RestController
 @RequiredArgsConstructor
 @Log4j2
-public class MessagesController implements MessagesApi {
-  private final ClusterService clusterService;
+public class MessagesController extends AbstractController implements MessagesApi {
+  private final MessagesService messagesService;
+  private final TopicsService topicsService;

   @Override
   public Mono<ResponseEntity<Void>> deleteTopicMessages(
       String clusterName, String topicName, @Valid List<Integer> partitions,
       ServerWebExchange exchange) {
-    return clusterService.deleteTopicMessages(
-        clusterName,
+    return messagesService.deleteTopicMessages(
+        getCluster(clusterName),
         topicName,
         Optional.ofNullable(partitions).orElse(List.of())
     ).map(ResponseEntity::ok);
@@ -48,7 +50,8 @@ public class MessagesController implements MessagesApi {
     return parseConsumerPosition(topicName, seekType, seekTo, seekDirection)
         .map(position ->
             ResponseEntity.ok(
-                clusterService.getMessages(clusterName, topicName, position, q, limit)
+                messagesService.loadMessages(
+                    getCluster(clusterName), topicName, position, q, limit)
             )
         );
   }
@@ -56,7 +59,7 @@ public class MessagesController implements MessagesApi {
   @Override
   public Mono<ResponseEntity<TopicMessageSchemaDTO>> getTopicSchema(
       String clusterName, String topicName, ServerWebExchange exchange) {
-    return Mono.just(clusterService.getTopicSchema(clusterName, topicName))
+    return Mono.just(topicsService.getTopicSchema(getCluster(clusterName), topicName))
         .map(ResponseEntity::ok);
   }

@@ -65,7 +68,7 @@ public class MessagesController implements MessagesApi {
       String clusterName, String topicName, @Valid Mono<CreateTopicMessageDTO> createTopicMessage,
       ServerWebExchange exchange) {
     return createTopicMessage.flatMap(msg ->
-        clusterService.sendMessage(clusterName, topicName, msg)
+        messagesService.sendMessage(getCluster(clusterName), topicName, msg).then()
     ).map(ResponseEntity::ok);
   }

TopicsController.java

@@ -12,7 +12,7 @@ import com.provectus.kafka.ui.model.TopicDTO;
 import com.provectus.kafka.ui.model.TopicDetailsDTO;
 import com.provectus.kafka.ui.model.TopicUpdateDTO;
 import com.provectus.kafka.ui.model.TopicsResponseDTO;
-import com.provectus.kafka.ui.service.ClusterService;
+import com.provectus.kafka.ui.service.TopicsService;
 import java.util.Optional;
 import javax.validation.Valid;
 import lombok.RequiredArgsConstructor;
@@ -27,13 +27,13 @@ import reactor.core.publisher.Mono;
 @RestController
 @RequiredArgsConstructor
 @Log4j2
-public class TopicsController implements TopicsApi {
-  private final ClusterService clusterService;
+public class TopicsController extends AbstractController implements TopicsApi {
+  private final TopicsService topicsService;

   @Override
   public Mono<ResponseEntity<TopicDTO>> createTopic(
       String clusterName, @Valid Mono<TopicCreationDTO> topicCreation, ServerWebExchange exchange) {
-    return clusterService.createTopic(clusterName, topicCreation)
+    return topicsService.createTopic(getCluster(clusterName), topicCreation)
         .map(s -> new ResponseEntity<>(s, HttpStatus.OK))
         .switchIfEmpty(Mono.just(ResponseEntity.notFound().build()));
   }
@@ -41,7 +41,7 @@ public class TopicsController implements TopicsApi {
   @Override
   public Mono<ResponseEntity<Void>> deleteTopic(
       String clusterName, String topicName, ServerWebExchange exchange) {
-    return clusterService.deleteTopic(clusterName, topicName).map(ResponseEntity::ok);
+    return topicsService.deleteTopic(getCluster(clusterName), topicName).map(ResponseEntity::ok);
   }

@@ -49,7 +49,7 @@ public class TopicsController implements TopicsApi {
   public Mono<ResponseEntity<Flux<TopicConfigDTO>>> getTopicConfigs(
       String clusterName, String topicName, ServerWebExchange exchange) {
     return Mono.just(
-        clusterService.getTopicConfigs(clusterName, topicName)
+        topicsService.getTopicConfigs(getCluster(clusterName), topicName)
             .map(Flux::fromIterable)
             .map(ResponseEntity::ok)
             .orElse(ResponseEntity.notFound().build())
@@ -60,7 +60,7 @@ public class TopicsController implements TopicsApi {
   public Mono<ResponseEntity<TopicDetailsDTO>> getTopicDetails(
       String clusterName, String topicName, ServerWebExchange exchange) {
     return Mono.just(
-        clusterService.getTopicDetails(clusterName, topicName)
+        topicsService.getTopicDetails(getCluster(clusterName), topicName)
             .map(ResponseEntity::ok)
             .orElse(ResponseEntity.notFound().build())
     );
@@ -73,9 +73,9 @@ public class TopicsController implements TopicsApi {
       @Valid String search,
       @Valid TopicColumnsToSortDTO orderBy,
       ServerWebExchange exchange) {
-    return Mono.just(ResponseEntity.ok(clusterService
+    return Mono.just(ResponseEntity.ok(topicsService
         .getTopics(
-            clusterName,
+            getCluster(clusterName),
             Optional.ofNullable(page),
             Optional.ofNullable(perPage),
             Optional.ofNullable(showInternal),
@@ -88,7 +88,8 @@ public class TopicsController implements TopicsApi {
   public Mono<ResponseEntity<TopicDTO>> updateTopic(
       String clusterId, String topicName, @Valid Mono<TopicUpdateDTO> topicUpdate,
       ServerWebExchange exchange) {
-    return clusterService.updateTopic(clusterId, topicName, topicUpdate).map(ResponseEntity::ok);
+    return topicsService
+        .updateTopic(getCluster(clusterId), topicName, topicUpdate).map(ResponseEntity::ok);
   }

   @Override
@@ -97,7 +98,8 @@ public class TopicsController implements TopicsApi {
       Mono<PartitionsIncreaseDTO> partitionsIncrease,
       ServerWebExchange exchange) {
     return partitionsIncrease.flatMap(
-        partitions -> clusterService.increaseTopicPartitions(clusterName, topicName, partitions))
+        partitions ->
+            topicsService.increaseTopicPartitions(getCluster(clusterName), topicName, partitions))
         .map(ResponseEntity::ok);
   }

@@ -107,7 +109,8 @@ public class TopicsController implements TopicsApi {
       Mono<ReplicationFactorChangeDTO> replicationFactorChange,
       ServerWebExchange exchange) {
     return replicationFactorChange
-        .flatMap(rfc -> clusterService.changeReplicationFactor(clusterName, topicName, rfc))
+        .flatMap(rfc ->
+            topicsService.changeReplicationFactor(getCluster(clusterName), topicName, rfc))
         .map(ResponseEntity::ok);
   }
 }
BrokerService.java

@@ -1,46 +1,171 @@
 package com.provectus.kafka.ui.service;

+import com.provectus.kafka.ui.exception.IllegalEntityStateException;
+import com.provectus.kafka.ui.exception.InvalidRequestApiException;
+import com.provectus.kafka.ui.exception.LogDirNotFoundApiException;
+import com.provectus.kafka.ui.exception.NotFoundException;
+import com.provectus.kafka.ui.exception.TopicOrPartitionNotFoundException;
+import com.provectus.kafka.ui.mapper.ClusterMapper;
+import com.provectus.kafka.ui.mapper.DescribeLogDirsMapper;
+import com.provectus.kafka.ui.model.BrokerConfigDTO;
 import com.provectus.kafka.ui.model.BrokerDTO;
+import com.provectus.kafka.ui.model.BrokerLogdirUpdateDTO;
+import com.provectus.kafka.ui.model.BrokerMetricsDTO;
+import com.provectus.kafka.ui.model.BrokersLogdirsDTO;
 import com.provectus.kafka.ui.model.InternalBrokerConfig;
 import com.provectus.kafka.ui.model.KafkaCluster;
+import com.provectus.kafka.ui.util.ClusterUtil;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
 import java.util.Map;
+import java.util.stream.Collectors;
+import lombok.RequiredArgsConstructor;
+import lombok.extern.log4j.Log4j2;
+import org.apache.kafka.clients.admin.ConfigEntry;
 import org.apache.kafka.common.Node;
+import org.apache.kafka.common.TopicPartitionReplica;
+import org.apache.kafka.common.errors.InvalidRequestException;
+import org.apache.kafka.common.errors.LogDirNotFoundException;
+import org.apache.kafka.common.errors.TimeoutException;
+import org.apache.kafka.common.errors.UnknownTopicOrPartitionException;
+import org.apache.kafka.common.requests.DescribeLogDirsResponse;
+import org.springframework.stereotype.Service;
 import reactor.core.publisher.Flux;
 import reactor.core.publisher.Mono;

-public interface BrokerService {
-  /**
-   * Get brokers config as map (Config name, Config).
-   *
-   * @param cluster - cluster
-   * @param brokerId - node id
-   * @return Mono of Map(String, InternalBrokerConfig)
-   */
-  Mono<Map<String, InternalBrokerConfig>> getBrokerConfigMap(KafkaCluster cluster,
-                                                             Integer brokerId);
-
-  /**
-   * Get brokers config as Flux of InternalBrokerConfig.
-   *
-   * @param cluster - cluster
-   * @param brokerId - node id
-   * @return Flux of InternalBrokerConfig
-   */
-  Flux<InternalBrokerConfig> getBrokersConfig(KafkaCluster cluster, Integer brokerId);
-
-  /**
-   * Get active brokers in cluster.
-   *
-   * @param cluster - cluster
-   * @return Flux of Broker
-   */
-  Flux<BrokerDTO> getBrokers(KafkaCluster cluster);
-
-  /**
-   * Get cluster controller node.
-   *
-   * @param cluster - cluster
-   * @return Controller node
-   */
-  Mono<Node> getController(KafkaCluster cluster);
+@Service
+@RequiredArgsConstructor
+@Log4j2
+public class BrokerService {
+
+  private final AdminClientService adminClientService;
+  private final DescribeLogDirsMapper describeLogDirsMapper;
+  private final ClusterMapper clusterMapper;
+
+  private Mono<Map<Integer, List<ConfigEntry>>> loadBrokersConfig(
+      KafkaCluster cluster, List<Integer> brokersIds) {
+    return adminClientService.get(cluster)
+        .flatMap(ac -> ac.loadBrokersConfig(brokersIds));
+  }
+
+  private Mono<List<ConfigEntry>> loadBrokersConfig(
+      KafkaCluster cluster, Integer brokerId) {
+    return loadBrokersConfig(cluster, Collections.singletonList(brokerId))
+        .map(map -> map.values().stream()
+            .findFirst()
+            .orElseThrow(() -> new IllegalEntityStateException(
+                String.format("Config for broker %s not found", brokerId)))
+        );
+  }
+
+  public Mono<Map<String, InternalBrokerConfig>> getBrokerConfigMap(KafkaCluster cluster,
+                                                                    Integer brokerId) {
+    return loadBrokersConfig(cluster, brokerId)
+        .map(list -> list.stream()
+            .collect(Collectors.toMap(
+                ConfigEntry::name,
+                ClusterUtil::mapToInternalBrokerConfig)));
+  }
+
+  private Flux<InternalBrokerConfig> getBrokersConfig(KafkaCluster cluster, Integer brokerId) {
+    if (!cluster.getBrokers().contains(brokerId)) {
+      return Flux.error(
+          new NotFoundException(String.format("Broker with id %s not found", brokerId)));
+    }
+    return loadBrokersConfig(cluster, brokerId)
+        .map(list -> list.stream()
+            .map(ClusterUtil::mapToInternalBrokerConfig)
+            .collect(Collectors.toList()))
+        .flatMapMany(Flux::fromIterable);
+  }
+
+  public Flux<BrokerDTO> getBrokers(KafkaCluster cluster) {
+    return adminClientService
+        .get(cluster)
+        .flatMap(ReactiveAdminClient::describeCluster)
+        .map(description -> description.getNodes().stream()
+            .map(node -> {
+              BrokerDTO broker = new BrokerDTO();
+              broker.setId(node.id());
+              broker.setHost(node.host());
+              return broker;
+            }).collect(Collectors.toList()))
+        .flatMapMany(Flux::fromIterable);
+  }
+
+  public Mono<Node> getController(KafkaCluster cluster) {
+    return adminClientService
+        .get(cluster)
+        .flatMap(ReactiveAdminClient::describeCluster)
+        .map(ReactiveAdminClient.ClusterDescription::getController);
+  }
+
+  public Mono<Void> updateBrokerLogDir(KafkaCluster cluster,
+                                       Integer broker,
+                                       BrokerLogdirUpdateDTO brokerLogDir) {
+    return adminClientService.get(cluster)
+        .flatMap(ac -> updateBrokerLogDir(ac, brokerLogDir, broker));
+  }
+
+  private Mono<Void> updateBrokerLogDir(ReactiveAdminClient admin,
+                                        BrokerLogdirUpdateDTO b,
+                                        Integer broker) {
+
+    Map<TopicPartitionReplica, String> req = Map.of(
+        new TopicPartitionReplica(b.getTopic(), b.getPartition(), broker),
+        b.getLogDir());
+    return admin.alterReplicaLogDirs(req)
+        .onErrorResume(UnknownTopicOrPartitionException.class,
+            e -> Mono.error(new TopicOrPartitionNotFoundException()))
+        .onErrorResume(LogDirNotFoundException.class,
+            e -> Mono.error(new LogDirNotFoundApiException()))
+        .doOnError(log::error);
+  }
+
+  public Mono<Void> updateBrokerConfigByName(KafkaCluster cluster,
+                                             Integer broker,
+                                             String name,
+                                             String value) {
+    return adminClientService.get(cluster)
+        .flatMap(ac -> ac.updateBrokerConfigByName(broker, name, value))
+        .onErrorResume(InvalidRequestException.class,
+            e -> Mono.error(new InvalidRequestApiException(e.getMessage())))
+        .doOnError(log::error);
+  }
+
+  private Mono<Map<Integer, Map<String, DescribeLogDirsResponse.LogDirInfo>>> getClusterLogDirs(
+      KafkaCluster cluster, List<Integer> reqBrokers) {
+    return adminClientService.get(cluster)
+        .flatMap(admin -> {
+          List<Integer> brokers = new ArrayList<>(cluster.getBrokers());
+          if (reqBrokers != null && !reqBrokers.isEmpty()) {
+            brokers.retainAll(reqBrokers);
+          }
+          return admin.describeLogDirs(brokers);
+        })
+        .onErrorResume(TimeoutException.class, (TimeoutException e) -> {
+          log.error("Error during fetching log dirs", e);
+          return Mono.just(new HashMap<>());
+        });
+  }
+
+  public Flux<BrokersLogdirsDTO> getAllBrokersLogdirs(KafkaCluster cluster, List<Integer> brokers) {
+    return getClusterLogDirs(cluster, brokers)
+        .map(describeLogDirsMapper::toBrokerLogDirsList)
+        .flatMapMany(Flux::fromIterable);
+  }
+
+  public Flux<BrokerConfigDTO> getBrokerConfig(KafkaCluster cluster, Integer brokerId) {
+    return getBrokersConfig(cluster, brokerId)
+        .map(clusterMapper::toBrokerConfig);
+  }
+
+  public Mono<BrokerMetricsDTO> getBrokerMetrics(KafkaCluster cluster, Integer id) {
+    return Mono.just(cluster.getMetrics().getInternalBrokerMetrics())
+        .map(m -> m.get(id))
+        .map(clusterMapper::toBrokerMetrics);
+  }
 }
BrokerServiceImpl.java (deleted)

@@ -1,89 +0,0 @@
-package com.provectus.kafka.ui.service;
-
-import com.provectus.kafka.ui.exception.IllegalEntityStateException;
-import com.provectus.kafka.ui.exception.NotFoundException;
-import com.provectus.kafka.ui.model.BrokerDTO;
-import com.provectus.kafka.ui.model.InternalBrokerConfig;
-import com.provectus.kafka.ui.model.KafkaCluster;
-import com.provectus.kafka.ui.util.ClusterUtil;
-import java.util.Collections;
-import java.util.List;
-import java.util.Map;
-import java.util.stream.Collectors;
-import lombok.RequiredArgsConstructor;
-import lombok.extern.log4j.Log4j2;
-import org.apache.kafka.clients.admin.ConfigEntry;
-import org.apache.kafka.common.Node;
-import org.springframework.stereotype.Service;
-import reactor.core.publisher.Flux;
-import reactor.core.publisher.Mono;
-
-@Service
-@RequiredArgsConstructor
-@Log4j2
-public class BrokerServiceImpl implements BrokerService {
-
-  private final AdminClientService adminClientService;
-
-  private Mono<Map<Integer, List<ConfigEntry>>> loadBrokersConfig(
-      KafkaCluster cluster, List<Integer> brokersIds) {
-    return adminClientService.get(cluster)
-        .flatMap(ac -> ac.loadBrokersConfig(brokersIds));
-  }
-
-  private Mono<List<ConfigEntry>> loadBrokersConfig(
-      KafkaCluster cluster, Integer brokerId) {
-    return loadBrokersConfig(cluster, Collections.singletonList(brokerId))
-        .map(map -> map.values().stream()
-            .findFirst()
-            .orElseThrow(() -> new IllegalEntityStateException(
-                String.format("Config for broker %s not found", brokerId)))
-        );
-  }
-
-  @Override
-  public Mono<Map<String, InternalBrokerConfig>> getBrokerConfigMap(KafkaCluster cluster,
-                                                                    Integer brokerId) {
-    return loadBrokersConfig(cluster, brokerId)
-        .map(list -> list.stream()
-            .collect(Collectors.toMap(
-                ConfigEntry::name,
-                ClusterUtil::mapToInternalBrokerConfig)));
-  }
-
-  @Override
-  public Flux<InternalBrokerConfig> getBrokersConfig(KafkaCluster cluster, Integer brokerId) {
-    if (!cluster.getBrokers().contains(brokerId)) {
-      return Flux.error(
-          new NotFoundException(String.format("Broker with id %s not found", brokerId)));
-    }
-    return loadBrokersConfig(cluster, brokerId)
-        .map(list -> list.stream()
-            .map(ClusterUtil::mapToInternalBrokerConfig)
-            .collect(Collectors.toList()))
-        .flatMapMany(Flux::fromIterable);
-  }
-
-  @Override
-  public Flux<BrokerDTO> getBrokers(KafkaCluster cluster) {
-    return adminClientService
-        .get(cluster)
-        .flatMap(ReactiveAdminClient::describeCluster)
-        .map(description -> description.getNodes().stream()
-            .map(node -> {
-              BrokerDTO broker = new BrokerDTO();
-              broker.setId(node.id());
-              broker.setHost(node.host());
-              return broker;
-            }).collect(Collectors.toList()))
-        .flatMapMany(Flux::fromIterable);
-  }
-
-  @Override
-  public Mono<Node> getController(KafkaCluster cluster) {
-    return adminClientService
-        .get(cluster)
-        .flatMap(ReactiveAdminClient::describeCluster)
-        .map(ReactiveAdminClient.ClusterDescription::getController);
-  }
-}
ClusterService.java

@@ -1,73 +1,26 @@
 package com.provectus.kafka.ui.service;

 import com.provectus.kafka.ui.exception.ClusterNotFoundException;
-import com.provectus.kafka.ui.exception.IllegalEntityStateException;
-import com.provectus.kafka.ui.exception.NotFoundException;
-import com.provectus.kafka.ui.exception.TopicNotFoundException;
-import com.provectus.kafka.ui.exception.ValidationException;
 import com.provectus.kafka.ui.mapper.ClusterMapper;
-import com.provectus.kafka.ui.mapper.DescribeLogDirsMapper;
-import com.provectus.kafka.ui.model.BrokerConfigDTO;
-import com.provectus.kafka.ui.model.BrokerDTO;
-import com.provectus.kafka.ui.model.BrokerLogdirUpdateDTO;
-import com.provectus.kafka.ui.model.BrokerMetricsDTO;
-import com.provectus.kafka.ui.model.BrokersLogdirsDTO;
 import com.provectus.kafka.ui.model.ClusterDTO;
 import com.provectus.kafka.ui.model.ClusterMetricsDTO;
 import com.provectus.kafka.ui.model.ClusterStatsDTO;
-import com.provectus.kafka.ui.model.ConsumerGroupDTO;
-import com.provectus.kafka.ui.model.ConsumerGroupDetailsDTO;
-import com.provectus.kafka.ui.model.ConsumerPosition;
-import com.provectus.kafka.ui.model.CreateTopicMessageDTO;
-import com.provectus.kafka.ui.model.Feature;
-import com.provectus.kafka.ui.model.InternalTopic;
 import com.provectus.kafka.ui.model.KafkaCluster;
-import com.provectus.kafka.ui.model.PartitionsIncreaseDTO;
-import com.provectus.kafka.ui.model.PartitionsIncreaseResponseDTO;
-import com.provectus.kafka.ui.model.ReplicationFactorChangeDTO;
-import com.provectus.kafka.ui.model.ReplicationFactorChangeResponseDTO;
-import com.provectus.kafka.ui.model.TopicColumnsToSortDTO;
-import com.provectus.kafka.ui.model.TopicConfigDTO;
-import com.provectus.kafka.ui.model.TopicCreationDTO;
-import com.provectus.kafka.ui.model.TopicDTO;
-import com.provectus.kafka.ui.model.TopicDetailsDTO;
-import com.provectus.kafka.ui.model.TopicMessageEventDTO;
-import com.provectus.kafka.ui.model.TopicMessageSchemaDTO;
-import com.provectus.kafka.ui.model.TopicUpdateDTO;
-import com.provectus.kafka.ui.model.TopicsResponseDTO;
-import com.provectus.kafka.ui.serde.DeserializationService;
-import com.provectus.kafka.ui.util.ClusterUtil;
-import java.util.Collections;
-import java.util.Comparator;
 import java.util.List;
-import java.util.Optional;
-import java.util.function.Predicate;
 import java.util.stream.Collectors;
 import lombok.RequiredArgsConstructor;
-import lombok.SneakyThrows;
 import lombok.extern.log4j.Log4j2;
-import org.apache.commons.lang3.StringUtils;
-import org.apache.kafka.common.errors.GroupIdNotFoundException;
-import org.apache.kafka.common.errors.GroupNotEmptyException;
-import org.jetbrains.annotations.NotNull;
 import org.springframework.stereotype.Service;
-import reactor.core.publisher.Flux;
 import reactor.core.publisher.Mono;

 @Service
 @RequiredArgsConstructor
 @Log4j2
 public class ClusterService {
-  private static final Integer DEFAULT_PAGE_SIZE = 25;
-
   private final ClustersStorage clustersStorage;
   private final ClusterMapper clusterMapper;
-  private final KafkaService kafkaService;
-  private final AdminClientService adminClientService;
-  private final BrokerService brokerService;
-  private final ConsumingService consumingService;
-  private final DeserializationService deserializationService;
-  private final DescribeLogDirsMapper describeLogDirsMapper;
+  private final MetricsService metricsService;

   public List<ClusterDTO> getClusters() {
     return clustersStorage.getKafkaClusters()
@@ -76,13 +29,6 @@ public class ClusterService {
         .collect(Collectors.toList());
   }

-  public Mono<BrokerMetricsDTO> getBrokerMetrics(String name, Integer id) {
-    return Mono.justOrEmpty(clustersStorage.getClusterByName(name)
-        .map(c -> c.getMetrics().getInternalBrokerMetrics())
-        .map(m -> m.get(id))
-        .map(clusterMapper::toBrokerMetrics));
-  }
-
   public Mono<ClusterStatsDTO> getClusterStats(String name) {
     return Mono.justOrEmpty(
         clustersStorage.getClusterByName(name)
@@ -99,293 +45,12 @@ public class ClusterService {
     );
   }

-  public TopicsResponseDTO getTopics(String name, Optional<Integer> page,
-                                     Optional<Integer> nullablePerPage,
-                                     Optional<Boolean> showInternal,
-                                     Optional<String> search,
-                                     Optional<TopicColumnsToSortDTO> sortBy) {
-    Predicate<Integer> positiveInt = i -> i > 0;
-    int perPage = nullablePerPage.filter(positiveInt).orElse(DEFAULT_PAGE_SIZE);
-    var topicsToSkip = (page.filter(positiveInt).orElse(1) - 1) * perPage;
-    var cluster = clustersStorage.getClusterByName(name)
-        .orElseThrow(ClusterNotFoundException::new);
-    List<InternalTopic> topics = cluster.getTopics().values().stream()
-        .filter(topic -> !topic.isInternal()
-            || showInternal
-            .map(i -> topic.isInternal() == i)
-            .orElse(true))
-        .filter(topic ->
-            search
-                .map(s -> StringUtils.containsIgnoreCase(topic.getName(), s))
-                .orElse(true))
-        .sorted(getComparatorForTopic(sortBy))
-        .collect(Collectors.toList());
-    var totalPages = (topics.size() / perPage)
-        + (topics.size() % perPage == 0 ? 0 : 1);
-    return new TopicsResponseDTO()
-        .pageCount(totalPages)
-        .topics(
-            topics.stream()
-                .skip(topicsToSkip)
-                .limit(perPage)
-                .map(t ->
-                    clusterMapper.toTopic(
-                        t.toBuilder().partitions(
-                            kafkaService.getTopicPartitions(cluster, t)
-                        ).build()
-                    )
-                )
-                .collect(Collectors.toList())
-        );
-  }
-
-  private Comparator<InternalTopic> getComparatorForTopic(Optional<TopicColumnsToSortDTO> sortBy) {
-    var defaultComparator = Comparator.comparing(InternalTopic::getName);
-    if (sortBy.isEmpty()) {
-      return defaultComparator;
-    }
-    switch (sortBy.get()) {
-      case TOTAL_PARTITIONS:
-        return Comparator.comparing(InternalTopic::getPartitionCount);
-      case OUT_OF_SYNC_REPLICAS:
-        return Comparator.comparing(t -> t.getReplicas() - t.getInSyncReplicas());
-      case REPLICATION_FACTOR:
-        return Comparator.comparing(InternalTopic::getReplicationFactor);
-      case NAME:
-      default:
-        return defaultComparator;
-    }
-  }
-
-  public Optional<TopicDetailsDTO> getTopicDetails(String name, String topicName) {
-    return clustersStorage.getClusterByName(name)
-        .flatMap(c ->
-            Optional.ofNullable(c.getTopics()).map(l -> l.get(topicName)).map(
-                t -> t.toBuilder().partitions(
-                    kafkaService.getTopicPartitions(c, t)
-                ).build()
-            ).map(t -> clusterMapper.toTopicDetails(t, c.getMetrics()))
-        );
-  }
-
-  public Optional<List<TopicConfigDTO>> getTopicConfigs(String name, String topicName) {
-    return clustersStorage.getClusterByName(name)
-        .map(KafkaCluster::getTopics)
-        .map(t -> t.get(topicName))
-        .map(t -> t.getTopicConfigs().stream().map(clusterMapper::toTopicConfig)
-            .collect(Collectors.toList()));
-  }
-
-  public Mono<TopicDTO> createTopic(String clusterName, Mono<TopicCreationDTO> topicCreation) {
-    return clustersStorage.getClusterByName(clusterName).map(cluster ->
-        kafkaService.createTopic(cluster, topicCreation)
-            .doOnNext(t -> updateCluster(t, clusterName, cluster))
-            .map(clusterMapper::toTopic)
-    ).orElse(Mono.empty());
-  }
-
-  @SneakyThrows
-  public Mono<ConsumerGroupDetailsDTO> getConsumerGroupDetail(String clusterName,
-                                                              String consumerGroupId) {
-    var cluster = clustersStorage.getClusterByName(clusterName).orElseThrow(Throwable::new);
-    return kafkaService.getConsumerGroups(
-        cluster,
-        Optional.empty(),
-        Collections.singletonList(consumerGroupId)
-    ).filter(groups -> !groups.isEmpty()).map(groups -> groups.get(0)).map(
-        ClusterUtil::convertToConsumerGroupDetails
-    );
-  }
-
-  public Mono<List<ConsumerGroupDTO>> getConsumerGroups(String clusterName) {
-    return getConsumerGroups(clusterName, Optional.empty());
-  }
-
-  public Mono<List<ConsumerGroupDTO>> getConsumerGroups(String clusterName,
-                                                        Optional<String> topic) {
-    return Mono.justOrEmpty(clustersStorage.getClusterByName(clusterName))
-        .switchIfEmpty(Mono.error(ClusterNotFoundException::new))
-        .flatMap(c -> kafkaService.getConsumerGroups(c, topic, Collections.emptyList()))
-        .map(c ->
-            c.stream().map(ClusterUtil::convertToConsumerGroup).collect(Collectors.toList())
-        );
-  }
-
-  public Flux<BrokerDTO> getBrokers(String clusterName) {
-    return Mono.justOrEmpty(clustersStorage.getClusterByName(clusterName))
-        .switchIfEmpty(Mono.error(ClusterNotFoundException::new))
-        .flatMapMany(brokerService::getBrokers);
-  }
-
-  public Flux<BrokerConfigDTO> getBrokerConfig(String clusterName, Integer brokerId) {
-    return Mono.justOrEmpty(clustersStorage.getClusterByName(clusterName))
-        .switchIfEmpty(Mono.error(ClusterNotFoundException::new))
-        .flatMapMany(c -> brokerService.getBrokersConfig(c, brokerId))
-        .map(clusterMapper::toBrokerConfig);
-  }
-
-  @SneakyThrows
-  public Mono<TopicDTO> updateTopic(String clusterName, String topicName,
-                                    Mono<TopicUpdateDTO> topicUpdate) {
-    return clustersStorage.getClusterByName(clusterName).map(cl ->
-        topicUpdate
-            .flatMap(t -> kafkaService.updateTopic(cl, topicName, t))
-            .doOnNext(t -> updateCluster(t, clusterName, cl))
-            .map(clusterMapper::toTopic)
-    ).orElse(Mono.empty());
-  }
-
-  public Mono<Void> deleteTopic(String clusterName, String topicName) {
-    var cluster = clustersStorage.getClusterByName(clusterName)
-        .orElseThrow(ClusterNotFoundException::new);
-    var topic = getTopicDetails(clusterName, topicName)
-        .orElseThrow(TopicNotFoundException::new);
-    if (cluster.getFeatures().contains(Feature.TOPIC_DELETION)) {
-      return kafkaService.deleteTopic(cluster, topic.getName())
-          .doOnSuccess(t -> updateCluster(topicName, clusterName, cluster));
-    } else {
-      return Mono.error(new ValidationException("Topic deletion restricted"));
-    }
-  }
-
-  private KafkaCluster updateCluster(InternalTopic topic, String clusterName,
-                                     KafkaCluster cluster) {
-    final KafkaCluster updatedCluster = kafkaService.getUpdatedCluster(cluster, topic);
-    clustersStorage.setKafkaCluster(clusterName, updatedCluster);
-    return updatedCluster;
-  }
-
-  private KafkaCluster updateCluster(String topicToDelete, String clusterName,
-                                     KafkaCluster cluster) {
-    final KafkaCluster updatedCluster = kafkaService.getUpdatedCluster(cluster, topicToDelete);
-    clustersStorage.setKafkaCluster(clusterName, updatedCluster);
-    return updatedCluster;
-  }
-
   public Mono<ClusterDTO> updateCluster(String clusterName) {
     return clustersStorage.getClusterByName(clusterName)
-        .map(cluster -> kafkaService.getUpdatedCluster(cluster)
+        .map(cluster -> metricsService.updateClusterMetrics(cluster)
             .doOnNext(updatedCluster -> clustersStorage
                 .setKafkaCluster(updatedCluster.getName(), updatedCluster))
             .map(clusterMapper::toCluster))
         .orElse(Mono.error(new ClusterNotFoundException()));
   }

-  public Flux<TopicMessageEventDTO> getMessages(String clusterName, String topicName,
-                                                ConsumerPosition consumerPosition, String query,
-                                                Integer limit) {
-    return clustersStorage.getClusterByName(clusterName)
-        .map(c -> consumingService.loadMessages(c, topicName, consumerPosition, query, limit))
-        .orElse(Flux.empty());
-  }
-
-  public Mono<Void> deleteTopicMessages(String clusterName, String topicName,
-                                        List<Integer> partitions) {
-    var cluster = clustersStorage.getClusterByName(clusterName)
-        .orElseThrow(ClusterNotFoundException::new);
-    if (!cluster.getTopics().containsKey(topicName)) {
-      throw new TopicNotFoundException();
-    }
-    return consumingService.offsetsForDeletion(cluster, topicName, partitions)
-        .flatMap(offsets -> kafkaService.deleteTopicMessages(cluster, offsets));
-  }
-
-  public Mono<PartitionsIncreaseResponseDTO> increaseTopicPartitions(
-      String clusterName,
-      String topicName,
-      PartitionsIncreaseDTO partitionsIncrease) {
-    return clustersStorage.getClusterByName(clusterName).map(cluster ->
|
|
||||||
kafkaService.increaseTopicPartitions(cluster, topicName, partitionsIncrease)
|
|
||||||
.doOnNext(t -> updateCluster(t, cluster.getName(), cluster))
|
|
||||||
.map(t -> new PartitionsIncreaseResponseDTO()
|
|
||||||
.topicName(t.getName())
|
|
||||||
.totalPartitionsCount(t.getPartitionCount())))
|
|
||||||
.orElse(Mono.error(new ClusterNotFoundException(
|
|
||||||
String.format("No cluster for name '%s'", clusterName)
|
|
||||||
)));
|
|
||||||
}
|
|
||||||
|
|
||||||
public Mono<Void> deleteConsumerGroupById(String clusterName,
|
|
||||||
String groupId) {
|
|
||||||
return clustersStorage.getClusterByName(clusterName)
|
|
||||||
.map(cluster -> adminClientService.get(cluster)
|
|
||||||
.flatMap(adminClient -> adminClient.deleteConsumerGroups(List.of(groupId)))
|
|
||||||
.onErrorResume(this::reThrowCustomException)
|
|
||||||
)
|
|
||||||
.orElse(Mono.empty());
|
|
||||||
}
|
|
||||||
|
|
||||||
public TopicMessageSchemaDTO getTopicSchema(String clusterName, String topicName) {
|
|
||||||
var cluster = clustersStorage.getClusterByName(clusterName)
|
|
||||||
.orElseThrow(ClusterNotFoundException::new);
|
|
||||||
if (!cluster.getTopics().containsKey(topicName)) {
|
|
||||||
throw new TopicNotFoundException();
|
|
||||||
}
|
|
||||||
return deserializationService
|
|
||||||
.getRecordDeserializerForCluster(cluster)
|
|
||||||
.getTopicSchema(topicName);
|
|
||||||
}
|
|
||||||
|
|
||||||
public Mono<Void> sendMessage(String clusterName, String topicName, CreateTopicMessageDTO msg) {
|
|
||||||
var cluster = clustersStorage.getClusterByName(clusterName)
|
|
||||||
.orElseThrow(ClusterNotFoundException::new);
|
|
||||||
if (!cluster.getTopics().containsKey(topicName)) {
|
|
||||||
throw new TopicNotFoundException();
|
|
||||||
}
|
|
||||||
if (msg.getKey() == null && msg.getContent() == null) {
|
|
||||||
throw new ValidationException("Invalid message: both key and value can't be null");
|
|
||||||
}
|
|
||||||
if (msg.getPartition() != null
|
|
||||||
&& msg.getPartition() > cluster.getTopics().get(topicName).getPartitionCount() - 1) {
|
|
||||||
throw new ValidationException("Invalid partition");
|
|
||||||
}
|
|
||||||
return kafkaService.sendMessage(cluster, topicName, msg).then();
|
|
||||||
}
|
|
||||||
|
|
||||||
@NotNull
|
|
||||||
private Mono<Void> reThrowCustomException(Throwable e) {
|
|
||||||
if (e instanceof GroupIdNotFoundException) {
|
|
||||||
return Mono.error(new NotFoundException("The group id does not exist"));
|
|
||||||
} else if (e instanceof GroupNotEmptyException) {
|
|
||||||
return Mono.error(new IllegalEntityStateException("The group is not empty"));
|
|
||||||
} else {
|
|
||||||
return Mono.error(e);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
public Mono<ReplicationFactorChangeResponseDTO> changeReplicationFactor(
|
|
||||||
String clusterName,
|
|
||||||
String topicName,
|
|
||||||
ReplicationFactorChangeDTO replicationFactorChange) {
|
|
||||||
return clustersStorage.getClusterByName(clusterName).map(cluster ->
|
|
||||||
kafkaService.changeReplicationFactor(cluster, topicName, replicationFactorChange)
|
|
||||||
.doOnNext(topic -> updateCluster(topic, cluster.getName(), cluster))
|
|
||||||
.map(t -> new ReplicationFactorChangeResponseDTO()
|
|
||||||
.topicName(t.getName())
|
|
||||||
.totalReplicationFactor(t.getReplicationFactor())))
|
|
||||||
.orElse(Mono.error(new ClusterNotFoundException(
|
|
||||||
String.format("No cluster for name '%s'", clusterName))));
|
|
||||||
}
|
|
||||||
|
|
||||||
public Flux<BrokersLogdirsDTO> getAllBrokersLogdirs(String clusterName, List<Integer> brokers) {
|
|
||||||
return Mono.justOrEmpty(clustersStorage.getClusterByName(clusterName))
|
|
||||||
.flatMap(c -> kafkaService.getClusterLogDirs(c, brokers))
|
|
||||||
.map(describeLogDirsMapper::toBrokerLogDirsList)
|
|
||||||
.flatMapMany(Flux::fromIterable);
|
|
||||||
}
|
|
||||||
|
|
||||||
public Mono<Void> updateBrokerLogDir(
|
|
||||||
String clusterName, Integer id, BrokerLogdirUpdateDTO brokerLogDir) {
|
|
||||||
return Mono.justOrEmpty(clustersStorage.getClusterByName(clusterName))
|
|
||||||
.flatMap(c -> kafkaService.updateBrokerLogDir(c, id, brokerLogDir));
|
|
||||||
}
|
|
||||||
|
|
||||||
public Mono<Void> updateBrokerConfigByName(String clusterName,
|
|
||||||
Integer id,
|
|
||||||
String name,
|
|
||||||
String value) {
|
|
||||||
return Mono.justOrEmpty(clustersStorage.getClusterByName(clusterName))
|
|
||||||
.flatMap(c -> kafkaService.updateBrokerConfigByName(c, id, name, value));
|
|
||||||
}
|
|
||||||
}
|
}
|
|
@ -15,7 +15,7 @@ public class ClustersMetricsScheduler {

  private final ClustersStorage clustersStorage;

- private final MetricsUpdateService metricsUpdateService;
+ private final MetricsService metricsService;

  @Scheduled(fixedRateString = "${kafka.update-metrics-rate-millis:30000}")
  public void updateMetrics() {

@ -23,7 +23,10 @@ public class ClustersMetricsScheduler {
        .parallel()
        .runOn(Schedulers.parallel())
        .map(Map.Entry::getValue)
-       .flatMap(metricsUpdateService::updateMetrics)
+       .flatMap(cluster -> {
+         log.debug("Start getting metrics for kafkaCluster: {}", cluster.getName());
+         return metricsService.updateClusterMetrics(cluster);
+       })
        .doOnNext(s -> clustersStorage.setKafkaCluster(s.getName(), s))
        .then()
        .block();
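The scheduler hunk above fans all configured clusters out onto Reactor's parallel scheduler, refreshes each one through MetricsService, caches the result, and blocks until the pass completes. Below is a minimal, self-contained sketch of that same pattern; the map of strings and the "refreshed" suffix stand in for KafkaCluster and MetricsService, which are assumptions for illustration and not part of this diff.

import java.util.Map;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;
import reactor.core.scheduler.Schedulers;

public class ParallelRefreshSketch {
  public static void main(String[] args) {
    // Placeholder for ClustersStorage.getKafkaClustersMap()
    Map<String, String> clusters = Map.of("local", "v1", "staging", "v1");

    Flux.fromIterable(clusters.entrySet())
        .parallel()                                       // split the work across rails
        .runOn(Schedulers.parallel())                     // run each rail on the parallel scheduler
        .map(Map.Entry::getKey)
        .flatMap(name -> Mono.just(name + ":refreshed"))  // stand-in for updateClusterMetrics
        .doOnNext(System.out::println)                    // stand-in for setKafkaCluster
        .then()
        .block();                                         // keeps the @Scheduled method synchronous
  }
}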
@ -2,6 +2,7 @@ package com.provectus.kafka.ui.service;

  import com.provectus.kafka.ui.config.ClustersProperties;
  import com.provectus.kafka.ui.mapper.ClusterMapper;
+ import com.provectus.kafka.ui.model.InternalTopic;
  import com.provectus.kafka.ui.model.KafkaCluster;
  import java.util.Collection;
  import java.util.HashMap;

@ -48,8 +49,27 @@ public class ClustersStorage {
    return Optional.ofNullable(kafkaClusters.get(clusterName));
  }

- public void setKafkaCluster(String key, KafkaCluster kafkaCluster) {
+ public KafkaCluster setKafkaCluster(String key, KafkaCluster kafkaCluster) {
    this.kafkaClusters.put(key, kafkaCluster);
+   return kafkaCluster;
+ }
+
+ public void onTopicDeleted(KafkaCluster cluster, String topicToDelete) {
+   var topics = Optional.ofNullable(cluster.getTopics())
+       .map(HashMap::new)
+       .orElseGet(HashMap::new);
+   topics.remove(topicToDelete);
+   var updatedCluster = cluster.toBuilder().topics(topics).build();
+   setKafkaCluster(cluster.getName(), updatedCluster);
+ }
+
+ public void onTopicUpdated(KafkaCluster cluster, InternalTopic updatedTopic) {
+   var topics = Optional.ofNullable(cluster.getTopics())
+       .map(HashMap::new)
+       .orElseGet(HashMap::new);
+   topics.put(updatedTopic.getName(), updatedTopic);
+   var updatedCluster = cluster.toBuilder().topics(topics).build();
+   setKafkaCluster(cluster.getName(), updatedCluster);
  }

  public Map<String, KafkaCluster> getKafkaClustersMap() {
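With the new onTopicDeleted and onTopicUpdated helpers above, cache maintenance after a topic change can live entirely inside ClustersStorage. A hedged sketch of a caller follows; the wrapper class and method names are illustrative only and do not appear in this commit.

import com.provectus.kafka.ui.model.InternalTopic;
import com.provectus.kafka.ui.model.KafkaCluster;
import com.provectus.kafka.ui.service.ClustersStorage;
import reactor.core.publisher.Mono;

// Illustrative only: after a topic mutation succeeds, push the refreshed
// InternalTopic back into the cached KafkaCluster via ClustersStorage.
class TopicCacheSketch {
  private final ClustersStorage clustersStorage;

  TopicCacheSketch(ClustersStorage clustersStorage) {
    this.clustersStorage = clustersStorage;
  }

  Mono<InternalTopic> cacheAfterUpdate(KafkaCluster cluster, Mono<InternalTopic> updated) {
    return updated.doOnNext(topic -> clustersStorage.onTopicUpdated(cluster, topic));
  }

  Mono<Void> cacheAfterDelete(KafkaCluster cluster, String topicName, Mono<Void> deletion) {
    return deletion.doOnSuccess(v -> clustersStorage.onTopicDeleted(cluster, topicName));
  }
}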
@ -0,0 +1,133 @@
package com.provectus.kafka.ui.service;

import com.provectus.kafka.ui.model.ConsumerGroupDTO;
import com.provectus.kafka.ui.model.ConsumerGroupDetailsDTO;
import com.provectus.kafka.ui.model.InternalConsumerGroup;
import com.provectus.kafka.ui.model.KafkaCluster;
import com.provectus.kafka.ui.util.ClusterUtil;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Properties;
import java.util.UUID;
import java.util.stream.Collectors;
import lombok.RequiredArgsConstructor;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.BytesDeserializer;
import org.apache.kafka.common.utils.Bytes;
import org.springframework.stereotype.Service;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;


@Service
@RequiredArgsConstructor
public class ConsumerGroupService {

  private final AdminClientService adminClientService;

  private Mono<List<InternalConsumerGroup>> getConsumerGroupsInternal(KafkaCluster cluster) {
    return adminClientService.get(cluster).flatMap(ac ->
        ac.listConsumerGroups()
            .flatMap(groupIds -> getConsumerGroupsInternal(cluster, groupIds)));
  }

  private Mono<List<InternalConsumerGroup>> getConsumerGroupsInternal(KafkaCluster cluster,
                                                                      List<String> groupIds) {
    return adminClientService.get(cluster).flatMap(ac ->
        ac.describeConsumerGroups(groupIds)
            .map(Map::values)
            .flatMap(descriptions ->
                Flux.fromIterable(descriptions)
                    .parallel()
                    .flatMap(d ->
                        ac.listConsumerGroupOffsets(d.groupId())
                            .map(offsets -> ClusterUtil.convertToInternalConsumerGroup(d, offsets))
                    )
                    .sequential()
                    .collectList()));
  }

  public Mono<List<InternalConsumerGroup>> getConsumerGroups(
      KafkaCluster cluster, Optional<String> topic, List<String> groupIds) {
    final Mono<List<InternalConsumerGroup>> consumerGroups;

    if (groupIds.isEmpty()) {
      consumerGroups = getConsumerGroupsInternal(cluster);
    } else {
      consumerGroups = getConsumerGroupsInternal(cluster, groupIds);
    }

    return consumerGroups.map(c ->
        c.stream()
            .map(d -> ClusterUtil.filterConsumerGroupTopic(d, topic))
            .filter(Optional::isPresent)
            .map(Optional::get)
            .map(g ->
                g.toBuilder().endOffsets(
                    topicPartitionsEndOffsets(cluster, g.getOffsets().keySet())
                ).build()
            )
            .collect(Collectors.toList())
    );
  }

  public Mono<List<ConsumerGroupDTO>> getConsumerGroups(KafkaCluster cluster) {
    return getConsumerGroups(cluster, Optional.empty());
  }

  public Mono<List<ConsumerGroupDTO>> getConsumerGroups(KafkaCluster cluster,
                                                        Optional<String> topic) {
    return getConsumerGroups(cluster, topic, Collections.emptyList())
        .map(c ->
            c.stream().map(ClusterUtil::convertToConsumerGroup).collect(Collectors.toList())
        );
  }

  private Map<TopicPartition, Long> topicPartitionsEndOffsets(
      KafkaCluster cluster, Collection<TopicPartition> topicPartitions) {
    try (KafkaConsumer<Bytes, Bytes> consumer = createConsumer(cluster)) {
      return consumer.endOffsets(topicPartitions);
    }
  }

  public Mono<ConsumerGroupDetailsDTO> getConsumerGroupDetail(KafkaCluster cluster,
                                                              String consumerGroupId) {
    return getConsumerGroups(
        cluster,
        Optional.empty(),
        Collections.singletonList(consumerGroupId)
    ).filter(groups -> !groups.isEmpty()).map(groups -> groups.get(0)).map(
        ClusterUtil::convertToConsumerGroupDetails
    );
  }

  public Mono<Void> deleteConsumerGroupById(KafkaCluster cluster,
                                            String groupId) {
    return adminClientService.get(cluster)
        .flatMap(adminClient -> adminClient.deleteConsumerGroups(List.of(groupId)));
  }

  public KafkaConsumer<Bytes, Bytes> createConsumer(KafkaCluster cluster) {
    return createConsumer(cluster, Map.of());
  }

  public KafkaConsumer<Bytes, Bytes> createConsumer(KafkaCluster cluster,
                                                    Map<String, Object> properties) {
    Properties props = new Properties();
    props.putAll(cluster.getProperties());
    props.put(ConsumerConfig.CLIENT_ID_CONFIG, "kafka-ui-" + UUID.randomUUID());
    props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, cluster.getBootstrapServers());
    props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, BytesDeserializer.class);
    props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, BytesDeserializer.class);
    props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    props.putAll(properties);

    return new KafkaConsumer<>(props);
  }

}
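The new ConsumerGroupService works on an already-resolved KafkaCluster, so consumer-group lookups no longer go through ClusterService. A hedged usage sketch follows; the wrapper class is illustrative, and only the ConsumerGroupService calls come from this diff.

import com.provectus.kafka.ui.model.ConsumerGroupDTO;
import com.provectus.kafka.ui.model.ConsumerGroupDetailsDTO;
import com.provectus.kafka.ui.model.KafkaCluster;
import com.provectus.kafka.ui.service.ConsumerGroupService;
import java.util.List;
import java.util.Optional;
import reactor.core.publisher.Mono;

// Illustrative only: a caller that has already resolved the KafkaCluster
// (for example via ClustersStorage) queries consumer groups directly.
class ConsumerGroupLookupSketch {
  private final ConsumerGroupService consumerGroupService;

  ConsumerGroupLookupSketch(ConsumerGroupService consumerGroupService) {
    this.consumerGroupService = consumerGroupService;
  }

  Mono<List<ConsumerGroupDTO>> groupsForTopic(KafkaCluster cluster, String topic) {
    return consumerGroupService.getConsumerGroups(cluster, Optional.of(topic));
  }

  Mono<ConsumerGroupDetailsDTO> groupDetail(KafkaCluster cluster, String groupId) {
    return consumerGroupService.getConsumerGroupDetail(cluster, groupId);
  }
}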
@ -1,119 +0,0 @@
package com.provectus.kafka.ui.service;

import com.fasterxml.jackson.databind.ObjectMapper;
import com.provectus.kafka.ui.emitter.BackwardRecordEmitter;
import com.provectus.kafka.ui.emitter.ForwardRecordEmitter;
import com.provectus.kafka.ui.model.ConsumerPosition;
import com.provectus.kafka.ui.model.KafkaCluster;
import com.provectus.kafka.ui.model.SeekDirectionDTO;
import com.provectus.kafka.ui.model.TopicMessageDTO;
import com.provectus.kafka.ui.model.TopicMessageEventDTO;
import com.provectus.kafka.ui.serde.DeserializationService;
import com.provectus.kafka.ui.serde.RecordSerDe;
import com.provectus.kafka.ui.util.FilterTopicMessageEvents;
import com.provectus.kafka.ui.util.OffsetsSeekBackward;
import com.provectus.kafka.ui.util.OffsetsSeekForward;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.stream.Collectors;
import lombok.RequiredArgsConstructor;
import lombok.extern.log4j.Log4j2;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.utils.Bytes;
import org.springframework.stereotype.Service;
import org.springframework.util.StringUtils;
import reactor.core.publisher.Flux;
import reactor.core.publisher.FluxSink;
import reactor.core.publisher.Mono;
import reactor.core.scheduler.Schedulers;

@Service
@Log4j2
@RequiredArgsConstructor
public class ConsumingService {

  private static final int MAX_RECORD_LIMIT = 100;
  private static final int DEFAULT_RECORD_LIMIT = 20;

  private final KafkaService kafkaService;
  private final DeserializationService deserializationService;
  private final ObjectMapper objectMapper = new ObjectMapper();

  public Flux<TopicMessageEventDTO> loadMessages(KafkaCluster cluster, String topic,
                                                 ConsumerPosition consumerPosition, String query,
                                                 Integer limit) {
    int recordsLimit = Optional.ofNullable(limit)
        .map(s -> Math.min(s, MAX_RECORD_LIMIT))
        .orElse(DEFAULT_RECORD_LIMIT);

    java.util.function.Consumer<? super FluxSink<TopicMessageEventDTO>> emitter;
    RecordSerDe recordDeserializer =
        deserializationService.getRecordDeserializerForCluster(cluster);
    if (consumerPosition.getSeekDirection().equals(SeekDirectionDTO.FORWARD)) {
      emitter = new ForwardRecordEmitter(
          () -> kafkaService.createConsumer(cluster),
          new OffsetsSeekForward(topic, consumerPosition),
          recordDeserializer
      );
    } else {
      emitter = new BackwardRecordEmitter(
          (Map<String, Object> props) -> kafkaService.createConsumer(cluster, props),
          new OffsetsSeekBackward(topic, consumerPosition, recordsLimit),
          recordDeserializer
      );
    }
    return Flux.create(emitter)
        .filter(m -> filterTopicMessage(m, query))
        .takeWhile(new FilterTopicMessageEvents(recordsLimit))
        .subscribeOn(Schedulers.elastic())
        .share();
  }

  public Mono<Map<TopicPartition, Long>> offsetsForDeletion(KafkaCluster cluster, String topicName,
                                                            List<Integer> partitionsToInclude) {
    return Mono.fromSupplier(() -> {
      try (KafkaConsumer<Bytes, Bytes> consumer = kafkaService.createConsumer(cluster)) {
        return significantOffsets(consumer, topicName, partitionsToInclude);
      } catch (Exception e) {
        log.error("Error occurred while consuming records", e);
        throw new RuntimeException(e);
      }
    });
  }

  /**
   * returns end offsets for partitions where start offset != end offsets.
   * This is useful when we need to verify that partition is not empty.
   */
  public static Map<TopicPartition, Long> significantOffsets(Consumer<?, ?> consumer,
                                                             String topicName,
                                                             Collection<Integer>
                                                                 partitionsToInclude) {
    var partitions = consumer.partitionsFor(topicName).stream()
        .filter(p -> partitionsToInclude.isEmpty() || partitionsToInclude.contains(p.partition()))
        .map(p -> new TopicPartition(topicName, p.partition()))
        .collect(Collectors.toList());
    var beginningOffsets = consumer.beginningOffsets(partitions);
    var endOffsets = consumer.endOffsets(partitions);
    return endOffsets.entrySet().stream()
        .filter(entry -> !beginningOffsets.get(entry.getKey()).equals(entry.getValue()))
        .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
  }

  private boolean filterTopicMessage(TopicMessageEventDTO message, String query) {
    log.info("filter");
    if (StringUtils.isEmpty(query)
        || !message.getType().equals(TopicMessageEventDTO.TypeEnum.MESSAGE)) {
      return true;
    }

    final TopicMessageDTO msg = message.getMessage();
    return (!StringUtils.isEmpty(msg.getKey()) && msg.getKey().contains(query))
        || (!StringUtils.isEmpty(msg.getContent()) && msg.getContent().contains(query));
  }

}
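The removed significantOffsets helper above keeps only the partitions whose beginning and end offsets differ, i.e. partitions that actually hold records. The small demonstration below shows that filter against Kafka's MockConsumer rather than the deleted class; the topic name and offsets are made up for the example.

import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import org.apache.kafka.clients.consumer.MockConsumer;
import org.apache.kafka.clients.consumer.OffsetResetStrategy;
import org.apache.kafka.common.Node;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;

public class SignificantOffsetsDemo {
  public static void main(String[] args) {
    var consumer = new MockConsumer<byte[], byte[]>(OffsetResetStrategy.EARLIEST);
    var p0 = new TopicPartition("orders", 0);  // empty partition: begin == end
    var p1 = new TopicPartition("orders", 1);  // non-empty partition: begin != end
    consumer.updatePartitions("orders", List.of(
        new PartitionInfo("orders", 0, Node.noNode(), new Node[0], new Node[0]),
        new PartitionInfo("orders", 1, Node.noNode(), new Node[0], new Node[0])));
    consumer.updateBeginningOffsets(Map.of(p0, 5L, p1, 0L));
    consumer.updateEndOffsets(Map.of(p0, 5L, p1, 42L));

    var partitions = List.of(p0, p1);
    var begin = consumer.beginningOffsets(partitions);
    var end = consumer.endOffsets(partitions);

    // Same idea as the removed significantOffsets(): keep end offsets only
    // where the partition actually contains records.
    Map<TopicPartition, Long> significant = end.entrySet().stream()
        .filter(e -> !begin.get(e.getKey()).equals(e.getValue()))
        .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));

    System.out.println(significant);  // expected output: {orders-1=42}
  }
}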
@ -1,15 +1,61 @@
  package com.provectus.kafka.ui.service;

+ import static com.provectus.kafka.ui.util.Constants.DELETE_TOPIC_ENABLE;
+
  import com.provectus.kafka.ui.model.Feature;
  import com.provectus.kafka.ui.model.KafkaCluster;
+ import java.util.ArrayList;
+ import java.util.List;
+ import java.util.Optional;
+ import java.util.function.Predicate;
+ import lombok.RequiredArgsConstructor;
+ import lombok.extern.log4j.Log4j2;
+ import org.apache.kafka.common.Node;
+ import org.springframework.stereotype.Service;
  import reactor.core.publisher.Flux;
+ import reactor.core.publisher.Mono;

- public interface FeatureService {
-   /**
-    * Get available features.
-    *
-    * @param cluster - cluster
-    * @return List of Feature
-    */
-   Flux<Feature> getAvailableFeatures(KafkaCluster cluster);
+ @Service
+ @RequiredArgsConstructor
+ @Log4j2
+ public class FeatureService {
+
+   private final BrokerService brokerService;
+
+   public Flux<Feature> getAvailableFeatures(KafkaCluster cluster) {
+     List<Mono<Feature>> features = new ArrayList<>();
+
+     if (Optional.ofNullable(cluster.getKafkaConnect())
+         .filter(Predicate.not(List::isEmpty))
+         .isPresent()) {
+       features.add(Mono.just(Feature.KAFKA_CONNECT));
+     }
+
+     if (cluster.getKsqldbServer() != null) {
+       features.add(Mono.just(Feature.KSQL_DB));
+     }
+
+     if (cluster.getSchemaRegistry() != null) {
+       features.add(Mono.just(Feature.SCHEMA_REGISTRY));
+     }
+
+     features.add(
+         isTopicDeletionEnabled(cluster)
+             .flatMap(r -> r ? Mono.just(Feature.TOPIC_DELETION) : Mono.empty())
+     );
+
+     return Flux.fromIterable(features).flatMap(m -> m);
+   }
+
+   private Mono<Boolean> isTopicDeletionEnabled(KafkaCluster cluster) {
+     return brokerService.getController(cluster)
+         .map(Node::id)
+         .flatMap(broker -> brokerService.getBrokerConfigMap(cluster, broker))
+         .map(config -> {
+           if (config != null && config.get(DELETE_TOPIC_ENABLE) != null) {
+             return Boolean.parseBoolean(config.get(DELETE_TOPIC_ENABLE).getValue());
+           }
+           return false;
+         });
+   }
  }
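FeatureService.getAvailableFeatures emits features one at a time, so callers usually collect them into a list; the deleted KafkaService further down does exactly that when it builds the cluster snapshot. A minimal hedged sketch of such a caller, with an illustrative wrapper class:

import com.provectus.kafka.ui.model.Feature;
import com.provectus.kafka.ui.model.KafkaCluster;
import com.provectus.kafka.ui.service.FeatureService;
import java.util.List;
import reactor.core.publisher.Mono;

// Illustrative only: collect the detected features for a resolved cluster.
class FeatureLookupSketch {
  private final FeatureService featureService;

  FeatureLookupSketch(FeatureService featureService) {
    this.featureService = featureService;
  }

  Mono<List<Feature>> features(KafkaCluster cluster) {
    return featureService.getAvailableFeatures(cluster).collectList();
  }
}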
@ -1,62 +0,0 @@
package com.provectus.kafka.ui.service;

import static com.provectus.kafka.ui.util.Constants.DELETE_TOPIC_ENABLE;

import com.provectus.kafka.ui.model.Feature;
import com.provectus.kafka.ui.model.KafkaCluster;
import java.util.ArrayList;
import java.util.List;
import java.util.Optional;
import java.util.function.Predicate;
import lombok.RequiredArgsConstructor;
import lombok.extern.log4j.Log4j2;
import org.apache.kafka.common.Node;
import org.springframework.stereotype.Service;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;

@Service
@RequiredArgsConstructor
@Log4j2
public class FeatureServiceImpl implements FeatureService {

  private final BrokerService brokerService;

  @Override
  public Flux<Feature> getAvailableFeatures(KafkaCluster cluster) {
    List<Mono<Feature>> features = new ArrayList<>();

    if (Optional.ofNullable(cluster.getKafkaConnect())
        .filter(Predicate.not(List::isEmpty))
        .isPresent()) {
      features.add(Mono.just(Feature.KAFKA_CONNECT));
    }

    if (cluster.getKsqldbServer() != null) {
      features.add(Mono.just(Feature.KSQL_DB));
    }

    if (cluster.getSchemaRegistry() != null) {
      features.add(Mono.just(Feature.SCHEMA_REGISTRY));
    }

    features.add(
        topicDeletionCheck(cluster)
            .flatMap(r -> r ? Mono.just(Feature.TOPIC_DELETION) : Mono.empty())
    );

    return Flux.fromIterable(features).flatMap(m -> m);
  }

  private Mono<Boolean> topicDeletionCheck(KafkaCluster cluster) {
    return brokerService.getController(cluster)
        .map(Node::id)
        .flatMap(broker -> brokerService.getBrokerConfigMap(cluster, broker))
        .map(config -> {
          if (config != null && config.get(DELETE_TOPIC_ENABLE) != null) {
            return Boolean.parseBoolean(config.get(DELETE_TOPIC_ENABLE).getValue());
          }
          return false;
        });
  }
}
@ -21,7 +21,6 @@ import com.provectus.kafka.ui.model.KafkaConnectCluster;
  import com.provectus.kafka.ui.model.NewConnectorDTO;
  import com.provectus.kafka.ui.model.TaskDTO;
  import com.provectus.kafka.ui.model.connect.InternalConnectInfo;
- import java.util.Collection;
  import java.util.List;
  import java.util.Map;
  import java.util.function.Function;

@ -47,25 +46,24 @@ public class KafkaConnectService {
  private final KafkaConnectMapper kafkaConnectMapper;
  private final ObjectMapper objectMapper;

- public Mono<Flux<ConnectDTO>> getConnects(String clusterName) {
+ public Mono<Flux<ConnectDTO>> getConnects(KafkaCluster cluster) {
    return Mono.just(
-       Flux.fromIterable(clustersStorage.getClusterByName(clusterName)
-           .map(KafkaCluster::getKafkaConnect).stream()
-           .flatMap(Collection::stream)
+       Flux.fromIterable(
+           cluster.getKafkaConnect().stream()
                .map(clusterMapper::toKafkaConnect)
                .collect(Collectors.toList())
        )
    );
  }

- public Flux<FullConnectorInfoDTO> getAllConnectors(final String clusterName,
+ public Flux<FullConnectorInfoDTO> getAllConnectors(final KafkaCluster cluster,
                                                     final String search) {
-   return getConnects(clusterName)
+   return getConnects(cluster)
        .flatMapMany(Function.identity())
-       .flatMap(connect -> getConnectorNames(clusterName, connect))
+       .flatMap(connect -> getConnectorNames(cluster, connect))
-       .flatMap(pair -> getConnector(clusterName, pair.getT1(), pair.getT2()))
+       .flatMap(pair -> getConnector(cluster, pair.getT1(), pair.getT2()))
        .flatMap(connector ->
-           getConnectorConfig(clusterName, connector.getConnect(), connector.getName())
+           getConnectorConfig(cluster, connector.getConnect(), connector.getName())
                .map(config -> InternalConnectInfo.builder()
                    .connector(connector)
                    .config(config)

@ -74,7 +72,7 @@ public class KafkaConnectService {
        )
        .flatMap(connectInfo -> {
          ConnectorDTO connector = connectInfo.getConnector();
-         return getConnectorTasks(clusterName, connector.getConnect(), connector.getName())
+         return getConnectorTasks(cluster, connector.getConnect(), connector.getName())
              .collectList()
              .map(tasks -> InternalConnectInfo.builder()
                  .connector(connector)

@ -85,7 +83,7 @@ public class KafkaConnectService {
        })
        .flatMap(connectInfo -> {
          ConnectorDTO connector = connectInfo.getConnector();
-         return getConnectorTopics(clusterName, connector.getConnect(), connector.getName())
+         return getConnectorTopics(cluster, connector.getConnect(), connector.getName())
              .map(ct -> InternalConnectInfo.builder()
                  .connector(connector)
                  .config(connectInfo.getConfig())

@ -115,9 +113,9 @@ public class KafkaConnectService {
        .map(String::toUpperCase);
  }

- private Mono<ConnectorTopics> getConnectorTopics(String clusterName, String connectClusterName,
+ private Mono<ConnectorTopics> getConnectorTopics(KafkaCluster cluster, String connectClusterName,
                                                   String connectorName) {
-   return getConnectAddress(clusterName, connectClusterName)
+   return getConnectAddress(cluster, connectClusterName)
        .flatMap(connectUrl -> KafkaConnectClients
            .withBaseUrl(connectUrl)
            .getConnectorTopics(connectorName)

@ -125,8 +123,8 @@ public class KafkaConnectService {
        );
  }

- private Flux<Tuple2<String, String>> getConnectorNames(String clusterName, ConnectDTO connect) {
+ private Flux<Tuple2<String, String>> getConnectorNames(KafkaCluster cluster, ConnectDTO connect) {
-   return getConnectors(clusterName, connect.getName())
+   return getConnectors(cluster, connect.getName())
        .collectList().map(e -> e.get(0))
        // for some reason `getConnectors` method returns the response as a single string
        .map(this::parseToList)

@ -140,30 +138,30 @@ public class KafkaConnectService {
        });
  }

- public Flux<String> getConnectors(String clusterName, String connectName) {
+ public Flux<String> getConnectors(KafkaCluster cluster, String connectName) {
-   return getConnectAddress(clusterName, connectName)
+   return getConnectAddress(cluster, connectName)
        .flatMapMany(connect ->
            KafkaConnectClients.withBaseUrl(connect).getConnectors(null)
                .doOnError(log::error)
        );
  }

- public Mono<ConnectorDTO> createConnector(String clusterName, String connectName,
+ public Mono<ConnectorDTO> createConnector(KafkaCluster cluster, String connectName,
                                            Mono<NewConnectorDTO> connector) {
-   return getConnectAddress(clusterName, connectName)
+   return getConnectAddress(cluster, connectName)
        .flatMap(connect ->
            connector
                .map(kafkaConnectMapper::toClient)
                .flatMap(c ->
                    KafkaConnectClients.withBaseUrl(connect).createConnector(c)
                )
-               .flatMap(c -> getConnector(clusterName, connectName, c.getName()))
+               .flatMap(c -> getConnector(cluster, connectName, c.getName()))
        );
  }

- public Mono<ConnectorDTO> getConnector(String clusterName, String connectName,
+ public Mono<ConnectorDTO> getConnector(KafkaCluster cluster, String connectName,
                                         String connectorName) {
-   return getConnectAddress(clusterName, connectName)
+   return getConnectAddress(cluster, connectName)
        .flatMap(connect -> KafkaConnectClients.withBaseUrl(connect).getConnector(connectorName)
            .map(kafkaConnectMapper::fromClient)
            .flatMap(connector ->

@ -193,17 +191,17 @@ public class KafkaConnectService {
        );
  }

- public Mono<Map<String, Object>> getConnectorConfig(String clusterName, String connectName,
+ public Mono<Map<String, Object>> getConnectorConfig(KafkaCluster cluster, String connectName,
                                                      String connectorName) {
-   return getConnectAddress(clusterName, connectName)
+   return getConnectAddress(cluster, connectName)
        .flatMap(connect ->
            KafkaConnectClients.withBaseUrl(connect).getConnectorConfig(connectorName)
        );
  }

- public Mono<ConnectorDTO> setConnectorConfig(String clusterName, String connectName,
+ public Mono<ConnectorDTO> setConnectorConfig(KafkaCluster cluster, String connectName,
                                               String connectorName, Mono<Object> requestBody) {
-   return getConnectAddress(clusterName, connectName)
+   return getConnectAddress(cluster, connectName)
        .flatMap(connect ->
            requestBody.flatMap(body ->
                KafkaConnectClients.withBaseUrl(connect)

@ -213,14 +211,15 @@ public class KafkaConnectService {
        );
  }

- public Mono<Void> deleteConnector(String clusterName, String connectName, String connectorName) {
+ public Mono<Void> deleteConnector(
+     KafkaCluster cluster, String connectName, String connectorName) {
-   return getConnectAddress(clusterName, connectName)
+   return getConnectAddress(cluster, connectName)
        .flatMap(connect ->
            KafkaConnectClients.withBaseUrl(connect).deleteConnector(connectorName)
        );
  }

- public Mono<Void> updateConnectorState(String clusterName, String connectName,
+ public Mono<Void> updateConnectorState(KafkaCluster cluster, String connectName,
                                         String connectorName, ConnectorActionDTO action) {
    Function<String, Mono<Void>> kafkaClientCall;
    switch (action) {

@ -239,13 +238,13 @@ public class KafkaConnectService {
      default:
        throw new IllegalStateException("Unexpected value: " + action);
    }
-   return getConnectAddress(clusterName, connectName)
+   return getConnectAddress(cluster, connectName)
        .flatMap(kafkaClientCall);
  }

- public Flux<TaskDTO> getConnectorTasks(String clusterName, String connectName,
+ public Flux<TaskDTO> getConnectorTasks(KafkaCluster cluster, String connectName,
                                         String connectorName) {
-   return getConnectAddress(clusterName, connectName)
+   return getConnectAddress(cluster, connectName)
        .flatMapMany(connect ->
            KafkaConnectClients.withBaseUrl(connect).getConnectorTasks(connectorName)
                .map(kafkaConnectMapper::fromClient)

@ -258,17 +257,17 @@ public class KafkaConnectService {
        );
  }

- public Mono<Void> restartConnectorTask(String clusterName, String connectName,
+ public Mono<Void> restartConnectorTask(KafkaCluster cluster, String connectName,
                                         String connectorName, Integer taskId) {
-   return getConnectAddress(clusterName, connectName)
+   return getConnectAddress(cluster, connectName)
        .flatMap(connect ->
            KafkaConnectClients.withBaseUrl(connect).restartConnectorTask(connectorName, taskId)
        );
  }

- public Mono<Flux<ConnectorPluginDTO>> getConnectorPlugins(String clusterName,
+ public Mono<Flux<ConnectorPluginDTO>> getConnectorPlugins(KafkaCluster cluster,
                                                            String connectName) {
-   return Mono.just(getConnectAddress(clusterName, connectName)
+   return Mono.just(getConnectAddress(cluster, connectName)
        .flatMapMany(connect ->
            KafkaConnectClients.withBaseUrl(connect).getConnectorPlugins()
                .map(kafkaConnectMapper::fromClient)

@ -276,8 +275,8 @@ public class KafkaConnectService {
  }

  public Mono<ConnectorPluginConfigValidationResponseDTO> validateConnectorPluginConfig(
-     String clusterName, String connectName, String pluginName, Mono<Object> requestBody) {
+     KafkaCluster cluster, String connectName, String pluginName, Mono<Object> requestBody) {
-   return getConnectAddress(clusterName, connectName)
+   return getConnectAddress(cluster, connectName)
        .flatMap(connect ->
            requestBody.flatMap(body ->
                KafkaConnectClients.withBaseUrl(connect)

@ -293,17 +292,11 @@ public class KafkaConnectService {
        .orElse(Mono.error(ClusterNotFoundException::new));
  }

- private Mono<String> getConnectAddress(String clusterName, String connectName) {
+ private Mono<String> getConnectAddress(KafkaCluster cluster, String connectName) {
-   return getCluster(clusterName)
-       .map(kafkaCluster ->
-           kafkaCluster.getKafkaConnect().stream()
+   return Mono.justOrEmpty(cluster.getKafkaConnect().stream()
        .filter(connect -> connect.getName().equals(connectName))
        .findFirst()
-       .map(KafkaConnectCluster::getAddress)
-       )
-       .flatMap(connect -> connect
-           .map(Mono::just)
-           .orElse(Mono.error(ConnectNotFoundException::new))
-       );
+       .map(KafkaConnectCluster::getAddress))
+       .switchIfEmpty(Mono.error(ConnectNotFoundException::new));
  }
  }
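Every public KafkaConnectService method now takes a resolved KafkaCluster instead of a cluster name, so cluster lookup and its not-found handling stay with the caller. A hedged caller sketch follows; the wrapper class is illustrative, the DTO packages are assumed to sit under com.provectus.kafka.ui.model, and only the service calls come from this diff.

import com.provectus.kafka.ui.model.ConnectorDTO;          // assumed model package
import com.provectus.kafka.ui.model.FullConnectorInfoDTO;  // assumed model package
import com.provectus.kafka.ui.model.KafkaCluster;
import com.provectus.kafka.ui.service.KafkaConnectService;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;

// Illustrative only: the KafkaCluster is resolved once by the caller and then
// handed to KafkaConnectService, which no longer touches ClustersStorage.
class ConnectLookupSketch {
  private final KafkaConnectService kafkaConnectService;

  ConnectLookupSketch(KafkaConnectService kafkaConnectService) {
    this.kafkaConnectService = kafkaConnectService;
  }

  Flux<FullConnectorInfoDTO> searchConnectors(KafkaCluster cluster, String query) {
    return kafkaConnectService.getAllConnectors(cluster, query);
  }

  Mono<ConnectorDTO> connector(KafkaCluster cluster, String connectName, String connectorName) {
    return kafkaConnectService.getConnector(cluster, connectName, connectorName);
  }
}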
@ -1,870 +0,0 @@
|
||||||
package com.provectus.kafka.ui.service;
|
|
||||||
|
|
||||||
import com.provectus.kafka.ui.exception.InvalidRequestApiException;
|
|
||||||
import com.provectus.kafka.ui.exception.LogDirNotFoundApiException;
|
|
||||||
import com.provectus.kafka.ui.exception.TopicMetadataException;
|
|
||||||
import com.provectus.kafka.ui.exception.TopicOrPartitionNotFoundException;
|
|
||||||
import com.provectus.kafka.ui.exception.ValidationException;
|
|
||||||
import com.provectus.kafka.ui.model.BrokerLogdirUpdateDTO;
|
|
||||||
import com.provectus.kafka.ui.model.CleanupPolicy;
|
|
||||||
import com.provectus.kafka.ui.model.CreateTopicMessageDTO;
|
|
||||||
import com.provectus.kafka.ui.model.InternalBrokerDiskUsage;
|
|
||||||
import com.provectus.kafka.ui.model.InternalBrokerMetrics;
|
|
||||||
import com.provectus.kafka.ui.model.InternalClusterMetrics;
|
|
||||||
import com.provectus.kafka.ui.model.InternalConsumerGroup;
|
|
||||||
import com.provectus.kafka.ui.model.InternalPartition;
|
|
||||||
import com.provectus.kafka.ui.model.InternalReplica;
|
|
||||||
import com.provectus.kafka.ui.model.InternalSegmentSizeDto;
|
|
||||||
import com.provectus.kafka.ui.model.InternalTopic;
|
|
||||||
import com.provectus.kafka.ui.model.InternalTopicConfig;
|
|
||||||
import com.provectus.kafka.ui.model.KafkaCluster;
|
|
||||||
import com.provectus.kafka.ui.model.MetricDTO;
|
|
||||||
import com.provectus.kafka.ui.model.PartitionsIncreaseDTO;
|
|
||||||
import com.provectus.kafka.ui.model.ReplicationFactorChangeDTO;
|
|
||||||
import com.provectus.kafka.ui.model.ServerStatusDTO;
|
|
||||||
import com.provectus.kafka.ui.model.TopicCreationDTO;
|
|
||||||
import com.provectus.kafka.ui.model.TopicUpdateDTO;
|
|
||||||
import com.provectus.kafka.ui.serde.DeserializationService;
|
|
||||||
import com.provectus.kafka.ui.serde.RecordSerDe;
|
|
||||||
import com.provectus.kafka.ui.util.ClusterUtil;
|
|
||||||
import com.provectus.kafka.ui.util.JmxClusterUtil;
|
|
||||||
import com.provectus.kafka.ui.util.JmxMetricsName;
|
|
||||||
import com.provectus.kafka.ui.util.JmxMetricsValueName;
|
|
||||||
import java.math.BigDecimal;
|
|
||||||
import java.util.ArrayList;
|
|
||||||
import java.util.Collection;
|
|
||||||
import java.util.Collections;
|
|
||||||
import java.util.Comparator;
|
|
||||||
import java.util.HashMap;
|
|
||||||
import java.util.List;
|
|
||||||
import java.util.LongSummaryStatistics;
|
|
||||||
import java.util.Map;
|
|
||||||
import java.util.Optional;
|
|
||||||
import java.util.Properties;
|
|
||||||
import java.util.UUID;
|
|
||||||
import java.util.concurrent.CompletableFuture;
|
|
||||||
import java.util.stream.Collectors;
|
|
||||||
import lombok.RequiredArgsConstructor;
|
|
||||||
import lombok.SneakyThrows;
|
|
||||||
import lombok.extern.log4j.Log4j2;
|
|
||||||
import org.apache.kafka.clients.admin.NewPartitionReassignment;
|
|
||||||
import org.apache.kafka.clients.admin.NewPartitions;
|
|
||||||
import org.apache.kafka.clients.consumer.ConsumerConfig;
|
|
||||||
import org.apache.kafka.clients.consumer.KafkaConsumer;
|
|
||||||
import org.apache.kafka.clients.producer.KafkaProducer;
|
|
||||||
import org.apache.kafka.clients.producer.ProducerConfig;
|
|
||||||
import org.apache.kafka.clients.producer.ProducerRecord;
|
|
||||||
import org.apache.kafka.clients.producer.RecordMetadata;
|
|
||||||
import org.apache.kafka.common.Node;
|
|
||||||
import org.apache.kafka.common.TopicPartition;
|
|
||||||
import org.apache.kafka.common.TopicPartitionReplica;
|
|
||||||
import org.apache.kafka.common.errors.InvalidRequestException;
|
|
||||||
import org.apache.kafka.common.errors.LogDirNotFoundException;
|
|
||||||
import org.apache.kafka.common.errors.TimeoutException;
|
|
||||||
import org.apache.kafka.common.errors.UnknownTopicOrPartitionException;
|
|
||||||
import org.apache.kafka.common.header.Header;
|
|
||||||
import org.apache.kafka.common.header.internals.RecordHeader;
|
|
||||||
import org.apache.kafka.common.header.internals.RecordHeaders;
|
|
||||||
import org.apache.kafka.common.requests.DescribeLogDirsResponse;
|
|
||||||
import org.apache.kafka.common.serialization.ByteArraySerializer;
|
|
||||||
import org.apache.kafka.common.serialization.BytesDeserializer;
|
|
||||||
import org.apache.kafka.common.utils.Bytes;
|
|
||||||
import org.springframework.stereotype.Service;
|
|
||||||
import reactor.core.publisher.Flux;
|
|
||||||
import reactor.core.publisher.Mono;
|
|
||||||
import reactor.util.function.Tuple2;
|
|
||||||
import reactor.util.function.Tuple3;
|
|
||||||
import reactor.util.function.Tuples;
|
|
||||||
|
|
||||||
@Service
|
|
||||||
@RequiredArgsConstructor
|
|
||||||
@Log4j2
|
|
||||||
public class KafkaService {
|
|
||||||
|
|
||||||
private final ZookeeperService zookeeperService;
|
|
||||||
private final JmxClusterUtil jmxClusterUtil;
|
|
||||||
private final ClustersStorage clustersStorage;
|
|
||||||
private final DeserializationService deserializationService;
|
|
||||||
private final AdminClientService adminClientService;
|
|
||||||
private final FeatureService featureService;
|
|
||||||
|
|
||||||
|
|
||||||
public KafkaCluster getUpdatedCluster(KafkaCluster cluster, InternalTopic updatedTopic) {
|
|
||||||
final Map<String, InternalTopic> topics =
|
|
||||||
Optional.ofNullable(cluster.getTopics()).map(
|
|
||||||
t -> new HashMap<>(cluster.getTopics())
|
|
||||||
).orElse(new HashMap<>());
|
|
||||||
topics.put(updatedTopic.getName(), updatedTopic);
|
|
||||||
return cluster.toBuilder().topics(topics).build();
|
|
||||||
}
|
|
||||||
|
|
||||||
public KafkaCluster getUpdatedCluster(KafkaCluster cluster, String topicToDelete) {
|
|
||||||
final Map<String, InternalTopic> topics = new HashMap<>(cluster.getTopics());
|
|
||||||
topics.remove(topicToDelete);
|
|
||||||
return cluster.toBuilder().topics(topics).build();
|
|
||||||
}
|
|
||||||
|
|
||||||
@SneakyThrows
|
|
||||||
public Mono<KafkaCluster> getUpdatedCluster(KafkaCluster cluster) {
|
|
||||||
return adminClientService.get(cluster)
|
|
||||||
.flatMap(
|
|
||||||
ac -> ac.getClusterVersion().flatMap(
|
|
||||||
version ->
|
|
||||||
getClusterMetrics(ac)
|
|
||||||
.flatMap(i -> fillJmxMetrics(i, cluster.getName(), ac))
|
|
||||||
.flatMap(clusterMetrics ->
|
|
||||||
getTopicsData(ac).flatMap(it -> {
|
|
||||||
if (cluster.getDisableLogDirsCollection() == null
|
|
||||||
|| !cluster.getDisableLogDirsCollection()) {
|
|
||||||
return updateSegmentMetrics(ac, clusterMetrics, it
|
|
||||||
);
|
|
||||||
} else {
|
|
||||||
return emptySegmentMetrics(clusterMetrics, it);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
).map(segmentSizeDto -> buildFromData(cluster, version, segmentSizeDto))
|
|
||||||
)
|
|
||||||
)
|
|
||||||
).flatMap(
|
|
||||||
nc -> featureService.getAvailableFeatures(cluster).collectList()
|
|
||||||
.map(f -> nc.toBuilder().features(f).build())
|
|
||||||
).doOnError(e ->
|
|
||||||
log.error("Failed to collect cluster {} info", cluster.getName(), e)
|
|
||||||
).onErrorResume(
|
|
||||||
e -> Mono.just(cluster.toBuilder()
|
|
||||||
.status(ServerStatusDTO.OFFLINE)
|
|
||||||
.lastKafkaException(e)
|
|
||||||
.build())
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
private KafkaCluster buildFromData(KafkaCluster currentCluster,
|
|
||||||
String version,
|
|
||||||
InternalSegmentSizeDto segmentSizeDto) {
|
|
||||||
|
|
||||||
var topics = segmentSizeDto.getInternalTopicWithSegmentSize();
|
|
||||||
var brokersMetrics = segmentSizeDto.getClusterMetricsWithSegmentSize();
|
|
||||||
var brokersIds = new ArrayList<>(brokersMetrics.getInternalBrokerMetrics().keySet());
|
|
||||||
|
|
||||||
InternalClusterMetrics.InternalClusterMetricsBuilder metricsBuilder =
|
|
||||||
brokersMetrics.toBuilder();
|
|
||||||
|
|
||||||
InternalClusterMetrics topicsMetrics = collectTopicsMetrics(topics);
|
|
||||||
|
|
||||||
ServerStatusDTO zookeeperStatus = ServerStatusDTO.OFFLINE;
|
|
||||||
Throwable zookeeperException = null;
|
|
||||||
try {
|
|
||||||
zookeeperStatus = zookeeperService.isZookeeperOnline(currentCluster)
|
|
||||||
? ServerStatusDTO.ONLINE
|
|
||||||
: ServerStatusDTO.OFFLINE;
|
|
||||||
} catch (Throwable e) {
|
|
||||||
zookeeperException = e;
|
|
||||||
}
|
|
||||||
|
|
||||||
InternalClusterMetrics clusterMetrics = metricsBuilder
|
|
||||||
.activeControllers(brokersMetrics.getActiveControllers())
|
|
||||||
.topicCount(topicsMetrics.getTopicCount())
|
|
||||||
.brokerCount(brokersMetrics.getBrokerCount())
|
|
||||||
.underReplicatedPartitionCount(topicsMetrics.getUnderReplicatedPartitionCount())
|
|
||||||
.inSyncReplicasCount(topicsMetrics.getInSyncReplicasCount())
|
|
||||||
.outOfSyncReplicasCount(topicsMetrics.getOutOfSyncReplicasCount())
|
|
||||||
.onlinePartitionCount(topicsMetrics.getOnlinePartitionCount())
|
|
||||||
.offlinePartitionCount(topicsMetrics.getOfflinePartitionCount())
|
|
||||||
.zooKeeperStatus(ClusterUtil.convertToIntServerStatus(zookeeperStatus))
|
|
||||||
.version(version)
|
|
||||||
.build();
|
|
||||||
|
|
||||||
return currentCluster.toBuilder()
|
|
||||||
.version(version)
|
|
||||||
.status(ServerStatusDTO.ONLINE)
|
|
||||||
.zookeeperStatus(zookeeperStatus)
|
|
||||||
.lastZookeeperException(zookeeperException)
|
|
||||||
.lastKafkaException(null)
|
|
||||||
.metrics(clusterMetrics)
|
|
||||||
.topics(topics)
|
|
||||||
.brokers(brokersIds)
|
|
||||||
.build();
|
|
||||||
}
|
|
||||||
|
|
||||||
private InternalClusterMetrics collectTopicsMetrics(Map<String, InternalTopic> topics) {
|
|
||||||
|
|
||||||
int underReplicatedPartitions = 0;
|
|
||||||
int inSyncReplicasCount = 0;
|
|
||||||
int outOfSyncReplicasCount = 0;
|
|
||||||
int onlinePartitionCount = 0;
|
|
||||||
int offlinePartitionCount = 0;
|
|
||||||
|
|
||||||
for (InternalTopic topic : topics.values()) {
|
|
||||||
underReplicatedPartitions += topic.getUnderReplicatedPartitions();
|
|
||||||
inSyncReplicasCount += topic.getInSyncReplicas();
|
|
||||||
outOfSyncReplicasCount += (topic.getReplicas() - topic.getInSyncReplicas());
|
|
||||||
onlinePartitionCount +=
|
|
||||||
topic.getPartitions().values().stream().mapToInt(s -> s.getLeader() == null ? 0 : 1)
|
|
||||||
.sum();
|
|
||||||
offlinePartitionCount +=
|
|
||||||
topic.getPartitions().values().stream().mapToInt(s -> s.getLeader() != null ? 0 : 1)
|
|
||||||
.sum();
|
|
||||||
}
|
|
||||||
|
|
||||||
return InternalClusterMetrics.builder()
|
|
||||||
.underReplicatedPartitionCount(underReplicatedPartitions)
|
|
||||||
.inSyncReplicasCount(inSyncReplicasCount)
|
|
||||||
            .outOfSyncReplicasCount(outOfSyncReplicasCount)
            .onlinePartitionCount(onlinePartitionCount)
            .offlinePartitionCount(offlinePartitionCount)
            .topicCount(topics.size())
            .build();
  }

  private Map<String, InternalTopic> mergeWithConfigs(
      List<InternalTopic> topics, Map<String, List<InternalTopicConfig>> configs) {
    return topics.stream()
        .map(t -> t.toBuilder().topicConfigs(configs.get(t.getName())).build())
        .map(t -> t.toBuilder().cleanUpPolicy(
            CleanupPolicy.fromString(t.getTopicConfigs().stream()
                .filter(config -> config.getName().equals("cleanup.policy"))
                .findFirst()
                .orElseGet(() -> InternalTopicConfig.builder().value("unknown").build())
                .getValue())).build())
        .collect(Collectors.toMap(
            InternalTopic::getName,
            e -> e
        ));
  }

  @SneakyThrows
  private Mono<List<InternalTopic>> getTopicsData(ReactiveAdminClient client) {
    return client.listTopics(true)
        .flatMap(topics -> getTopicsData(client, topics).collectList());
  }

  private Flux<InternalTopic> getTopicsData(ReactiveAdminClient client, Collection<String> topics) {
    final Mono<Map<String, List<InternalTopicConfig>>> configsMono =
        loadTopicsConfig(client, topics);

    return client.describeTopics(topics)
        .map(m -> m.values().stream()
            .map(ClusterUtil::mapToInternalTopic).collect(Collectors.toList()))
        .flatMap(internalTopics -> configsMono
            .map(configs -> mergeWithConfigs(internalTopics, configs).values()))
        .flatMapMany(Flux::fromIterable);
  }

  private Mono<InternalClusterMetrics> getClusterMetrics(ReactiveAdminClient client) {
    return client.describeCluster().map(desc ->
        InternalClusterMetrics.builder()
            .brokerCount(desc.getNodes().size())
            .activeControllers(desc.getController() != null ? 1 : 0)
            .build()
    );
  }

  @SneakyThrows
  public Mono<InternalTopic> createTopic(ReactiveAdminClient adminClient,
                                         Mono<TopicCreationDTO> topicCreation) {
    return topicCreation.flatMap(topicData ->
        adminClient.createTopic(
            topicData.getName(),
            topicData.getPartitions(),
            topicData.getReplicationFactor().shortValue(),
            topicData.getConfigs()
        ).thenReturn(topicData)
    )
        .onErrorResume(t -> Mono.error(new TopicMetadataException(t.getMessage())))
        .flatMap(topicData -> getUpdatedTopic(adminClient, topicData.getName()))
        .switchIfEmpty(Mono.error(new RuntimeException("Can't find created topic")));
  }

  public Mono<InternalTopic> createTopic(
      KafkaCluster cluster, Mono<TopicCreationDTO> topicCreation) {
    return adminClientService.get(cluster).flatMap(ac -> createTopic(ac, topicCreation));
  }

  public Mono<Void> deleteTopic(KafkaCluster cluster, String topicName) {
    return adminClientService.get(cluster).flatMap(c -> c.deleteTopic(topicName));
  }

  @SneakyThrows
  private Mono<Map<String, List<InternalTopicConfig>>> loadTopicsConfig(
      ReactiveAdminClient client, Collection<String> topicNames) {
    return client.getTopicsConfig(topicNames)
        .map(configs ->
            configs.entrySet().stream().collect(Collectors.toMap(
                Map.Entry::getKey,
                c -> c.getValue().stream()
                    .map(ClusterUtil::mapToInternalTopicConfig)
                    .collect(Collectors.toList()))));
  }

  public Mono<List<InternalConsumerGroup>> getConsumerGroupsInternal(KafkaCluster cluster) {
    return adminClientService.get(cluster).flatMap(ac ->
        ac.listConsumerGroups()
            .flatMap(groupIds -> getConsumerGroupsInternal(cluster, groupIds)));
  }

  public Mono<List<InternalConsumerGroup>> getConsumerGroupsInternal(KafkaCluster cluster,
                                                                     List<String> groupIds) {
    return adminClientService.get(cluster).flatMap(ac ->
        ac.describeConsumerGroups(groupIds)
            .map(Map::values)
            .flatMap(descriptions ->
                Flux.fromIterable(descriptions)
                    .parallel()
                    .flatMap(d ->
                        ac.listConsumerGroupOffsets(d.groupId())
                            .map(offsets -> ClusterUtil.convertToInternalConsumerGroup(d, offsets))
                    )
                    .sequential()
                    .collectList()));
  }

  public Mono<List<InternalConsumerGroup>> getConsumerGroups(
      KafkaCluster cluster, Optional<String> topic, List<String> groupIds) {
    final Mono<List<InternalConsumerGroup>> consumerGroups;

    if (groupIds.isEmpty()) {
      consumerGroups = getConsumerGroupsInternal(cluster);
    } else {
      consumerGroups = getConsumerGroupsInternal(cluster, groupIds);
    }

    return consumerGroups.map(c ->
        c.stream()
            .map(d -> ClusterUtil.filterConsumerGroupTopic(d, topic))
            .filter(Optional::isPresent)
            .map(Optional::get)
            .map(g ->
                g.toBuilder().endOffsets(
                    topicPartitionsEndOffsets(cluster, g.getOffsets().keySet())
                ).build()
            )
            .collect(Collectors.toList())
    );
  }

  public Map<TopicPartition, Long> topicPartitionsEndOffsets(
      KafkaCluster cluster, Collection<TopicPartition> topicPartitions) {
    try (KafkaConsumer<Bytes, Bytes> consumer = createConsumer(cluster)) {
      return consumer.endOffsets(topicPartitions);
    }
  }

  public KafkaConsumer<Bytes, Bytes> createConsumer(KafkaCluster cluster) {
    return createConsumer(cluster, Map.of());
  }

  public KafkaConsumer<Bytes, Bytes> createConsumer(KafkaCluster cluster,
                                                    Map<String, Object> properties) {
    Properties props = new Properties();
    props.putAll(cluster.getProperties());
    props.put(ConsumerConfig.CLIENT_ID_CONFIG, "kafka-ui-" + UUID.randomUUID());
    props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, cluster.getBootstrapServers());
    props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, BytesDeserializer.class);
    props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, BytesDeserializer.class);
    props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    props.putAll(properties);

    return new KafkaConsumer<>(props);
  }

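A note on the two createConsumer overloads above: the second parameter lets a caller override any of the defaults set here, since props.putAll(properties) runs last. A minimal caller-side sketch, assuming only that the caller wants to cap the poll batch (MAX_POLL_RECORDS_CONFIG is a standard Kafka client property, not something added by this commit):

    // hypothetical usage: override one consumer property while keeping the service defaults
    Map<String, Object> overrides = Map.of(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, 20);
    try (KafkaConsumer<Bytes, Bytes> consumer = createConsumer(cluster, overrides)) {
      // the override wins over the defaults because it is applied after them
    }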
  @SneakyThrows
  public Mono<InternalTopic> updateTopic(KafkaCluster cluster,
                                         String topicName,
                                         TopicUpdateDTO topicUpdate) {
    return adminClientService.get(cluster)
        .flatMap(ac ->
            ac.updateTopicConfig(topicName,
                topicUpdate.getConfigs()).then(getUpdatedTopic(ac, topicName)));
  }

  private Mono<InternalTopic> getUpdatedTopic(ReactiveAdminClient ac, String topicName) {
    return getTopicsData(ac, List.of(topicName)).next();
  }

  private InternalTopic mergeWithStats(InternalTopic topic,
                                       Map<String, LongSummaryStatistics> topics,
                                       Map<TopicPartition, LongSummaryStatistics> partitions) {
    final LongSummaryStatistics stats = topics.get(topic.getName());

    return topic.toBuilder()
        .segmentSize(stats.getSum())
        .segmentCount(stats.getCount())
        .partitions(
            topic.getPartitions().entrySet().stream().map(e ->
                Tuples.of(e.getKey(), mergeWithStats(topic.getName(), e.getValue(), partitions))
            ).collect(Collectors.toMap(
                Tuple2::getT1,
                Tuple2::getT2
            ))
        ).build();
  }

  private InternalPartition mergeWithStats(String topic, InternalPartition partition,
                                           Map<TopicPartition, LongSummaryStatistics> partitions) {
    final LongSummaryStatistics stats =
        partitions.get(new TopicPartition(topic, partition.getPartition()));
    return partition.toBuilder()
        .segmentSize(stats.getSum())
        .segmentCount(stats.getCount())
        .build();
  }

  private Mono<InternalSegmentSizeDto> emptySegmentMetrics(InternalClusterMetrics clusterMetrics,
                                                           List<InternalTopic> internalTopics) {
    return Mono.just(
        InternalSegmentSizeDto.builder()
            .clusterMetricsWithSegmentSize(
                clusterMetrics.toBuilder()
                    .segmentSize(0)
                    .segmentCount(0)
                    .internalBrokerDiskUsage(Collections.emptyMap())
                    .build()
            )
            .internalTopicWithSegmentSize(
                internalTopics.stream().collect(
                    Collectors.toMap(
                        InternalTopic::getName,
                        i -> i
                    )
                )
            ).build()
    );
  }

  private Mono<InternalSegmentSizeDto> updateSegmentMetrics(ReactiveAdminClient ac,
                                                            InternalClusterMetrics clusterMetrics,
                                                            List<InternalTopic> internalTopics) {
    return ac.describeCluster().flatMap(
        clusterDescription ->
            ac.describeLogDirs().map(log -> {
              final List<Tuple3<Integer, TopicPartition, Long>> topicPartitions =
                  log.entrySet().stream().flatMap(b ->
                      b.getValue().entrySet().stream().flatMap(topicMap ->
                          topicMap.getValue().replicaInfos.entrySet().stream()
                              .map(e -> Tuples.of(b.getKey(), e.getKey(), e.getValue().size))
                      )
                  ).collect(Collectors.toList());

              final Map<TopicPartition, LongSummaryStatistics> partitionStats =
                  topicPartitions.stream().collect(
                      Collectors.groupingBy(
                          Tuple2::getT2,
                          Collectors.summarizingLong(Tuple3::getT3)
                      )
                  );

              final Map<String, LongSummaryStatistics> topicStats =
                  topicPartitions.stream().collect(
                      Collectors.groupingBy(
                          t -> t.getT2().topic(),
                          Collectors.summarizingLong(Tuple3::getT3)
                      )
                  );

              final Map<Integer, LongSummaryStatistics> brokerStats =
                  topicPartitions.stream().collect(
                      Collectors.groupingBy(
                          Tuple2::getT1,
                          Collectors.summarizingLong(Tuple3::getT3)
                      )
                  );

              final LongSummaryStatistics summary =
                  topicPartitions.stream().collect(Collectors.summarizingLong(Tuple3::getT3));

              final Map<String, InternalTopic> resultTopics = internalTopics.stream().map(e ->
                  Tuples.of(e.getName(), mergeWithStats(e, topicStats, partitionStats))
              ).collect(Collectors.toMap(
                  Tuple2::getT1,
                  Tuple2::getT2
              ));

              final Map<Integer, InternalBrokerDiskUsage> resultBrokers =
                  brokerStats.entrySet().stream().map(e ->
                      Tuples.of(e.getKey(), InternalBrokerDiskUsage.builder()
                          .segmentSize(e.getValue().getSum())
                          .segmentCount(e.getValue().getCount())
                          .build()
                      )
                  ).collect(Collectors.toMap(
                      Tuple2::getT1,
                      Tuple2::getT2
                  ));

              return InternalSegmentSizeDto.builder()
                  .clusterMetricsWithSegmentSize(
                      clusterMetrics.toBuilder()
                          .segmentSize(summary.getSum())
                          .segmentCount(summary.getCount())
                          .internalBrokerDiskUsage(resultBrokers)
                          .build()
                  )
                  .internalTopicWithSegmentSize(resultTopics).build();
            })
    );
  }

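The three groupingBy calls in updateSegmentMetrics all reduce the same (broker, partition, size) tuples with summarizingLong; a small standalone sketch of that rollup pattern with hypothetical sizes, using only the JDK and Reactor tuple types already imported here:

    List<Tuple3<Integer, TopicPartition, Long>> sizes = List.of(
        Tuples.of(1, new TopicPartition("orders", 0), 100L),
        Tuples.of(2, new TopicPartition("orders", 1), 250L));
    Map<String, LongSummaryStatistics> byTopic = sizes.stream().collect(
        Collectors.groupingBy(t -> t.getT2().topic(), Collectors.summarizingLong(Tuple3::getT3)));
    // byTopic.get("orders").getSum() == 350, byTopic.get("orders").getCount() == 2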
  public List<MetricDTO> getJmxMetric(String clusterName, Node node) {
    return clustersStorage.getClusterByName(clusterName)
        .filter(c -> c.getJmxPort() != null)
        .filter(c -> c.getJmxPort() > 0)
        .map(c -> jmxClusterUtil.getJmxMetrics(node.host(), c.getJmxPort(), c.isJmxSsl(),
            c.getJmxUsername(), c.getJmxPassword()))
        .orElse(Collections.emptyList());
  }

  private Mono<InternalClusterMetrics> fillJmxMetrics(InternalClusterMetrics internalClusterMetrics,
                                                      String clusterName, ReactiveAdminClient ac) {
    return fillBrokerMetrics(internalClusterMetrics, clusterName, ac)
        .map(this::calculateClusterMetrics);
  }

  private Mono<InternalClusterMetrics> fillBrokerMetrics(
      InternalClusterMetrics internalClusterMetrics, String clusterName, ReactiveAdminClient ac) {
    return ac.describeCluster()
        .flatMapIterable(desc -> desc.getNodes())
        .map(broker ->
            Map.of(broker.id(), InternalBrokerMetrics.builder()
                .metrics(getJmxMetric(clusterName, broker)).build())
        )
        .collectList()
        .map(s -> internalClusterMetrics.toBuilder()
            .internalBrokerMetrics(ClusterUtil.toSingleMap(s.stream())).build());
  }

  private InternalClusterMetrics calculateClusterMetrics(
      InternalClusterMetrics internalClusterMetrics) {
    final List<MetricDTO> metrics = internalClusterMetrics.getInternalBrokerMetrics().values()
        .stream()
        .flatMap(b -> b.getMetrics().stream())
        .collect(
            Collectors.groupingBy(
                MetricDTO::getCanonicalName,
                Collectors.reducing(jmxClusterUtil::reduceJmxMetrics)
            )
        ).values().stream()
        .filter(Optional::isPresent)
        .map(Optional::get)
        .collect(Collectors.toList());
    final InternalClusterMetrics.InternalClusterMetricsBuilder metricsBuilder =
        internalClusterMetrics.toBuilder().metrics(metrics);
    metricsBuilder.bytesInPerSec(findTopicMetrics(
        metrics, JmxMetricsName.BytesInPerSec, JmxMetricsValueName.FiveMinuteRate
    ));
    metricsBuilder.bytesOutPerSec(findTopicMetrics(
        metrics, JmxMetricsName.BytesOutPerSec, JmxMetricsValueName.FiveMinuteRate
    ));
    return metricsBuilder.build();
  }

  private Map<String, BigDecimal> findTopicMetrics(List<MetricDTO> metrics,
                                                   JmxMetricsName metricsName,
                                                   JmxMetricsValueName valueName) {
    return metrics.stream().filter(m -> metricsName.name().equals(m.getName()))
        .filter(m -> m.getParams().containsKey("topic"))
        .filter(m -> m.getValue().containsKey(valueName.name()))
        .map(m -> Tuples.of(
            m.getParams().get("topic"),
            m.getValue().get(valueName.name())
        )).collect(Collectors.groupingBy(
            Tuple2::getT1,
            Collectors.reducing(BigDecimal.ZERO, Tuple2::getT2, BigDecimal::add)
        ));
  }

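findTopicMetrics above folds per-broker metric values into a single BigDecimal per topic; the reduction itself, isolated from the MetricDTO plumbing, is just this pattern (values are hypothetical):

    Map<String, BigDecimal> perTopic = Stream.of(
            Tuples.of("orders", new BigDecimal("10.5")),
            Tuples.of("orders", new BigDecimal("4.5")))
        .collect(Collectors.groupingBy(
            Tuple2::getT1,
            Collectors.reducing(BigDecimal.ZERO, Tuple2::getT2, BigDecimal::add)));
    // perTopic.get("orders") -> 15.0, i.e. the per-broker rates sum up per topic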
  public Map<Integer, InternalPartition> getTopicPartitions(KafkaCluster c, InternalTopic topic) {
    var tps = topic.getPartitions().values().stream()
        .map(t -> new TopicPartition(topic.getName(), t.getPartition()))
        .collect(Collectors.toList());
    Map<Integer, InternalPartition> partitions =
        topic.getPartitions().values().stream().collect(Collectors.toMap(
            InternalPartition::getPartition,
            tp -> tp
        ));

    try (var consumer = createConsumer(c)) {
      final Map<TopicPartition, Long> earliest = consumer.beginningOffsets(tps);
      final Map<TopicPartition, Long> latest = consumer.endOffsets(tps);

      return tps.stream()
          .map(tp -> partitions.get(tp.partition()).toBuilder()
              .offsetMin(Optional.ofNullable(earliest.get(tp)).orElse(0L))
              .offsetMax(Optional.ofNullable(latest.get(tp)).orElse(0L))
              .build()
          ).collect(Collectors.toMap(
              InternalPartition::getPartition,
              tp -> tp
          ));
    } catch (Exception e) {
      return Collections.emptyMap();
    }
  }

  public Mono<Void> deleteTopicMessages(KafkaCluster cluster, Map<TopicPartition, Long> offsets) {
    return adminClientService.get(cluster).flatMap(ac -> ac.deleteRecords(offsets));
  }

  public Mono<RecordMetadata> sendMessage(KafkaCluster cluster, String topic,
                                          CreateTopicMessageDTO msg) {
    RecordSerDe serde =
        deserializationService.getRecordDeserializerForCluster(cluster);

    Properties properties = new Properties();
    properties.putAll(cluster.getProperties());
    properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, cluster.getBootstrapServers());
    properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class);
    properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class);
    try (KafkaProducer<byte[], byte[]> producer = new KafkaProducer<>(properties)) {
      ProducerRecord<byte[], byte[]> producerRecord = serde.serialize(
          topic,
          msg.getKey(),
          msg.getContent(),
          msg.getPartition()
      );
      producerRecord = new ProducerRecord<>(
          producerRecord.topic(),
          producerRecord.partition(),
          producerRecord.key(),
          producerRecord.value(),
          createHeaders(msg.getHeaders()));

      CompletableFuture<RecordMetadata> cf = new CompletableFuture<>();
      producer.send(producerRecord, (metadata, exception) -> {
        if (exception != null) {
          cf.completeExceptionally(exception);
        } else {
          cf.complete(metadata);
        }
      });
      return Mono.fromFuture(cf);
    }
  }

  private Iterable<Header> createHeaders(Map<String, String> clientHeaders) {
    if (clientHeaders == null) {
      return null;
    }
    RecordHeaders headers = new RecordHeaders();
    clientHeaders.forEach((k, v) -> headers.add(new RecordHeader(k, v.getBytes())));
    return headers;
  }

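sendMessage above completes a CompletableFuture from the producer callback and wraps it with Mono.fromFuture, so the send happens eagerly when the method is called. A sketch of the deferred variant, in case lazy, per-subscription sending were wanted (an assumption for illustration, not what this commit does):

    Mono<RecordMetadata> lazySend = Mono.defer(() -> {
      CompletableFuture<RecordMetadata> cf = new CompletableFuture<>();
      producer.send(producerRecord, (metadata, exception) -> {
        if (exception != null) {
          cf.completeExceptionally(exception);
        } else {
          cf.complete(metadata);
        }
      });
      return Mono.fromFuture(cf);  // nothing is sent until lazySend is subscribed
    });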
  public Mono<InternalTopic> increaseTopicPartitions(
      KafkaCluster cluster,
      String topicName,
      PartitionsIncreaseDTO partitionsIncrease) {
    return adminClientService.get(cluster)
        .flatMap(ac -> {
          Integer actualCount = cluster.getTopics().get(topicName).getPartitionCount();
          Integer requestedCount = partitionsIncrease.getTotalPartitionsCount();

          if (requestedCount < actualCount) {
            return Mono.error(
                new ValidationException(String.format(
                    "Topic currently has %s partitions, which is higher than the requested %s.",
                    actualCount, requestedCount)));
          }
          if (requestedCount.equals(actualCount)) {
            return Mono.error(
                new ValidationException(
                    String.format("Topic already has %s partitions.", actualCount)));
          }

          Map<String, NewPartitions> newPartitionsMap = Collections.singletonMap(
              topicName,
              NewPartitions.increaseTo(partitionsIncrease.getTotalPartitionsCount())
          );
          return ac.createPartitions(newPartitionsMap)
              .then(getUpdatedTopic(ac, topicName));
        });
  }

  private Mono<InternalTopic> changeReplicationFactor(
      ReactiveAdminClient adminClient,
      String topicName,
      Map<TopicPartition, Optional<NewPartitionReassignment>> reassignments
  ) {
    return adminClient.alterPartitionReassignments(reassignments)
        .then(getUpdatedTopic(adminClient, topicName));
  }

  /**
   * Change topic replication factor, works on brokers versions 5.4.x and higher
   */
  public Mono<InternalTopic> changeReplicationFactor(
      KafkaCluster cluster,
      String topicName,
      ReplicationFactorChangeDTO replicationFactorChange) {
    return adminClientService.get(cluster)
        .flatMap(ac -> {
          Integer actual = cluster.getTopics().get(topicName).getReplicationFactor();
          Integer requested = replicationFactorChange.getTotalReplicationFactor();
          Integer brokersCount = cluster.getMetrics().getBrokerCount();

          if (requested.equals(actual)) {
            return Mono.error(
                new ValidationException(
                    String.format("Topic already has replicationFactor %s.", actual)));
          }
          if (requested > brokersCount) {
            return Mono.error(
                new ValidationException(
                    String.format("Requested replication factor %s more than brokers count %s.",
                        requested, brokersCount)));
          }
          return changeReplicationFactor(ac, topicName,
              getPartitionsReassignments(cluster, topicName,
                  replicationFactorChange));
        });
  }

  public Mono<Map<Integer, Map<String, DescribeLogDirsResponse.LogDirInfo>>> getClusterLogDirs(
      KafkaCluster cluster, List<Integer> reqBrokers) {
    return adminClientService.get(cluster)
        .flatMap(admin -> {
          List<Integer> brokers = new ArrayList<>(cluster.getBrokers());
          if (reqBrokers != null && !reqBrokers.isEmpty()) {
            brokers.retainAll(reqBrokers);
          }
          return admin.describeLogDirs(brokers);
        })
        .onErrorResume(TimeoutException.class, (TimeoutException e) -> {
          log.error("Error during fetching log dirs", e);
          return Mono.just(new HashMap<>());
        });
  }

  private Map<TopicPartition, Optional<NewPartitionReassignment>> getPartitionsReassignments(
      KafkaCluster cluster,
      String topicName,
      ReplicationFactorChangeDTO replicationFactorChange) {
    // Current assignment map (partition number -> list of brokers)
    Map<Integer, List<Integer>> currentAssignment = getCurrentAssignment(cluster, topicName);
    // Brokers map (broker id -> number of replicas it currently holds)
    Map<Integer, Integer> brokersUsage = getBrokersMap(cluster, currentAssignment);
    int currentReplicationFactor = cluster.getTopics().get(topicName).getReplicationFactor();

    // If we need to increase the replication factor
    if (replicationFactorChange.getTotalReplicationFactor() > currentReplicationFactor) {
      // For each partition
      for (var assignmentList : currentAssignment.values()) {
        // Get the brokers list sorted by usage
        var brokers = brokersUsage.entrySet().stream()
            .sorted(Map.Entry.comparingByValue())
            .map(Map.Entry::getKey)
            .collect(Collectors.toList());

        // Iterate brokers and try to add them to the assignment
        // while (partition replicas count != requested replication factor)
        for (Integer broker : brokers) {
          if (!assignmentList.contains(broker)) {
            assignmentList.add(broker);
            brokersUsage.merge(broker, 1, Integer::sum);
          }
          if (assignmentList.size() == replicationFactorChange.getTotalReplicationFactor()) {
            break;
          }
        }
        if (assignmentList.size() != replicationFactorChange.getTotalReplicationFactor()) {
          throw new ValidationException("Something went wrong during adding replicas");
        }
      }

    // If we need to decrease the replication factor
    } else if (replicationFactorChange.getTotalReplicationFactor() < currentReplicationFactor) {
      for (Map.Entry<Integer, List<Integer>> assignmentEntry : currentAssignment.entrySet()) {
        var partition = assignmentEntry.getKey();
        var brokers = assignmentEntry.getValue();

        // Get the brokers list sorted by usage in reverse order
        var brokersUsageList = brokersUsage.entrySet().stream()
            .sorted(Map.Entry.comparingByValue(Comparator.reverseOrder()))
            .map(Map.Entry::getKey)
            .collect(Collectors.toList());

        // Iterate brokers and try to remove them from the assignment
        // while (partition replicas count != requested replication factor)
        for (Integer broker : brokersUsageList) {
          // Check whether the broker is the leader of the partition; the leader is never removed
          if (!cluster.getTopics().get(topicName).getPartitions().get(partition).getLeader()
              .equals(broker)) {
            brokers.remove(broker);
            brokersUsage.merge(broker, -1, Integer::sum);
          }
          if (brokers.size() == replicationFactorChange.getTotalReplicationFactor()) {
            break;
          }
        }
        if (brokers.size() != replicationFactorChange.getTotalReplicationFactor()) {
          throw new ValidationException("Something went wrong during removing replicas");
        }
      }
    } else {
      throw new ValidationException("Replication factor already equals requested");
    }

    // Return the result map
    return currentAssignment.entrySet().stream().collect(Collectors.toMap(
        e -> new TopicPartition(topicName, e.getKey()),
        e -> Optional.of(new NewPartitionReassignment(e.getValue()))
    ));
  }

  private Map<Integer, List<Integer>> getCurrentAssignment(KafkaCluster cluster, String topicName) {
    return cluster.getTopics().get(topicName).getPartitions().values().stream()
        .collect(Collectors.toMap(
            InternalPartition::getPartition,
            p -> p.getReplicas().stream()
                .map(InternalReplica::getBroker)
                .collect(Collectors.toList())
        ));
  }

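For orientation, the map returned by getPartitionsReassignments is keyed by TopicPartition and carries the full target replica list per partition. A hand-built example of what a replication-factor 1 -> 2 increase for a two-partition topic could look like (topic name and broker ids are hypothetical):

    Map<TopicPartition, Optional<NewPartitionReassignment>> reassignments = Map.of(
        new TopicPartition("orders", 0), Optional.of(new NewPartitionReassignment(List.of(1, 2))),
        new TopicPartition("orders", 1), Optional.of(new NewPartitionReassignment(List.of(2, 3))));
    // alterPartitionReassignments(reassignments) then applies the new replica sets per partition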
  private Map<Integer, Integer> getBrokersMap(KafkaCluster cluster,
                                              Map<Integer, List<Integer>> currentAssignment) {
    Map<Integer, Integer> result = cluster.getBrokers().stream()
        .collect(Collectors.toMap(
            c -> c,
            c -> 0
        ));
    currentAssignment.values().forEach(brokers -> brokers
        .forEach(broker -> result.put(broker, result.get(broker) + 1)));

    return result;
  }

  public Mono<Void> updateBrokerLogDir(KafkaCluster cluster, Integer broker,
                                       BrokerLogdirUpdateDTO brokerLogDir) {
    return adminClientService.get(cluster)
        .flatMap(ac -> updateBrokerLogDir(ac, brokerLogDir, broker));
  }

  private Mono<Void> updateBrokerLogDir(ReactiveAdminClient admin,
                                        BrokerLogdirUpdateDTO b,
                                        Integer broker) {

    Map<TopicPartitionReplica, String> req = Map.of(
        new TopicPartitionReplica(b.getTopic(), b.getPartition(), broker),
        b.getLogDir());
    return admin.alterReplicaLogDirs(req)
        .onErrorResume(UnknownTopicOrPartitionException.class,
            e -> Mono.error(new TopicOrPartitionNotFoundException()))
        .onErrorResume(LogDirNotFoundException.class,
            e -> Mono.error(new LogDirNotFoundApiException()))
        .doOnError(log::error);
  }

  public Mono<Void> updateBrokerConfigByName(KafkaCluster cluster,
                                             Integer broker,
                                             String name,
                                             String value) {
    return adminClientService.get(cluster)
        .flatMap(ac -> ac.updateBrokerConfigByName(broker, name, value))
        .onErrorResume(InvalidRequestException.class,
            e -> Mono.error(new InvalidRequestApiException(e.getMessage())))
        .doOnError(log::error);
  }
}
@@ -17,13 +17,11 @@ import reactor.core.publisher.Mono;
 @RequiredArgsConstructor
 public class KsqlService {
   private final KsqlClient ksqlClient;
-  private final ClustersStorage clustersStorage;
   private final List<BaseStrategy> ksqlStatementStrategies;

-  public Mono<KsqlCommandResponseDTO> executeKsqlCommand(String clusterName,
+  public Mono<KsqlCommandResponseDTO> executeKsqlCommand(KafkaCluster cluster,
                                                          Mono<KsqlCommandDTO> ksqlCommand) {
-    return Mono.justOrEmpty(clustersStorage.getClusterByName(clusterName))
-        .switchIfEmpty(Mono.error(ClusterNotFoundException::new))
+    return Mono.justOrEmpty(cluster)
         .map(KafkaCluster::getKsqldbServer)
         .onErrorResume(e -> {
           Throwable throwable =
@@ -0,0 +1,193 @@
package com.provectus.kafka.ui.service;

import com.provectus.kafka.ui.emitter.BackwardRecordEmitter;
import com.provectus.kafka.ui.emitter.ForwardRecordEmitter;
import com.provectus.kafka.ui.exception.TopicNotFoundException;
import com.provectus.kafka.ui.exception.ValidationException;
import com.provectus.kafka.ui.model.ConsumerPosition;
import com.provectus.kafka.ui.model.CreateTopicMessageDTO;
import com.provectus.kafka.ui.model.KafkaCluster;
import com.provectus.kafka.ui.model.SeekDirectionDTO;
import com.provectus.kafka.ui.model.TopicMessageDTO;
import com.provectus.kafka.ui.model.TopicMessageEventDTO;
import com.provectus.kafka.ui.serde.DeserializationService;
import com.provectus.kafka.ui.serde.RecordSerDe;
import com.provectus.kafka.ui.util.FilterTopicMessageEvents;
import com.provectus.kafka.ui.util.OffsetsSeekBackward;
import com.provectus.kafka.ui.util.OffsetsSeekForward;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Properties;
import java.util.concurrent.CompletableFuture;
import java.util.stream.Collectors;
import javax.annotation.Nullable;
import lombok.RequiredArgsConstructor;
import lombok.extern.log4j.Log4j2;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.header.Header;
import org.apache.kafka.common.header.internals.RecordHeader;
import org.apache.kafka.common.header.internals.RecordHeaders;
import org.apache.kafka.common.serialization.ByteArraySerializer;
import org.apache.kafka.common.utils.Bytes;
import org.springframework.stereotype.Service;
import org.springframework.util.StringUtils;
import reactor.core.publisher.Flux;
import reactor.core.publisher.FluxSink;
import reactor.core.publisher.Mono;
import reactor.core.scheduler.Schedulers;

@Service
@RequiredArgsConstructor
@Log4j2
public class MessagesService {

  private static final int MAX_LOAD_RECORD_LIMIT = 100;
  private static final int DEFAULT_LOAD_RECORD_LIMIT = 20;

  private final AdminClientService adminClientService;
  private final DeserializationService deserializationService;
  private final ConsumerGroupService consumerGroupService;

  public Mono<Void> deleteTopicMessages(KafkaCluster cluster, String topicName,
                                        List<Integer> partitionsToInclude) {
    if (!cluster.getTopics().containsKey(topicName)) {
      throw new TopicNotFoundException();
    }
    return offsetsForDeletion(cluster, topicName, partitionsToInclude)
        .flatMap(offsets ->
            adminClientService.get(cluster).flatMap(ac -> ac.deleteRecords(offsets)));
  }

  private Mono<Map<TopicPartition, Long>> offsetsForDeletion(KafkaCluster cluster, String topicName,
                                                             List<Integer> partitionsToInclude) {
    return Mono.fromSupplier(() -> {
      try (KafkaConsumer<Bytes, Bytes> consumer = consumerGroupService.createConsumer(cluster)) {
        return significantOffsets(consumer, topicName, partitionsToInclude);
      } catch (Exception e) {
        log.error("Error occurred while consuming records", e);
        throw new RuntimeException(e);
      }
    });
  }

  public Mono<RecordMetadata> sendMessage(KafkaCluster cluster, String topic,
                                          CreateTopicMessageDTO msg) {
    if (msg.getKey() == null && msg.getContent() == null) {
      throw new ValidationException("Invalid message: both key and value can't be null");
    }
    if (msg.getPartition() != null
        && msg.getPartition() > cluster.getTopics().get(topic).getPartitionCount() - 1) {
      throw new ValidationException("Invalid partition");
    }
    RecordSerDe serde =
        deserializationService.getRecordDeserializerForCluster(cluster);

    Properties properties = new Properties();
    properties.putAll(cluster.getProperties());
    properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, cluster.getBootstrapServers());
    properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class);
    properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class);
    try (KafkaProducer<byte[], byte[]> producer = new KafkaProducer<>(properties)) {
      ProducerRecord<byte[], byte[]> producerRecord = serde.serialize(
          topic,
          msg.getKey(),
          msg.getContent(),
          msg.getPartition()
      );
      producerRecord = new ProducerRecord<>(
          producerRecord.topic(),
          producerRecord.partition(),
          producerRecord.key(),
          producerRecord.value(),
          createHeaders(msg.getHeaders()));

      CompletableFuture<RecordMetadata> cf = new CompletableFuture<>();
      producer.send(producerRecord, (metadata, exception) -> {
        if (exception != null) {
          cf.completeExceptionally(exception);
        } else {
          cf.complete(metadata);
        }
      });
      return Mono.fromFuture(cf);
    }
  }

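A sketch of how a caller might drive MessagesService.sendMessage; the CreateTopicMessageDTO setters used here are assumed from the generated API model and are not part of this diff:

    CreateTopicMessageDTO msg = new CreateTopicMessageDTO();  // generated DTO; setter names assumed
    msg.setKey("order-1");
    msg.setContent("{\"status\":\"NEW\"}");
    msg.setPartition(null);                                   // let the serde decide the partition
    messagesService.sendMessage(cluster, "orders", msg)
        .doOnNext(meta -> log.info("written to {}-{}@{}", meta.topic(), meta.partition(), meta.offset()))
        .subscribe();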
  private Iterable<Header> createHeaders(@Nullable Map<String, String> clientHeaders) {
    if (clientHeaders == null) {
      return new RecordHeaders();
    }
    RecordHeaders headers = new RecordHeaders();
    clientHeaders.forEach((k, v) -> headers.add(new RecordHeader(k, v.getBytes())));
    return headers;
  }

  public Flux<TopicMessageEventDTO> loadMessages(KafkaCluster cluster, String topic,
                                                 ConsumerPosition consumerPosition, String query,
                                                 Integer limit) {
    int recordsLimit = Optional.ofNullable(limit)
        .map(s -> Math.min(s, MAX_LOAD_RECORD_LIMIT))
        .orElse(DEFAULT_LOAD_RECORD_LIMIT);

    java.util.function.Consumer<? super FluxSink<TopicMessageEventDTO>> emitter;
    RecordSerDe recordDeserializer =
        deserializationService.getRecordDeserializerForCluster(cluster);
    if (consumerPosition.getSeekDirection().equals(SeekDirectionDTO.FORWARD)) {
      emitter = new ForwardRecordEmitter(
          () -> consumerGroupService.createConsumer(cluster),
          new OffsetsSeekForward(topic, consumerPosition),
          recordDeserializer
      );
    } else {
      emitter = new BackwardRecordEmitter(
          (Map<String, Object> props) -> consumerGroupService.createConsumer(cluster, props),
          new OffsetsSeekBackward(topic, consumerPosition, recordsLimit),
          recordDeserializer
      );
    }
    return Flux.create(emitter)
        .filter(m -> filterTopicMessage(m, query))
        .takeWhile(new FilterTopicMessageEvents(recordsLimit))
        .subscribeOn(Schedulers.elastic())
        .share();
  }

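loadMessages picks a forward or backward emitter from the consumer position and caps the stream at recordsLimit. A minimal caller sketch; only the signature above is given, so the ConsumerPosition construction is assumed:

    Flux<TopicMessageEventDTO> events =
        messagesService.loadMessages(cluster, "orders", consumerPosition, "status", 50);
    // yields at most min(50, MAX_LOAD_RECORD_LIMIT) MESSAGE events,
    // each with "status" contained in the key or the content
    events.subscribe(e -> log.info("{}", e.getType()));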
  /**
   * returns end offsets for partitions where start offset != end offsets.
   * This is useful when we need to verify that partition is not empty.
   */
  public static Map<TopicPartition, Long> significantOffsets(Consumer<?, ?> consumer,
                                                             String topicName,
                                                             Collection<Integer>
                                                                 partitionsToInclude) {
    var partitions = consumer.partitionsFor(topicName).stream()
        .filter(p -> partitionsToInclude.isEmpty() || partitionsToInclude.contains(p.partition()))
        .map(p -> new TopicPartition(topicName, p.partition()))
        .collect(Collectors.toList());
    var beginningOffsets = consumer.beginningOffsets(partitions);
    var endOffsets = consumer.endOffsets(partitions);
    return endOffsets.entrySet().stream()
        .filter(entry -> !beginningOffsets.get(entry.getKey()).equals(entry.getValue()))
        .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
  }

  private boolean filterTopicMessage(TopicMessageEventDTO message, String query) {
    if (StringUtils.isEmpty(query)
        || !message.getType().equals(TopicMessageEventDTO.TypeEnum.MESSAGE)) {
      return true;
    }

    final TopicMessageDTO msg = message.getMessage();
    return (!StringUtils.isEmpty(msg.getKey()) && msg.getKey().contains(query))
        || (!StringUtils.isEmpty(msg.getContent()) && msg.getContent().contains(query));
  }

}
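significantOffsets is static and needs only a plain Kafka Consumer, so it can be exercised on its own; a small sketch with a hypothetical topic and partition list:

    try (KafkaConsumer<Bytes, Bytes> consumer = consumerGroupService.createConsumer(cluster)) {
      Map<TopicPartition, Long> toDelete =
          MessagesService.significantOffsets(consumer, "orders", List.of(0, 1));
      // only partitions whose beginning offset differs from the end offset appear here,
      // which is exactly the map deleteTopicMessages passes on to deleteRecords
    }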
@@ -0,0 +1,363 @@
package com.provectus.kafka.ui.service;

import com.provectus.kafka.ui.model.InternalBrokerDiskUsage;
import com.provectus.kafka.ui.model.InternalBrokerMetrics;
import com.provectus.kafka.ui.model.InternalClusterMetrics;
import com.provectus.kafka.ui.model.InternalPartition;
import com.provectus.kafka.ui.model.InternalSegmentSizeDto;
import com.provectus.kafka.ui.model.InternalTopic;
import com.provectus.kafka.ui.model.KafkaCluster;
import com.provectus.kafka.ui.model.MetricDTO;
import com.provectus.kafka.ui.model.ServerStatusDTO;
import com.provectus.kafka.ui.util.ClusterUtil;
import com.provectus.kafka.ui.util.JmxClusterUtil;
import com.provectus.kafka.ui.util.JmxMetricsName;
import com.provectus.kafka.ui.util.JmxMetricsValueName;
import java.math.BigDecimal;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.LongSummaryStatistics;
import java.util.Map;
import java.util.Optional;
import java.util.stream.Collectors;
import lombok.RequiredArgsConstructor;
import lombok.extern.log4j.Log4j2;
import org.apache.kafka.common.Node;
import org.apache.kafka.common.TopicPartition;
import org.springframework.stereotype.Service;
import reactor.core.publisher.Mono;
import reactor.util.function.Tuple2;
import reactor.util.function.Tuple3;
import reactor.util.function.Tuples;

@Service
@RequiredArgsConstructor
@Log4j2
public class MetricsService {

  private final ZookeeperService zookeeperService;
  private final JmxClusterUtil jmxClusterUtil;
  private final AdminClientService adminClientService;
  private final FeatureService featureService;
  private final TopicsService topicsService;

  /**
   * Updates cluster's metrics and topics structure.
   * @param cluster to be updated
   * @return cluster with up-to-date metrics and topics structure
   */
  public Mono<KafkaCluster> updateClusterMetrics(KafkaCluster cluster) {
    return adminClientService.get(cluster)
        .flatMap(
            ac -> ac.getClusterVersion().flatMap(
                version ->
                    getClusterMetrics(ac)
                        .flatMap(i -> fillJmxMetrics(i, cluster, ac))
                        .flatMap(clusterMetrics ->
                            topicsService.getTopicsData(ac).flatMap(it -> {
                              if (cluster.getDisableLogDirsCollection() == null
                                  || !cluster.getDisableLogDirsCollection()) {
                                return updateSegmentMetrics(ac, clusterMetrics, it
                                );
                              } else {
                                return emptySegmentMetrics(clusterMetrics, it);
                              }
                            }
                            ).map(segmentSizeDto -> buildFromData(cluster, version, segmentSizeDto))
                        )
            )
        ).flatMap(
            nc -> featureService.getAvailableFeatures(cluster).collectList()
                .map(f -> nc.toBuilder().features(f).build())
        ).doOnError(e ->
            log.error("Failed to collect cluster {} info", cluster.getName(), e)
        ).onErrorResume(
            e -> Mono.just(cluster.toBuilder()
                .status(ServerStatusDTO.OFFLINE)
                .lastKafkaException(e)
                .build())
        );
  }

  private KafkaCluster buildFromData(KafkaCluster currentCluster,
                                     String version,
                                     InternalSegmentSizeDto segmentSizeDto) {

    var topics = segmentSizeDto.getInternalTopicWithSegmentSize();
    var brokersMetrics = segmentSizeDto.getClusterMetricsWithSegmentSize();
    var brokersIds = new ArrayList<>(brokersMetrics.getInternalBrokerMetrics().keySet());

    InternalClusterMetrics.InternalClusterMetricsBuilder metricsBuilder =
        brokersMetrics.toBuilder();

    InternalClusterMetrics topicsMetrics = collectTopicsMetrics(topics);

    ServerStatusDTO zookeeperStatus = ServerStatusDTO.OFFLINE;
    Throwable zookeeperException = null;
    try {
      zookeeperStatus = zookeeperService.isZookeeperOnline(currentCluster)
          ? ServerStatusDTO.ONLINE
          : ServerStatusDTO.OFFLINE;
    } catch (Throwable e) {
      zookeeperException = e;
    }

    InternalClusterMetrics clusterMetrics = metricsBuilder
        .activeControllers(brokersMetrics.getActiveControllers())
        .topicCount(topicsMetrics.getTopicCount())
        .brokerCount(brokersMetrics.getBrokerCount())
        .underReplicatedPartitionCount(topicsMetrics.getUnderReplicatedPartitionCount())
        .inSyncReplicasCount(topicsMetrics.getInSyncReplicasCount())
        .outOfSyncReplicasCount(topicsMetrics.getOutOfSyncReplicasCount())
        .onlinePartitionCount(topicsMetrics.getOnlinePartitionCount())
        .offlinePartitionCount(topicsMetrics.getOfflinePartitionCount())
        .zooKeeperStatus(ClusterUtil.convertToIntServerStatus(zookeeperStatus))
        .version(version)
        .build();

    return currentCluster.toBuilder()
        .version(version)
        .status(ServerStatusDTO.ONLINE)
        .zookeeperStatus(zookeeperStatus)
        .lastZookeeperException(zookeeperException)
        .lastKafkaException(null)
        .metrics(clusterMetrics)
        .topics(topics)
        .brokers(brokersIds)
        .build();
  }

  private InternalClusterMetrics collectTopicsMetrics(Map<String, InternalTopic> topics) {

    int underReplicatedPartitions = 0;
    int inSyncReplicasCount = 0;
    int outOfSyncReplicasCount = 0;
    int onlinePartitionCount = 0;
    int offlinePartitionCount = 0;

    for (InternalTopic topic : topics.values()) {
      underReplicatedPartitions += topic.getUnderReplicatedPartitions();
      inSyncReplicasCount += topic.getInSyncReplicas();
      outOfSyncReplicasCount += (topic.getReplicas() - topic.getInSyncReplicas());
      onlinePartitionCount +=
          topic.getPartitions().values().stream().mapToInt(s -> s.getLeader() == null ? 0 : 1)
              .sum();
      offlinePartitionCount +=
          topic.getPartitions().values().stream().mapToInt(s -> s.getLeader() != null ? 0 : 1)
              .sum();
    }

    return InternalClusterMetrics.builder()
        .underReplicatedPartitionCount(underReplicatedPartitions)
        .inSyncReplicasCount(inSyncReplicasCount)
        .outOfSyncReplicasCount(outOfSyncReplicasCount)
        .onlinePartitionCount(onlinePartitionCount)
        .offlinePartitionCount(offlinePartitionCount)
        .topicCount(topics.size())
        .build();
  }

  private Mono<InternalClusterMetrics> getClusterMetrics(ReactiveAdminClient client) {
    return client.describeCluster().map(desc ->
        InternalClusterMetrics.builder()
            .brokerCount(desc.getNodes().size())
            .activeControllers(desc.getController() != null ? 1 : 0)
            .build()
    );
  }

  private InternalTopic mergeWithStats(InternalTopic topic,
                                       Map<String, LongSummaryStatistics> topics,
                                       Map<TopicPartition, LongSummaryStatistics> partitions) {
    final LongSummaryStatistics stats = topics.get(topic.getName());

    return topic.toBuilder()
        .segmentSize(stats.getSum())
        .segmentCount(stats.getCount())
        .partitions(
            topic.getPartitions().entrySet().stream().map(e ->
                Tuples.of(e.getKey(), mergeWithStats(topic.getName(), e.getValue(), partitions))
            ).collect(Collectors.toMap(
                Tuple2::getT1,
                Tuple2::getT2
            ))
        ).build();
  }

  private InternalPartition mergeWithStats(String topic, InternalPartition partition,
                                           Map<TopicPartition, LongSummaryStatistics> partitions) {
    final LongSummaryStatistics stats =
        partitions.get(new TopicPartition(topic, partition.getPartition()));
    return partition.toBuilder()
        .segmentSize(stats.getSum())
        .segmentCount(stats.getCount())
        .build();
  }

  private Mono<InternalSegmentSizeDto> emptySegmentMetrics(InternalClusterMetrics clusterMetrics,
                                                           List<InternalTopic> internalTopics) {
    return Mono.just(
        InternalSegmentSizeDto.builder()
            .clusterMetricsWithSegmentSize(
                clusterMetrics.toBuilder()
                    .segmentSize(0)
                    .segmentCount(0)
                    .internalBrokerDiskUsage(Collections.emptyMap())
                    .build()
            )
            .internalTopicWithSegmentSize(
                internalTopics.stream().collect(
                    Collectors.toMap(
                        InternalTopic::getName,
                        i -> i
                    )
                )
            ).build()
    );
  }

  private Mono<InternalSegmentSizeDto> updateSegmentMetrics(ReactiveAdminClient ac,
                                                            InternalClusterMetrics clusterMetrics,
                                                            List<InternalTopic> internalTopics) {
    return ac.describeCluster().flatMap(
        clusterDescription ->
            ac.describeLogDirs().map(log -> {
              final List<Tuple3<Integer, TopicPartition, Long>> topicPartitions =
                  log.entrySet().stream().flatMap(b ->
                      b.getValue().entrySet().stream().flatMap(topicMap ->
                          topicMap.getValue().replicaInfos.entrySet().stream()
                              .map(e -> Tuples.of(b.getKey(), e.getKey(), e.getValue().size))
                      )
                  ).collect(Collectors.toList());

              final Map<TopicPartition, LongSummaryStatistics> partitionStats =
                  topicPartitions.stream().collect(
                      Collectors.groupingBy(
                          Tuple2::getT2,
                          Collectors.summarizingLong(Tuple3::getT3)
                      )
                  );

              final Map<String, LongSummaryStatistics> topicStats =
                  topicPartitions.stream().collect(
                      Collectors.groupingBy(
                          t -> t.getT2().topic(),
                          Collectors.summarizingLong(Tuple3::getT3)
                      )
                  );

              final Map<Integer, LongSummaryStatistics> brokerStats =
                  topicPartitions.stream().collect(
                      Collectors.groupingBy(
                          Tuple2::getT1,
                          Collectors.summarizingLong(Tuple3::getT3)
                      )
                  );

              final LongSummaryStatistics summary =
                  topicPartitions.stream().collect(Collectors.summarizingLong(Tuple3::getT3));

              final Map<String, InternalTopic> resultTopics = internalTopics.stream().map(e ->
                  Tuples.of(e.getName(), mergeWithStats(e, topicStats, partitionStats))
              ).collect(Collectors.toMap(
                  Tuple2::getT1,
                  Tuple2::getT2
              ));

              final Map<Integer, InternalBrokerDiskUsage> resultBrokers =
                  brokerStats.entrySet().stream().map(e ->
                      Tuples.of(e.getKey(), InternalBrokerDiskUsage.builder()
                          .segmentSize(e.getValue().getSum())
                          .segmentCount(e.getValue().getCount())
                          .build()
                      )
                  ).collect(Collectors.toMap(
                      Tuple2::getT1,
                      Tuple2::getT2
                  ));

              return InternalSegmentSizeDto.builder()
                  .clusterMetricsWithSegmentSize(
                      clusterMetrics.toBuilder()
                          .segmentSize(summary.getSum())
                          .segmentCount(summary.getCount())
                          .internalBrokerDiskUsage(resultBrokers)
                          .build()
                  )
                  .internalTopicWithSegmentSize(resultTopics).build();
            })
    );
  }

  private List<MetricDTO> getJmxMetric(KafkaCluster cluster, Node node) {
    return Optional.of(cluster)
        .filter(c -> c.getJmxPort() != null)
        .filter(c -> c.getJmxPort() > 0)
        .map(c -> jmxClusterUtil.getJmxMetrics(node.host(), c.getJmxPort(), c.isJmxSsl(),
            c.getJmxUsername(), c.getJmxPassword()))
        .orElse(Collections.emptyList());
  }

  private Mono<InternalClusterMetrics> fillJmxMetrics(InternalClusterMetrics internalClusterMetrics,
                                                      KafkaCluster cluster,
                                                      ReactiveAdminClient ac) {
    return fillBrokerMetrics(internalClusterMetrics, cluster, ac)
        .map(this::calculateClusterMetrics);
  }

  private Mono<InternalClusterMetrics> fillBrokerMetrics(
      InternalClusterMetrics internalClusterMetrics, KafkaCluster cluster, ReactiveAdminClient ac) {
    return ac.describeCluster()
        .flatMapIterable(ReactiveAdminClient.ClusterDescription::getNodes)
        .map(broker ->
            Map.of(broker.id(), InternalBrokerMetrics.builder()
                .metrics(getJmxMetric(cluster, broker)).build())
        )
        .collectList()
        .map(s -> internalClusterMetrics.toBuilder()
            .internalBrokerMetrics(ClusterUtil.toSingleMap(s.stream())).build());
  }

  private InternalClusterMetrics calculateClusterMetrics(
      InternalClusterMetrics internalClusterMetrics) {
    final List<MetricDTO> metrics = internalClusterMetrics.getInternalBrokerMetrics().values()
        .stream()
        .flatMap(b -> b.getMetrics().stream())
        .collect(
            Collectors.groupingBy(
                MetricDTO::getCanonicalName,
                Collectors.reducing(jmxClusterUtil::reduceJmxMetrics)
            )
        ).values().stream()
        .filter(Optional::isPresent)
        .map(Optional::get)
        .collect(Collectors.toList());
    final InternalClusterMetrics.InternalClusterMetricsBuilder metricsBuilder =
        internalClusterMetrics.toBuilder().metrics(metrics);
    metricsBuilder.bytesInPerSec(findTopicMetrics(
        metrics, JmxMetricsName.BytesInPerSec, JmxMetricsValueName.FiveMinuteRate
    ));
    metricsBuilder.bytesOutPerSec(findTopicMetrics(
        metrics, JmxMetricsName.BytesOutPerSec, JmxMetricsValueName.FiveMinuteRate
    ));
    return metricsBuilder.build();
  }

  private Map<String, BigDecimal> findTopicMetrics(List<MetricDTO> metrics,
                                                   JmxMetricsName metricsName,
                                                   JmxMetricsValueName valueName) {
    return metrics.stream().filter(m -> metricsName.name().equals(m.getName()))
        .filter(m -> m.getParams().containsKey("topic"))
        .filter(m -> m.getValue().containsKey(valueName.name()))
        .map(m -> Tuples.of(
            m.getParams().get("topic"),
            m.getValue().get(valueName.name())
        )).collect(Collectors.groupingBy(
            Tuple2::getT1,
            Collectors.reducing(BigDecimal.ZERO, Tuple2::getT2, BigDecimal::add)
        ));
  }
}
@@ -1,20 +0,0 @@
-package com.provectus.kafka.ui.service;
-
-import com.provectus.kafka.ui.model.KafkaCluster;
-import lombok.RequiredArgsConstructor;
-import lombok.extern.log4j.Log4j2;
-import org.springframework.stereotype.Service;
-import reactor.core.publisher.Mono;
-
-@Service
-@RequiredArgsConstructor
-@Log4j2
-public class MetricsUpdateService {
-
-  private final KafkaService kafkaService;
-
-  public Mono<KafkaCluster> updateMetrics(KafkaCluster kafkaCluster) {
-    log.debug("Start getting metrics for kafkaCluster: {}", kafkaCluster.getName());
-    return kafkaService.getUpdatedCluster(kafkaCluster);
-  }
-}
@@ -4,6 +4,8 @@ import static com.google.common.util.concurrent.Uninterruptibles.getUninterrupti
 import static java.util.stream.Collectors.toList;
 import static java.util.stream.Collectors.toMap;

+import com.provectus.kafka.ui.exception.IllegalEntityStateException;
+import com.provectus.kafka.ui.exception.NotFoundException;
 import com.provectus.kafka.ui.util.MapUtil;
 import com.provectus.kafka.ui.util.NumberUtil;
 import java.io.Closeable;
@@ -40,6 +42,8 @@ import org.apache.kafka.common.TopicPartition;
 import org.apache.kafka.common.TopicPartitionReplica;
 import org.apache.kafka.common.acl.AclOperation;
 import org.apache.kafka.common.config.ConfigResource;
+import org.apache.kafka.common.errors.GroupIdNotFoundException;
+import org.apache.kafka.common.errors.GroupNotEmptyException;
 import org.apache.kafka.common.requests.DescribeLogDirsResponse;
 import reactor.core.publisher.Mono;

@@ -186,7 +190,11 @@ public class ReactiveAdminClient implements Closeable {
   }

   public Mono<Void> deleteConsumerGroups(Collection<String> groupIds) {
-    return toMono(client.deleteConsumerGroups(groupIds).all());
+    return toMono(client.deleteConsumerGroups(groupIds).all())
+        .onErrorResume(GroupIdNotFoundException.class,
+            th -> Mono.error(new NotFoundException("The group id does not exist")))
+        .onErrorResume(GroupNotEmptyException.class,
+            th -> Mono.error(new IllegalEntityStateException("The group is not empty")));
   }

   public Mono<Void> createTopic(String name,
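The ReactiveAdminClient change above maps the two Kafka admin errors onto application exceptions; a caller-side sketch of what that buys (how callers react to the mapped exceptions is an assumption, only the mapping itself is in this diff):

    reactiveAdminClient.deleteConsumerGroups(List.of("ui-consumer-group"))
        .doOnError(NotFoundException.class,
            e -> log.warn("group already gone: {}", e.getMessage()))
        .doOnError(IllegalEntityStateException.class,
            e -> log.warn("group still has members: {}", e.getMessage()))
        .subscribe();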
@@ -0,0 +1,443 @@
package com.provectus.kafka.ui.service;

import com.provectus.kafka.ui.exception.TopicMetadataException;
import com.provectus.kafka.ui.exception.TopicNotFoundException;
import com.provectus.kafka.ui.exception.ValidationException;
import com.provectus.kafka.ui.mapper.ClusterMapper;
import com.provectus.kafka.ui.model.CleanupPolicy;
import com.provectus.kafka.ui.model.Feature;
import com.provectus.kafka.ui.model.InternalPartition;
import com.provectus.kafka.ui.model.InternalReplica;
import com.provectus.kafka.ui.model.InternalTopic;
import com.provectus.kafka.ui.model.InternalTopicConfig;
import com.provectus.kafka.ui.model.KafkaCluster;
import com.provectus.kafka.ui.model.PartitionsIncreaseDTO;
import com.provectus.kafka.ui.model.PartitionsIncreaseResponseDTO;
import com.provectus.kafka.ui.model.ReplicationFactorChangeDTO;
import com.provectus.kafka.ui.model.ReplicationFactorChangeResponseDTO;
import com.provectus.kafka.ui.model.TopicColumnsToSortDTO;
import com.provectus.kafka.ui.model.TopicConfigDTO;
import com.provectus.kafka.ui.model.TopicCreationDTO;
import com.provectus.kafka.ui.model.TopicDTO;
import com.provectus.kafka.ui.model.TopicDetailsDTO;
import com.provectus.kafka.ui.model.TopicMessageSchemaDTO;
import com.provectus.kafka.ui.model.TopicUpdateDTO;
import com.provectus.kafka.ui.model.TopicsResponseDTO;
import com.provectus.kafka.ui.serde.DeserializationService;
import com.provectus.kafka.ui.util.ClusterUtil;
import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.function.Predicate;
import java.util.stream.Collectors;
import lombok.RequiredArgsConstructor;
import lombok.SneakyThrows;
import org.apache.commons.lang3.StringUtils;
import org.apache.kafka.clients.admin.NewPartitionReassignment;
import org.apache.kafka.clients.admin.NewPartitions;
import org.apache.kafka.common.TopicPartition;
import org.springframework.stereotype.Service;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;

@Service
@RequiredArgsConstructor
public class TopicsService {

  private static final Integer DEFAULT_PAGE_SIZE = 25;

  private final AdminClientService adminClientService;
  private final ConsumerGroupService consumerGroupService;
  private final ClustersStorage clustersStorage;
  private final ClusterMapper clusterMapper;
  private final DeserializationService deserializationService;

  public TopicsResponseDTO getTopics(KafkaCluster cluster,
                                     Optional<Integer> page,
                                     Optional<Integer> nullablePerPage,
                                     Optional<Boolean> showInternal,
                                     Optional<String> search,
                                     Optional<TopicColumnsToSortDTO> sortBy) {
    Predicate<Integer> positiveInt = i -> i > 0;
    int perPage = nullablePerPage.filter(positiveInt).orElse(DEFAULT_PAGE_SIZE);
    var topicsToSkip = (page.filter(positiveInt).orElse(1) - 1) * perPage;
    List<InternalTopic> topics = cluster.getTopics().values().stream()
        .filter(topic -> !topic.isInternal()
            || showInternal
            .map(i -> topic.isInternal() == i)
            .orElse(true))
        .filter(topic ->
            search
                .map(s -> StringUtils.containsIgnoreCase(topic.getName(), s))
                .orElse(true))
        .sorted(getComparatorForTopic(sortBy))
        .collect(Collectors.toList());
    var totalPages = (topics.size() / perPage)
        + (topics.size() % perPage == 0 ? 0 : 1);
    return new TopicsResponseDTO()
        .pageCount(totalPages)
        .topics(
            topics.stream()
                .skip(topicsToSkip)
                .limit(perPage)
                .map(t ->
                    clusterMapper.toTopic(
                        t.toBuilder().partitions(getTopicPartitions(cluster, t)).build()
                    )
                )
                .collect(Collectors.toList())
        );
  }

private Comparator<InternalTopic> getComparatorForTopic(Optional<TopicColumnsToSortDTO> sortBy) {
|
||||||
|
var defaultComparator = Comparator.comparing(InternalTopic::getName);
|
||||||
|
if (sortBy.isEmpty()) {
|
||||||
|
return defaultComparator;
|
||||||
|
}
|
||||||
|
switch (sortBy.get()) {
|
||||||
|
case TOTAL_PARTITIONS:
|
||||||
|
return Comparator.comparing(InternalTopic::getPartitionCount);
|
||||||
|
case OUT_OF_SYNC_REPLICAS:
|
||||||
|
return Comparator.comparing(t -> t.getReplicas() - t.getInSyncReplicas());
|
||||||
|
case REPLICATION_FACTOR:
|
||||||
|
return Comparator.comparing(InternalTopic::getReplicationFactor);
|
||||||
|
case NAME:
|
||||||
|
default:
|
||||||
|
return defaultComparator;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
public Optional<TopicDetailsDTO> getTopicDetails(KafkaCluster cluster, String topicName) {
|
||||||
|
return Optional.ofNullable(cluster.getTopics()).map(l -> l.get(topicName)).map(
|
||||||
|
t -> t.toBuilder().partitions(getTopicPartitions(cluster, t)
|
||||||
|
).build()
|
||||||
|
).map(t -> clusterMapper.toTopicDetails(t, cluster.getMetrics()));
|
||||||
|
}
|
||||||
|
|
||||||
|
@SneakyThrows
|
||||||
|
public Mono<List<InternalTopic>> getTopicsData(ReactiveAdminClient client) {
|
||||||
|
return client.listTopics(true)
|
||||||
|
.flatMap(topics -> getTopicsData(client, topics).collectList());
|
||||||
|
}
|
||||||
|
|
||||||
|
private Flux<InternalTopic> getTopicsData(ReactiveAdminClient client, Collection<String> topics) {
|
||||||
|
final Mono<Map<String, List<InternalTopicConfig>>> configsMono =
|
||||||
|
loadTopicsConfig(client, topics);
|
||||||
|
|
||||||
|
return client.describeTopics(topics)
|
||||||
|
.map(m -> m.values().stream()
|
||||||
|
.map(ClusterUtil::mapToInternalTopic).collect(Collectors.toList()))
|
||||||
|
.flatMap(internalTopics -> configsMono
|
||||||
|
.map(configs -> mergeWithConfigs(internalTopics, configs).values()))
|
||||||
|
.flatMapMany(Flux::fromIterable);
|
||||||
|
}
|
||||||
|
|
||||||
|
public Optional<List<TopicConfigDTO>> getTopicConfigs(KafkaCluster cluster, String topicName) {
|
||||||
|
return Optional.of(cluster)
|
||||||
|
.map(KafkaCluster::getTopics)
|
||||||
|
.map(t -> t.get(topicName))
|
||||||
|
.map(t -> t.getTopicConfigs().stream().map(clusterMapper::toTopicConfig)
|
||||||
|
.collect(Collectors.toList()));
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
@SneakyThrows
|
||||||
|
private Mono<InternalTopic> createTopic(ReactiveAdminClient adminClient,
|
||||||
|
Mono<TopicCreationDTO> topicCreation) {
|
||||||
|
return topicCreation.flatMap(topicData ->
|
||||||
|
adminClient.createTopic(
|
||||||
|
topicData.getName(),
|
||||||
|
topicData.getPartitions(),
|
||||||
|
topicData.getReplicationFactor().shortValue(),
|
||||||
|
topicData.getConfigs()
|
||||||
|
).thenReturn(topicData)
|
||||||
|
)
|
||||||
|
.onErrorResume(t -> Mono.error(new TopicMetadataException(t.getMessage())))
|
||||||
|
.flatMap(topicData -> getUpdatedTopic(adminClient, topicData.getName()))
|
||||||
|
.switchIfEmpty(Mono.error(new RuntimeException("Can't find created topic")));
|
||||||
|
}
|
||||||
|
|
||||||
|
public Mono<TopicDTO> createTopic(
|
||||||
|
KafkaCluster cluster, Mono<TopicCreationDTO> topicCreation) {
|
||||||
|
return adminClientService.get(cluster).flatMap(ac -> createTopic(ac, topicCreation))
|
||||||
|
.doOnNext(t -> clustersStorage.onTopicUpdated(cluster, t))
|
||||||
|
.map(clusterMapper::toTopic);
|
||||||
|
}
|
||||||
|
|
||||||
|
private Map<String, InternalTopic> mergeWithConfigs(
|
||||||
|
List<InternalTopic> topics, Map<String, List<InternalTopicConfig>> configs) {
|
||||||
|
return topics.stream()
|
||||||
|
.map(t -> t.toBuilder().topicConfigs(configs.get(t.getName())).build())
|
||||||
|
.map(t -> t.toBuilder().cleanUpPolicy(
|
||||||
|
CleanupPolicy.fromString(t.getTopicConfigs().stream()
|
||||||
|
.filter(config -> config.getName().equals("cleanup.policy"))
|
||||||
|
.findFirst()
|
||||||
|
.orElseGet(() -> InternalTopicConfig.builder().value("unknown").build())
|
||||||
|
.getValue())).build())
|
||||||
|
.collect(Collectors.toMap(
|
||||||
|
InternalTopic::getName,
|
||||||
|
e -> e
|
||||||
|
));
|
||||||
|
}
|
||||||
|
|
||||||
|
public Mono<InternalTopic> getUpdatedTopic(ReactiveAdminClient ac, String topicName) {
|
||||||
|
return getTopicsData(ac, List.of(topicName)).next();
|
||||||
|
}
|
||||||
|
|
||||||
|
public Mono<InternalTopic> updateTopic(KafkaCluster cluster,
|
||||||
|
String topicName,
|
||||||
|
TopicUpdateDTO topicUpdate) {
|
||||||
|
return adminClientService.get(cluster)
|
||||||
|
.flatMap(ac ->
|
||||||
|
ac.updateTopicConfig(topicName,
|
||||||
|
topicUpdate.getConfigs()).then(getUpdatedTopic(ac, topicName)));
|
||||||
|
}
|
||||||
|
|
||||||
|
public Mono<TopicDTO> updateTopic(KafkaCluster cl, String topicName,
|
||||||
|
Mono<TopicUpdateDTO> topicUpdate) {
|
||||||
|
return topicUpdate
|
||||||
|
.flatMap(t -> updateTopic(cl, topicName, t))
|
||||||
|
.doOnNext(t -> clustersStorage.onTopicUpdated(cl, t))
|
||||||
|
.map(clusterMapper::toTopic);
|
||||||
|
}
|
||||||
|
|
||||||
|
@SneakyThrows
|
||||||
|
private Mono<Map<String, List<InternalTopicConfig>>> loadTopicsConfig(
|
||||||
|
ReactiveAdminClient client, Collection<String> topicNames) {
|
||||||
|
return client.getTopicsConfig(topicNames)
|
||||||
|
.map(configs ->
|
||||||
|
configs.entrySet().stream().collect(Collectors.toMap(
|
||||||
|
Map.Entry::getKey,
|
||||||
|
c -> c.getValue().stream()
|
||||||
|
.map(ClusterUtil::mapToInternalTopicConfig)
|
||||||
|
.collect(Collectors.toList()))));
|
||||||
|
}
|
||||||
|
|
||||||
|
private Mono<InternalTopic> changeReplicationFactor(
|
||||||
|
ReactiveAdminClient adminClient,
|
||||||
|
String topicName,
|
||||||
|
Map<TopicPartition, Optional<NewPartitionReassignment>> reassignments
|
||||||
|
) {
|
||||||
|
return adminClient.alterPartitionReassignments(reassignments)
|
||||||
|
.then(getUpdatedTopic(adminClient, topicName));
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Change topic replication factor, works on brokers versions 5.4.x and higher
|
||||||
|
*/
|
||||||
|
public Mono<ReplicationFactorChangeResponseDTO> changeReplicationFactor(
|
||||||
|
KafkaCluster cluster,
|
||||||
|
String topicName,
|
||||||
|
ReplicationFactorChangeDTO replicationFactorChange) {
|
||||||
|
return adminClientService.get(cluster)
|
||||||
|
.flatMap(ac -> {
|
||||||
|
Integer actual = cluster.getTopics().get(topicName).getReplicationFactor();
|
||||||
|
Integer requested = replicationFactorChange.getTotalReplicationFactor();
|
||||||
|
Integer brokersCount = cluster.getMetrics().getBrokerCount();
|
||||||
|
|
||||||
|
if (requested.equals(actual)) {
|
||||||
|
return Mono.error(
|
||||||
|
new ValidationException(
|
||||||
|
String.format("Topic already has replicationFactor %s.", actual)));
|
||||||
|
}
|
||||||
|
if (requested > brokersCount) {
|
||||||
|
return Mono.error(
|
||||||
|
new ValidationException(
|
||||||
|
String.format("Requested replication factor %s more than brokers count %s.",
|
||||||
|
requested, brokersCount)));
|
||||||
|
}
|
||||||
|
return changeReplicationFactor(ac, topicName,
|
||||||
|
getPartitionsReassignments(cluster, topicName,
|
||||||
|
replicationFactorChange));
|
||||||
|
})
|
||||||
|
.doOnNext(topic -> clustersStorage.onTopicUpdated(cluster, topic))
|
||||||
|
.map(t -> new ReplicationFactorChangeResponseDTO()
|
||||||
|
.topicName(t.getName())
|
||||||
|
.totalReplicationFactor(t.getReplicationFactor()));
|
||||||
|
}
|
||||||
|
|
||||||
|
private Map<TopicPartition, Optional<NewPartitionReassignment>> getPartitionsReassignments(
|
||||||
|
KafkaCluster cluster,
|
||||||
|
String topicName,
|
||||||
|
ReplicationFactorChangeDTO replicationFactorChange) {
|
||||||
|
// Current assignment map (Partition number -> List of brokers)
|
||||||
|
Map<Integer, List<Integer>> currentAssignment = getCurrentAssignment(cluster, topicName);
|
||||||
|
// Brokers map (Broker id -> count)
|
||||||
|
Map<Integer, Integer> brokersUsage = getBrokersMap(cluster, currentAssignment);
|
||||||
|
int currentReplicationFactor = cluster.getTopics().get(topicName).getReplicationFactor();
|
||||||
|
|
||||||
|
// If we should to increase Replication factor
|
||||||
|
if (replicationFactorChange.getTotalReplicationFactor() > currentReplicationFactor) {
|
||||||
|
// For each partition
|
||||||
|
for (var assignmentList : currentAssignment.values()) {
|
||||||
|
// Get brokers list sorted by usage
|
||||||
|
var brokers = brokersUsage.entrySet().stream()
|
||||||
|
.sorted(Map.Entry.comparingByValue())
|
||||||
|
.map(Map.Entry::getKey)
|
||||||
|
.collect(Collectors.toList());
|
||||||
|
|
||||||
|
// Iterate brokers and try to add them in assignment
|
||||||
|
// while (partition replicas count != requested replication factor)
|
||||||
|
for (Integer broker : brokers) {
|
||||||
|
if (!assignmentList.contains(broker)) {
|
||||||
|
assignmentList.add(broker);
|
||||||
|
brokersUsage.merge(broker, 1, Integer::sum);
|
||||||
|
}
|
||||||
|
if (assignmentList.size() == replicationFactorChange.getTotalReplicationFactor()) {
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if (assignmentList.size() != replicationFactorChange.getTotalReplicationFactor()) {
|
||||||
|
throw new ValidationException("Something went wrong during adding replicas");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// If we should to decrease Replication factor
|
||||||
|
} else if (replicationFactorChange.getTotalReplicationFactor() < currentReplicationFactor) {
|
||||||
|
for (Map.Entry<Integer, List<Integer>> assignmentEntry : currentAssignment.entrySet()) {
|
||||||
|
var partition = assignmentEntry.getKey();
|
||||||
|
var brokers = assignmentEntry.getValue();
|
||||||
|
|
||||||
|
// Get brokers list sorted by usage in reverse order
|
||||||
|
var brokersUsageList = brokersUsage.entrySet().stream()
|
||||||
|
.sorted(Map.Entry.comparingByValue(Comparator.reverseOrder()))
|
||||||
|
.map(Map.Entry::getKey)
|
||||||
|
.collect(Collectors.toList());
|
||||||
|
|
||||||
|
// Iterate brokers and try to remove them from assignment
|
||||||
|
// while (partition replicas count != requested replication factor)
|
||||||
|
for (Integer broker : brokersUsageList) {
|
||||||
|
// Check is the broker the leader of partition
|
||||||
|
if (!cluster.getTopics().get(topicName).getPartitions().get(partition).getLeader()
|
||||||
|
.equals(broker)) {
|
||||||
|
brokers.remove(broker);
|
||||||
|
brokersUsage.merge(broker, -1, Integer::sum);
|
||||||
|
}
|
||||||
|
if (brokers.size() == replicationFactorChange.getTotalReplicationFactor()) {
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if (brokers.size() != replicationFactorChange.getTotalReplicationFactor()) {
|
||||||
|
throw new ValidationException("Something went wrong during removing replicas");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
throw new ValidationException("Replication factor already equals requested");
|
||||||
|
}
|
||||||
|
|
||||||
|
// Return result map
|
||||||
|
return currentAssignment.entrySet().stream().collect(Collectors.toMap(
|
||||||
|
e -> new TopicPartition(topicName, e.getKey()),
|
||||||
|
e -> Optional.of(new NewPartitionReassignment(e.getValue()))
|
||||||
|
));
|
||||||
|
}
|
||||||
|
|
||||||
|
private Map<Integer, List<Integer>> getCurrentAssignment(KafkaCluster cluster, String topicName) {
|
||||||
|
return cluster.getTopics().get(topicName).getPartitions().values().stream()
|
||||||
|
.collect(Collectors.toMap(
|
||||||
|
InternalPartition::getPartition,
|
||||||
|
p -> p.getReplicas().stream()
|
||||||
|
.map(InternalReplica::getBroker)
|
||||||
|
.collect(Collectors.toList())
|
||||||
|
));
|
||||||
|
}
|
||||||
|
|
||||||
|
private Map<Integer, Integer> getBrokersMap(KafkaCluster cluster,
|
||||||
|
Map<Integer, List<Integer>> currentAssignment) {
|
||||||
|
Map<Integer, Integer> result = cluster.getBrokers().stream()
|
||||||
|
.collect(Collectors.toMap(
|
||||||
|
c -> c,
|
||||||
|
c -> 0
|
||||||
|
));
|
||||||
|
currentAssignment.values().forEach(brokers -> brokers
|
||||||
|
.forEach(broker -> result.put(broker, result.get(broker) + 1)));
|
||||||
|
|
||||||
|
return result;
|
||||||
|
}
|
||||||
|
|
||||||
|
public Mono<PartitionsIncreaseResponseDTO> increaseTopicPartitions(
|
||||||
|
KafkaCluster cluster,
|
||||||
|
String topicName,
|
||||||
|
PartitionsIncreaseDTO partitionsIncrease) {
|
||||||
|
return adminClientService.get(cluster)
|
||||||
|
.flatMap(ac -> {
|
||||||
|
Integer actualCount = cluster.getTopics().get(topicName).getPartitionCount();
|
||||||
|
Integer requestedCount = partitionsIncrease.getTotalPartitionsCount();
|
||||||
|
|
||||||
|
if (requestedCount < actualCount) {
|
||||||
|
return Mono.error(
|
||||||
|
new ValidationException(String.format(
|
||||||
|
"Topic currently has %s partitions, which is higher than the requested %s.",
|
||||||
|
actualCount, requestedCount)));
|
||||||
|
}
|
||||||
|
if (requestedCount.equals(actualCount)) {
|
||||||
|
return Mono.error(
|
||||||
|
new ValidationException(
|
||||||
|
String.format("Topic already has %s partitions.", actualCount)));
|
||||||
|
}
|
||||||
|
|
||||||
|
Map<String, NewPartitions> newPartitionsMap = Collections.singletonMap(
|
||||||
|
topicName,
|
||||||
|
NewPartitions.increaseTo(partitionsIncrease.getTotalPartitionsCount())
|
||||||
|
);
|
||||||
|
return ac.createPartitions(newPartitionsMap)
|
||||||
|
.then(getUpdatedTopic(ac, topicName));
|
||||||
|
})
|
||||||
|
.doOnNext(t -> clustersStorage.onTopicUpdated(cluster, t))
|
||||||
|
.map(t -> new PartitionsIncreaseResponseDTO()
|
||||||
|
.topicName(t.getName())
|
||||||
|
.totalPartitionsCount(t.getPartitionCount()));
|
||||||
|
}
|
||||||
|
|
||||||
|
private Map<Integer, InternalPartition> getTopicPartitions(KafkaCluster c, InternalTopic topic) {
|
||||||
|
var tps = topic.getPartitions().values().stream()
|
||||||
|
.map(t -> new TopicPartition(topic.getName(), t.getPartition()))
|
||||||
|
.collect(Collectors.toList());
|
||||||
|
Map<Integer, InternalPartition> partitions =
|
||||||
|
topic.getPartitions().values().stream().collect(Collectors.toMap(
|
||||||
|
InternalPartition::getPartition,
|
||||||
|
tp -> tp
|
||||||
|
));
|
||||||
|
|
||||||
|
try (var consumer = consumerGroupService.createConsumer(c)) {
|
||||||
|
final Map<TopicPartition, Long> earliest = consumer.beginningOffsets(tps);
|
||||||
|
final Map<TopicPartition, Long> latest = consumer.endOffsets(tps);
|
||||||
|
|
||||||
|
return tps.stream()
|
||||||
|
.map(tp -> partitions.get(tp.partition()).toBuilder()
|
||||||
|
.offsetMin(Optional.ofNullable(earliest.get(tp)).orElse(0L))
|
||||||
|
.offsetMax(Optional.ofNullable(latest.get(tp)).orElse(0L))
|
||||||
|
.build()
|
||||||
|
).collect(Collectors.toMap(
|
||||||
|
InternalPartition::getPartition,
|
||||||
|
tp -> tp
|
||||||
|
));
|
||||||
|
} catch (Exception e) {
|
||||||
|
return Collections.emptyMap();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
public Mono<Void> deleteTopic(KafkaCluster cluster, String topicName) {
|
||||||
|
var topicDetails = getTopicDetails(cluster, topicName)
|
||||||
|
.orElseThrow(TopicNotFoundException::new);
|
||||||
|
if (cluster.getFeatures().contains(Feature.TOPIC_DELETION)) {
|
||||||
|
return adminClientService.get(cluster).flatMap(c -> c.deleteTopic(topicName))
|
||||||
|
.doOnSuccess(t -> clustersStorage.onTopicDeleted(cluster, topicName));
|
||||||
|
} else {
|
||||||
|
return Mono.error(new ValidationException("Topic deletion restricted"));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
public TopicMessageSchemaDTO getTopicSchema(KafkaCluster cluster, String topicName) {
|
||||||
|
if (!cluster.getTopics().containsKey(topicName)) {
|
||||||
|
throw new TopicNotFoundException();
|
||||||
|
}
|
||||||
|
return deserializationService
|
||||||
|
.getRecordDeserializerForCluster(cluster)
|
||||||
|
.getTopicSchema(topicName);
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
|
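For reference, a small self-contained sketch (illustrative values only, not project code) of the paging arithmetic getTopics uses above: a default page size of 25, skip = (page - 1) * perPage, and a page count that rounds up when there is a remainder. With 100 topics and a page size of 33 this yields 4 pages, with a single topic on the last page, which is exactly what the tests later in this diff assert.

// Standalone sketch of the paging arithmetic in TopicsService.getTopics.
public class PagingSketch {
  public static void main(String[] args) {
    int topicCount = 100;
    int perPage = 33;                       // requested page size (already validated as positive)
    int page = 4;                           // requested page, 1-based
    int topicsToSkip = (page - 1) * perPage;
    int totalPages = (topicCount / perPage) + (topicCount % perPage == 0 ? 0 : 1);
    int shownOnPage = Math.min(perPage, Math.max(0, topicCount - topicsToSkip));
    System.out.printf("pages=%d, shown on page %d: %d%n", totalPages, page, shownOnPage);
    // prints: pages=4, shown on page 4: 1
  }
}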
@@ -1,303 +0,0 @@
package com.provectus.kafka.ui.service;

import static org.assertj.core.api.Assertions.assertThat;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.when;

import com.provectus.kafka.ui.mapper.ClusterMapper;
import com.provectus.kafka.ui.model.InternalTopic;
import com.provectus.kafka.ui.model.InternalTopicConfig;
import com.provectus.kafka.ui.model.KafkaCluster;
import com.provectus.kafka.ui.model.TopicColumnsToSortDTO;
import com.provectus.kafka.ui.model.TopicDTO;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.UUID;
import java.util.function.Function;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import org.apache.kafka.clients.admin.ConfigEntry;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.mapstruct.factory.Mappers;
import org.mockito.InjectMocks;
import org.mockito.Mock;
import org.mockito.Spy;
import org.mockito.junit.jupiter.MockitoExtension;

@ExtendWith(MockitoExtension.class)
class ClusterServiceTest {
  @Spy
  private final ClusterMapper clusterMapper = Mappers.getMapper(ClusterMapper.class);
  @InjectMocks
  private ClusterService clusterService;
  @Mock
  private ClustersStorage clustersStorage;
  @Mock
  private KafkaService kafkaService;

  @Test
  public void shouldListFirst25Topics() {
    var topicName = UUID.randomUUID().toString();

    final KafkaCluster cluster = KafkaCluster.builder()
        .topics(
            IntStream.rangeClosed(1, 100).boxed()
                .map(Objects::toString)
                .collect(Collectors.toMap(Function.identity(), e -> InternalTopic.builder()
                    .partitions(Map.of())
                    .name(e)
                    .build()))
        )
        .build();

    when(clustersStorage.getClusterByName(topicName))
        .thenReturn(Optional.of(cluster));

    when(
        kafkaService.getTopicPartitions(any(), any())
    ).thenReturn(
        Map.of()
    );

    var topics = clusterService.getTopics(topicName,
        Optional.empty(), Optional.empty(), Optional.empty(),
        Optional.empty(), Optional.empty());
    assertThat(topics.getPageCount()).isEqualTo(4);
    assertThat(topics.getTopics()).hasSize(25);
    assertThat(topics.getTopics()).map(TopicDTO::getName).isSorted();
  }

  @Test
  public void shouldCalculateCorrectPageCountForNonDivisiblePageSize() {
    var topicName = UUID.randomUUID().toString();

    when(clustersStorage.getClusterByName(topicName))
        .thenReturn(Optional.of(KafkaCluster.builder()
            .topics(
                IntStream.rangeClosed(1, 100).boxed()
                    .map(Objects::toString)
                    .collect(Collectors.toMap(Function.identity(), e -> InternalTopic.builder()
                        .partitions(Map.of())
                        .name(e)
                        .build()))
            )
            .build()));

    when(
        kafkaService.getTopicPartitions(any(), any())
    ).thenReturn(
        Map.of()
    );

    var topics = clusterService.getTopics(topicName, Optional.of(4), Optional.of(33),
        Optional.empty(), Optional.empty(), Optional.empty());
    assertThat(topics.getPageCount()).isEqualTo(4);
    assertThat(topics.getTopics()).hasSize(1)
        .first().extracting(TopicDTO::getName).isEqualTo("99");
  }

  @Test
  public void shouldCorrectlyHandleNonPositivePageNumberAndPageSize() {
    var topicName = UUID.randomUUID().toString();

    when(clustersStorage.getClusterByName(topicName))
        .thenReturn(Optional.of(KafkaCluster.builder()
            .topics(
                IntStream.rangeClosed(1, 100).boxed()
                    .map(Objects::toString)
                    .collect(Collectors.toMap(Function.identity(), e -> InternalTopic.builder()
                        .partitions(Map.of())
                        .name(e)
                        .build()))
            )
            .build()));

    when(
        kafkaService.getTopicPartitions(any(), any())
    ).thenReturn(
        Map.of()
    );

    var topics = clusterService.getTopics(topicName, Optional.of(0), Optional.of(-1),
        Optional.empty(), Optional.empty(), Optional.empty());
    assertThat(topics.getPageCount()).isEqualTo(4);
    assertThat(topics.getTopics()).hasSize(25);
    assertThat(topics.getTopics()).map(TopicDTO::getName).isSorted();
  }

  @Test
  public void shouldListBotInternalAndNonInternalTopics() {
    var topicName = UUID.randomUUID().toString();

    when(clustersStorage.getClusterByName(topicName))
        .thenReturn(Optional.of(KafkaCluster.builder()
            .topics(
                IntStream.rangeClosed(1, 100).boxed()
                    .map(Objects::toString)
                    .collect(Collectors.toMap(Function.identity(), e -> InternalTopic.builder()
                        .partitions(Map.of())
                        .name(e)
                        .internal(Integer.parseInt(e) % 10 == 0)
                        .build()))
            )
            .build()));

    when(
        kafkaService.getTopicPartitions(any(), any())
    ).thenReturn(
        Map.of()
    );

    var topics = clusterService.getTopics(topicName,
        Optional.empty(), Optional.empty(), Optional.of(true),
        Optional.empty(), Optional.empty());
    assertThat(topics.getPageCount()).isEqualTo(4);
    assertThat(topics.getTopics()).hasSize(25);
    assertThat(topics.getTopics()).map(TopicDTO::getName).isSorted();
  }


  @Test
  public void shouldListOnlyNonInternalTopics() {
    var topicName = UUID.randomUUID().toString();

    when(clustersStorage.getClusterByName(topicName))
        .thenReturn(Optional.of(KafkaCluster.builder()
            .topics(
                IntStream.rangeClosed(1, 100).boxed()
                    .map(Objects::toString)
                    .collect(Collectors.toMap(Function.identity(), e -> InternalTopic.builder()
                        .partitions(Map.of())
                        .name(e)
                        .internal(Integer.parseInt(e) % 10 == 0)
                        .build()))
            )
            .build()));

    when(
        kafkaService.getTopicPartitions(any(), any())
    ).thenReturn(
        Map.of()
    );

    var topics = clusterService.getTopics(topicName,
        Optional.empty(), Optional.empty(), Optional.of(true),
        Optional.empty(), Optional.empty());
    assertThat(topics.getPageCount()).isEqualTo(4);
    assertThat(topics.getTopics()).hasSize(25);
    assertThat(topics.getTopics()).map(TopicDTO::getName).isSorted();
  }


  @Test
  public void shouldListOnlyTopicsContainingOne() {
    var topicName = UUID.randomUUID().toString();

    when(clustersStorage.getClusterByName(topicName))
        .thenReturn(Optional.of(KafkaCluster.builder()
            .topics(
                IntStream.rangeClosed(1, 100).boxed()
                    .map(Objects::toString)
                    .collect(Collectors.toMap(Function.identity(), e -> InternalTopic.builder()
                        .partitions(Map.of())
                        .name(e)
                        .build()))
            )
            .build()));

    when(
        kafkaService.getTopicPartitions(any(), any())
    ).thenReturn(
        Map.of()
    );

    var topics = clusterService.getTopics(topicName,
        Optional.empty(), Optional.empty(), Optional.empty(),
        Optional.of("1"), Optional.empty());
    assertThat(topics.getPageCount()).isEqualTo(1);
    assertThat(topics.getTopics()).hasSize(20);
    assertThat(topics.getTopics()).map(TopicDTO::getName).isSorted();
  }

  @Test
  public void shouldListTopicsOrderedByPartitionsCount() {
    var topicName = UUID.randomUUID().toString();

    when(clustersStorage.getClusterByName(topicName))
        .thenReturn(Optional.of(KafkaCluster.builder()
            .topics(
                IntStream.rangeClosed(1, 100).boxed()
                    .map(Objects::toString)
                    .collect(Collectors.toMap(Function.identity(), e -> InternalTopic.builder()
                        .partitions(Map.of())
                        .name(e)
                        .partitionCount(100 - Integer.parseInt(e))
                        .build()))
            )
            .build()));

    when(
        kafkaService.getTopicPartitions(any(), any())
    ).thenReturn(
        Map.of()
    );

    var topics = clusterService.getTopics(topicName,
        Optional.empty(), Optional.empty(), Optional.empty(),
        Optional.empty(), Optional.of(TopicColumnsToSortDTO.TOTAL_PARTITIONS));
    assertThat(topics.getPageCount()).isEqualTo(4);
    assertThat(topics.getTopics()).hasSize(25);
    assertThat(topics.getTopics()).map(TopicDTO::getPartitionCount).isSorted();
  }

  @Test
  public void shouldRetrieveTopicConfigs() {
    var topicName = UUID.randomUUID().toString();

    when(clustersStorage.getClusterByName(topicName))
        .thenReturn(Optional.of(KafkaCluster.builder()
            .topics(
                IntStream.rangeClosed(1, 100).boxed()
                    .map(Objects::toString)
                    .collect(Collectors.toMap(Function.identity(), e -> InternalTopic.builder()
                        .name(e)
                        .topicConfigs(
                            List.of(InternalTopicConfig.builder()
                                .name("testName")
                                .value("testValue")
                                .defaultValue("testDefaultValue")
                                .source(ConfigEntry.ConfigSource.DEFAULT_CONFIG)
                                .isReadOnly(true)
                                .isSensitive(true)
                                .synonyms(List.of())
                                .build()
                            )
                        )
                        .build()))
            )
            .build()));

    var configs = clusterService.getTopicConfigs(topicName, "1");
    var topicConfig = configs.isPresent() ? configs.get().get(0) : null;

    assertThat(configs.isPresent()).isTrue();
    assertThat(topicConfig.getName()).isEqualTo("testName");
    assertThat(topicConfig.getValue()).isEqualTo("testValue");
    assertThat(topicConfig.getDefaultValue()).isEqualTo("testDefaultValue");
    assertThat(topicConfig.getSource().getValue())
        .isEqualTo(ConfigEntry.ConfigSource.DEFAULT_CONFIG.name());
    assertThat(topicConfig.getSynonyms()).isNotNull();
    assertThat(topicConfig.getIsReadOnly()).isTrue();
    assertThat(topicConfig.getIsSensitive()).isTrue();
  }

}
@@ -7,7 +7,6 @@ import static org.mockito.Mockito.verify;
 import static org.mockito.Mockito.when;
 
 import com.provectus.kafka.ui.client.KsqlClient;
-import com.provectus.kafka.ui.exception.ClusterNotFoundException;
 import com.provectus.kafka.ui.exception.KsqlDbNotFoundException;
 import com.provectus.kafka.ui.exception.UnprocessableEntityException;
 import com.provectus.kafka.ui.model.KafkaCluster;
@@ -17,7 +16,6 @@ import com.provectus.kafka.ui.strategy.ksql.statement.BaseStrategy;
 import com.provectus.kafka.ui.strategy.ksql.statement.DescribeStrategy;
 import com.provectus.kafka.ui.strategy.ksql.statement.ShowStrategy;
 import java.util.List;
-import java.util.Optional;
 import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Test;
 import org.junit.jupiter.api.extension.ExtendWith;
@@ -45,81 +43,58 @@ class KsqlServiceTest {
     this.alternativeStrategy = new DescribeStrategy();
     this.ksqlService = new KsqlService(
         this.ksqlClient,
-        this.clustersStorage,
         List.of(baseStrategy, alternativeStrategy)
     );
   }
 
-  @Test
-  void shouldThrowClusterNotFoundExceptionOnExecuteKsqlCommand() {
-    String clusterName = "test";
-    KsqlCommandDTO command = (new KsqlCommandDTO()).ksql("show streams;");
-    when(clustersStorage.getClusterByName(clusterName)).thenReturn(Optional.ofNullable(null));
-
-    StepVerifier.create(ksqlService.executeKsqlCommand(clusterName, Mono.just(command)))
-        .verifyError(ClusterNotFoundException.class);
-  }
-
   @Test
   void shouldThrowKsqlDbNotFoundExceptionOnExecuteKsqlCommand() {
-    String clusterName = "test";
     KsqlCommandDTO command = (new KsqlCommandDTO()).ksql("show streams;");
     KafkaCluster kafkaCluster = Mockito.mock(KafkaCluster.class);
-    when(clustersStorage.getClusterByName(clusterName))
-        .thenReturn(Optional.ofNullable(kafkaCluster));
     when(kafkaCluster.getKsqldbServer()).thenReturn(null);
 
-    StepVerifier.create(ksqlService.executeKsqlCommand(clusterName, Mono.just(command)))
+    StepVerifier.create(ksqlService.executeKsqlCommand(kafkaCluster, Mono.just(command)))
        .verifyError(KsqlDbNotFoundException.class);
   }
 
   @Test
   void shouldThrowUnprocessableEntityExceptionOnExecuteKsqlCommand() {
-    String clusterName = "test";
     KsqlCommandDTO command =
         (new KsqlCommandDTO()).ksql("CREATE STREAM users WITH (KAFKA_TOPIC='users');");
     KafkaCluster kafkaCluster = Mockito.mock(KafkaCluster.class);
-    when(clustersStorage.getClusterByName(clusterName))
-        .thenReturn(Optional.ofNullable(kafkaCluster));
     when(kafkaCluster.getKsqldbServer()).thenReturn("localhost:8088");
 
-    StepVerifier.create(ksqlService.executeKsqlCommand(clusterName, Mono.just(command)))
+    StepVerifier.create(ksqlService.executeKsqlCommand(kafkaCluster, Mono.just(command)))
         .verifyError(UnprocessableEntityException.class);
 
-    StepVerifier.create(ksqlService.executeKsqlCommand(clusterName, Mono.just(command)))
+    StepVerifier.create(ksqlService.executeKsqlCommand(kafkaCluster, Mono.just(command)))
         .verifyErrorMessage("Invalid sql");
   }
 
   @Test
   void shouldSetHostToStrategy() {
-    String clusterName = "test";
     String host = "localhost:8088";
     KsqlCommandDTO command = (new KsqlCommandDTO()).ksql("describe streams;");
     KafkaCluster kafkaCluster = Mockito.mock(KafkaCluster.class);
 
-    when(clustersStorage.getClusterByName(clusterName))
-        .thenReturn(Optional.ofNullable(kafkaCluster));
     when(kafkaCluster.getKsqldbServer()).thenReturn(host);
     when(ksqlClient.execute(any())).thenReturn(Mono.just(new KsqlCommandResponseDTO()));
 
-    ksqlService.executeKsqlCommand(clusterName, Mono.just(command)).block();
+    ksqlService.executeKsqlCommand(kafkaCluster, Mono.just(command)).block();
     assertThat(alternativeStrategy.getUri()).isEqualTo(host + "/ksql");
   }
 
   @Test
   void shouldCallClientAndReturnResponse() {
-    String clusterName = "test";
     KsqlCommandDTO command = (new KsqlCommandDTO()).ksql("describe streams;");
     KafkaCluster kafkaCluster = Mockito.mock(KafkaCluster.class);
    KsqlCommandResponseDTO response = new KsqlCommandResponseDTO().message("success");
 
-    when(clustersStorage.getClusterByName(clusterName))
-        .thenReturn(Optional.ofNullable(kafkaCluster));
     when(kafkaCluster.getKsqldbServer()).thenReturn("host");
     when(ksqlClient.execute(any())).thenReturn(Mono.just(response));
 
     KsqlCommandResponseDTO receivedResponse =
-        ksqlService.executeKsqlCommand(clusterName, Mono.just(command)).block();
+        ksqlService.executeKsqlCommand(kafkaCluster, Mono.just(command)).block();
     verify(ksqlClient, times(1)).execute(alternativeStrategy);
     assertThat(receivedResponse).isEqualTo(response);
@@ -7,6 +7,7 @@ import com.fasterxml.jackson.databind.ObjectMapper;
 import com.provectus.kafka.ui.AbstractBaseTest;
 import com.provectus.kafka.ui.model.ConsumerPosition;
 import com.provectus.kafka.ui.model.CreateTopicMessageDTO;
+import com.provectus.kafka.ui.model.KafkaCluster;
 import com.provectus.kafka.ui.model.MessageFormatDTO;
 import com.provectus.kafka.ui.model.SeekDirectionDTO;
 import com.provectus.kafka.ui.model.SeekTypeDTO;
@@ -24,6 +25,7 @@ import java.util.function.Consumer;
 import lombok.SneakyThrows;
 import org.apache.kafka.clients.admin.NewTopic;
 import org.apache.kafka.common.TopicPartition;
+import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Test;
 import org.springframework.beans.factory.annotation.Autowired;
 import org.springframework.test.context.ContextConfiguration;
@@ -118,12 +120,22 @@ public class SendAndReadTests extends AbstractBaseTest {
   private static final String JSON_SCHEMA_RECORD
       = "{ \"f1\": 12, \"f2\": \"testJsonSchema1\", \"schema\": \"some txt\" }";
 
+  private KafkaCluster targetCluster;
+
   @Autowired
-  private ClusterService clusterService;
+  private MessagesService messagesService;
+
+  @Autowired
+  private ClustersStorage clustersStorage;
 
   @Autowired
   private ClustersMetricsScheduler clustersMetricsScheduler;
 
+  @BeforeEach
+  void init() {
+    targetCluster = clustersStorage.getClusterByName(LOCAL).get();
+  }
+
   @Test
   void noSchemaStringKeyStringValue() {
     new SendAndReadSpec()
@@ -500,7 +512,8 @@ public class SendAndReadTests extends AbstractBaseTest {
     public void assertSendThrowsException() {
       String topic = createTopicAndCreateSchemas();
       try {
-        assertThatThrownBy(() -> clusterService.sendMessage(LOCAL, topic, msgToSend).block());
+        assertThatThrownBy(() ->
+            messagesService.sendMessage(targetCluster, topic, msgToSend).block());
       } finally {
         deleteTopic(topic);
       }
@@ -510,9 +523,9 @@ public class SendAndReadTests extends AbstractBaseTest {
     public void doAssert(Consumer<TopicMessageDTO> msgAssert) {
       String topic = createTopicAndCreateSchemas();
       try {
-        clusterService.sendMessage(LOCAL, topic, msgToSend).block();
-        TopicMessageDTO polled = clusterService.getMessages(
-            LOCAL,
+        messagesService.sendMessage(targetCluster, topic, msgToSend).block();
+        TopicMessageDTO polled = messagesService.loadMessages(
+            targetCluster,
             topic,
             new ConsumerPosition(
                 SeekTypeDTO.BEGINNING,
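A compact sketch (illustrative names only, not the project's API beyond what the diff shows) of the "resolve the cluster once, then pass the object around" pattern the test changes above adopt: the lookup happens a single time, failing fast if the cluster is missing, and every later call receives the resolved object instead of a name.

import java.util.Map;
import java.util.Optional;

// ClusterRegistry/Cluster are hypothetical stand-ins for ClustersStorage/KafkaCluster.
class ClusterRegistry {
  private final Map<String, Cluster> clusters = Map.of("local", new Cluster("local"));

  Optional<Cluster> getClusterByName(String name) {
    return Optional.ofNullable(clusters.get(name));
  }

  record Cluster(String name) {}

  public static void main(String[] args) {
    var registry = new ClusterRegistry();
    // resolve once, fail fast if missing, then hand the object to whatever needs it
    Cluster target = registry.getClusterByName("local")
        .orElseThrow(() -> new IllegalStateException("cluster not configured"));
    System.out.println("using cluster " + target.name());
  }
}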
@@ -0,0 +1,233 @@
package com.provectus.kafka.ui.service;

import static org.assertj.core.api.Assertions.assertThat;

import com.provectus.kafka.ui.mapper.ClusterMapper;
import com.provectus.kafka.ui.model.InternalTopic;
import com.provectus.kafka.ui.model.InternalTopicConfig;
import com.provectus.kafka.ui.model.KafkaCluster;
import com.provectus.kafka.ui.model.TopicColumnsToSortDTO;
import com.provectus.kafka.ui.model.TopicDTO;
import com.provectus.kafka.ui.serde.DeserializationService;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.function.Function;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import org.apache.kafka.clients.admin.ConfigEntry;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.mapstruct.factory.Mappers;
import org.mockito.InjectMocks;
import org.mockito.Mock;
import org.mockito.Spy;
import org.mockito.junit.jupiter.MockitoExtension;

@ExtendWith(MockitoExtension.class)
class TopicsServiceTest {
  @Spy
  private final ClusterMapper clusterMapper = Mappers.getMapper(ClusterMapper.class);
  @InjectMocks
  private TopicsService topicsService;
  @Mock
  private AdminClientService adminClientService;
  @Mock
  private ConsumerGroupService consumerGroupService;
  @Mock
  private ClustersStorage clustersStorage;

  @Mock
  private DeserializationService deserializationService;

  @Test
  public void shouldListFirst25Topics() {
    final KafkaCluster cluster = KafkaCluster.builder()
        .topics(
            IntStream.rangeClosed(1, 100).boxed()
                .map(Objects::toString)
                .collect(Collectors.toMap(Function.identity(), e -> InternalTopic.builder()
                    .partitions(Map.of())
                    .name(e)
                    .build()))
        )
        .build();

    var topics = topicsService.getTopics(cluster,
        Optional.empty(), Optional.empty(), Optional.empty(),
        Optional.empty(), Optional.empty());
    assertThat(topics.getPageCount()).isEqualTo(4);
    assertThat(topics.getTopics()).hasSize(25);
    assertThat(topics.getTopics()).map(TopicDTO::getName).isSorted();
  }

  @Test
  public void shouldCalculateCorrectPageCountForNonDivisiblePageSize() {
    var cluster = KafkaCluster.builder()
        .topics(
            IntStream.rangeClosed(1, 100).boxed()
                .map(Objects::toString)
                .collect(Collectors.toMap(Function.identity(), e -> InternalTopic.builder()
                    .partitions(Map.of())
                    .name(e)
                    .build()))
        )
        .build();

    var topics = topicsService.getTopics(cluster, Optional.of(4), Optional.of(33),
        Optional.empty(), Optional.empty(), Optional.empty());
    assertThat(topics.getPageCount()).isEqualTo(4);
    assertThat(topics.getTopics()).hasSize(1)
        .first().extracting(TopicDTO::getName).isEqualTo("99");
  }

  @Test
  public void shouldCorrectlyHandleNonPositivePageNumberAndPageSize() {
    var cluster = KafkaCluster.builder()
        .topics(
            IntStream.rangeClosed(1, 100).boxed()
                .map(Objects::toString)
                .collect(Collectors.toMap(Function.identity(), e -> InternalTopic.builder()
                    .partitions(Map.of())
                    .name(e)
                    .build()))
        )
        .build();


    var topics = topicsService.getTopics(cluster, Optional.of(0), Optional.of(-1),
        Optional.empty(), Optional.empty(), Optional.empty());
    assertThat(topics.getPageCount()).isEqualTo(4);
    assertThat(topics.getTopics()).hasSize(25);
    assertThat(topics.getTopics()).map(TopicDTO::getName).isSorted();
  }

  @Test
  public void shouldListBotInternalAndNonInternalTopics() {
    var cluster = KafkaCluster.builder()
        .topics(
            IntStream.rangeClosed(1, 100).boxed()
                .map(Objects::toString)
                .collect(Collectors.toMap(Function.identity(), e -> InternalTopic.builder()
                    .partitions(Map.of())
                    .name(e)
                    .internal(Integer.parseInt(e) % 10 == 0)
                    .build()))
        )
        .build();

    var topics = topicsService.getTopics(cluster,
        Optional.empty(), Optional.empty(), Optional.of(true),
        Optional.empty(), Optional.empty());
    assertThat(topics.getPageCount()).isEqualTo(4);
    assertThat(topics.getTopics()).hasSize(25);
    assertThat(topics.getTopics()).map(TopicDTO::getName).isSorted();
  }


  @Test
  public void shouldListOnlyNonInternalTopics() {
    var cluster = KafkaCluster.builder()
        .topics(
            IntStream.rangeClosed(1, 100).boxed()
                .map(Objects::toString)
                .collect(Collectors.toMap(Function.identity(), e -> InternalTopic.builder()
                    .partitions(Map.of())
                    .name(e)
                    .internal(Integer.parseInt(e) % 10 == 0)
                    .build()))
        )
        .build();

    var topics = topicsService.getTopics(cluster,
        Optional.empty(), Optional.empty(), Optional.of(true),
        Optional.empty(), Optional.empty());
    assertThat(topics.getPageCount()).isEqualTo(4);
    assertThat(topics.getTopics()).hasSize(25);
    assertThat(topics.getTopics()).map(TopicDTO::getName).isSorted();
  }


  @Test
  public void shouldListOnlyTopicsContainingOne() {
    var cluster = KafkaCluster.builder()
        .topics(
            IntStream.rangeClosed(1, 100).boxed()
                .map(Objects::toString)
                .collect(Collectors.toMap(Function.identity(), e -> InternalTopic.builder()
                    .partitions(Map.of())
                    .name(e)
                    .build()))
        )
        .build();

    var topics = topicsService.getTopics(cluster,
        Optional.empty(), Optional.empty(), Optional.empty(),
        Optional.of("1"), Optional.empty());
    assertThat(topics.getPageCount()).isEqualTo(1);
    assertThat(topics.getTopics()).hasSize(20);
    assertThat(topics.getTopics()).map(TopicDTO::getName).isSorted();
  }

  @Test
  public void shouldListTopicsOrderedByPartitionsCount() {
    var cluster = KafkaCluster.builder()
        .topics(
            IntStream.rangeClosed(1, 100).boxed()
                .map(Objects::toString)
                .collect(Collectors.toMap(Function.identity(), e -> InternalTopic.builder()
                    .partitions(Map.of())
                    .name(e)
                    .partitionCount(100 - Integer.parseInt(e))
                    .build()))
        )
        .build();

    var topics = topicsService.getTopics(cluster,
        Optional.empty(), Optional.empty(), Optional.empty(),
        Optional.empty(), Optional.of(TopicColumnsToSortDTO.TOTAL_PARTITIONS));
    assertThat(topics.getPageCount()).isEqualTo(4);
    assertThat(topics.getTopics()).hasSize(25);
    assertThat(topics.getTopics()).map(TopicDTO::getPartitionCount).isSorted();
  }

  @Test
  public void shouldRetrieveTopicConfigs() {
    var cluster = KafkaCluster.builder()
        .topics(
            IntStream.rangeClosed(1, 100).boxed()
                .map(Objects::toString)
                .collect(Collectors.toMap(Function.identity(), e -> InternalTopic.builder()
                    .name(e)
                    .topicConfigs(
                        List.of(InternalTopicConfig.builder()
                            .name("testName")
                            .value("testValue")
                            .defaultValue("testDefaultValue")
                            .source(ConfigEntry.ConfigSource.DEFAULT_CONFIG)
                            .isReadOnly(true)
                            .isSensitive(true)
                            .synonyms(List.of())
                            .build()
                        )
                    )
                    .build()))
        )
        .build();

    var configs = topicsService.getTopicConfigs(cluster, "1");
    var topicConfig = configs.isPresent() ? configs.get().get(0) : null;

    assertThat(configs.isPresent()).isTrue();
    assertThat(topicConfig.getName()).isEqualTo("testName");
    assertThat(topicConfig.getValue()).isEqualTo("testValue");
    assertThat(topicConfig.getDefaultValue()).isEqualTo("testDefaultValue");
    assertThat(topicConfig.getSource().getValue())
        .isEqualTo(ConfigEntry.ConfigSource.DEFAULT_CONFIG.name());
    assertThat(topicConfig.getSynonyms()).isNotNull();
    assertThat(topicConfig.getIsReadOnly()).isTrue();
    assertThat(topicConfig.getIsSensitive()).isTrue();
  }

}