Issue#727 disable deleting (#768)

* broker config refactoring and adding a broker config check before topic deletion
* add TOPIC_DELETION feature, refactor KafkaService, add a class for constants (Constants.java)
* annotation fixes and change of the TOPIC_DELETION check
* [issue-727] Disable the delete button for a topic if deletion is restricted for this broker #727
* Rewrite to context
* remove code smells
* small fixes

Co-authored-by: marselakhmetov <makhmetov@provectus.com>
Co-authored-by: mbovtryuk <mbovtryuk@provectus.com>
Co-authored-by: Alexander <mr.afigitelniychuvak@gmail.com>
This commit is contained in: parent 840bc5dcee, commit 5c563de4f1.
23 changed files with 423 additions and 175 deletions.
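The "broker config check before topic deletion" mentioned above keys off the standard Kafka broker setting delete.topic.enable (Kafka's default is true). As an illustration only (this snippet is not part of the change), a broker configured like this would end up without the TOPIC_DELETION feature, and the UI would hide its delete controls:

    # server.properties on the broker (illustrative)
    delete.topic.enable=false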
@@ -48,10 +48,7 @@ public class BrokersController implements BrokersApi {
   @Override
   public Mono<ResponseEntity<Flux<BrokerConfig>>> getBrokerConfig(String clusterName, Integer id,
                                                                   ServerWebExchange exchange) {
-    return clusterService.getBrokerConfig(clusterName, id)
-        .map(Flux::fromIterable)
-        .map(ResponseEntity::ok)
-        .onErrorReturn(ResponseEntity.notFound().build());
+    return Mono.just(ResponseEntity.ok(clusterService.getBrokerConfig(clusterName, id)));
   }
 
   @Override
@@ -1,28 +1,8 @@
 package com.provectus.kafka.ui.model;
 
-import java.util.Arrays;
-import java.util.List;
-import java.util.Optional;
-import java.util.function.Predicate;
-import java.util.stream.Collectors;
-
 public enum Feature {
-  KAFKA_CONNECT(cluster -> Optional.ofNullable(cluster.getKafkaConnect())
-      .filter(Predicate.not(List::isEmpty))
-      .isPresent()
-  ),
-  KSQL_DB(cluster -> cluster.getKsqldbServer() != null),
-  SCHEMA_REGISTRY(cluster -> cluster.getSchemaRegistry() != null);
-
-  private final Predicate<KafkaCluster> isEnabled;
-
-  Feature(Predicate<KafkaCluster> isEnabled) {
-    this.isEnabled = isEnabled;
-  }
-
-  public static List<Feature> getEnabledFeatures(KafkaCluster cluster) {
-    return Arrays.stream(values())
-        .filter(feature -> feature.isEnabled.test(cluster))
-        .collect(Collectors.toList());
-  }
+  KAFKA_CONNECT,
+  KSQL_DB,
+  SCHEMA_REGISTRY,
+  TOPIC_DELETION;
 }
@@ -0,0 +1,23 @@
package com.provectus.kafka.ui.service;

import com.provectus.kafka.ui.model.ExtendedAdminClient;
import com.provectus.kafka.ui.model.KafkaCluster;
import reactor.core.publisher.Mono;

public interface AdminClientService {
  /**
   * Get an ExtendedAdminClient from the cache if it exists, or create a new one if not.
   *
   * @param cluster - cluster
   * @return The Mono of ExtendedAdminClient
   */
  Mono<ExtendedAdminClient> getOrCreateAdminClient(KafkaCluster cluster);

  /**
   * Create a new ExtendedAdminClient.
   *
   * @param cluster - cluster
   * @return The Mono of ExtendedAdminClient
   */
  Mono<ExtendedAdminClient> createAdminClient(KafkaCluster cluster);
}
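A minimal usage sketch (not code from this commit) of the extracted interface: callers obtain the shared, cached client instead of building their own, mirroring how KafkaService and BrokerServiceImpl use it further down. The clusterId() helper below is hypothetical; getOrCreateAdminClient, ExtendedAdminClient and ClusterUtil.toMono are the ones shown in this diff.

    // Sketch only: a hypothetical caller with AdminClientService injected by Spring.
    private final AdminClientService adminClientService;

    Mono<String> clusterId(KafkaCluster cluster) {
      return adminClientService.getOrCreateAdminClient(cluster)   // cached per cluster name
          .map(ExtendedAdminClient::getAdminClient)                // unwrap the raw AdminClient
          .flatMap(ac -> ClusterUtil.toMono(ac.describeCluster().clusterId()));
    }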
@@ -0,0 +1,44 @@
package com.provectus.kafka.ui.service;

import com.provectus.kafka.ui.model.ExtendedAdminClient;
import com.provectus.kafka.ui.model.KafkaCluster;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.ConcurrentHashMap;
import lombok.RequiredArgsConstructor;
import lombok.Setter;
import lombok.extern.log4j.Log4j2;
import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Service;
import reactor.core.publisher.Mono;

@Service
@RequiredArgsConstructor
@Log4j2
public class AdminClientServiceImpl implements AdminClientService {
  private final Map<String, ExtendedAdminClient> adminClientCache = new ConcurrentHashMap<>();
  @Setter // used in tests
  @Value("${kafka.admin-client-timeout}")
  private int clientTimeout;

  @Override
  public Mono<ExtendedAdminClient> getOrCreateAdminClient(KafkaCluster cluster) {
    return Mono.justOrEmpty(adminClientCache.get(cluster.getName()))
        .switchIfEmpty(createAdminClient(cluster))
        .map(e -> adminClientCache.computeIfAbsent(cluster.getName(), key -> e));
  }

  @Override
  public Mono<ExtendedAdminClient> createAdminClient(KafkaCluster cluster) {
    return Mono.fromSupplier(() -> {
      Properties properties = new Properties();
      properties.putAll(cluster.getProperties());
      properties
          .put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, cluster.getBootstrapServers());
      properties.put(AdminClientConfig.REQUEST_TIMEOUT_MS_CONFIG, clientTimeout);
      return AdminClient.create(properties);
    }).flatMap(ExtendedAdminClient::extendedAdminClient);
  }
}
@@ -0,0 +1,46 @@
package com.provectus.kafka.ui.service;

import com.provectus.kafka.ui.model.Broker;
import com.provectus.kafka.ui.model.InternalBrokerConfig;
import com.provectus.kafka.ui.model.KafkaCluster;
import java.util.Map;
import org.apache.kafka.common.Node;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;

public interface BrokerService {
  /**
   * Get broker config as a map (config name -> config).
   *
   * @param cluster - cluster
   * @param brokerId - node id
   * @return Mono of Map(String, InternalBrokerConfig)
   */
  Mono<Map<String, InternalBrokerConfig>> getBrokerConfigMap(KafkaCluster cluster,
                                                             Integer brokerId);

  /**
   * Get broker config as a Flux of InternalBrokerConfig.
   *
   * @param cluster - cluster
   * @param brokerId - node id
   * @return Flux of InternalBrokerConfig
   */
  Flux<InternalBrokerConfig> getBrokersConfig(KafkaCluster cluster, Integer brokerId);

  /**
   * Get active brokers in the cluster.
   *
   * @param cluster - cluster
   * @return Flux of Broker
   */
  Flux<Broker> getBrokers(KafkaCluster cluster);

  /**
   * Get the cluster controller node.
   *
   * @param cluster - cluster
   * @return Controller node
   */
  Mono<Node> getController(KafkaCluster cluster);
}
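As a usage sketch (an assumption for illustration, not code from this PR), getController() and getBrokerConfigMap() compose to answer exactly the question the new feature needs; the real check lives in FeatureServiceImpl further down and uses the DELETE_TOPIC_ENABLE constant instead of the literal string:

    // Sketch: does the controller broker allow topic deletion?
    Mono<Boolean> topicDeletionEnabled(KafkaCluster cluster) {
      return brokerService.getController(cluster)
          .map(Node::id)
          .flatMap(id -> brokerService.getBrokerConfigMap(cluster, id))
          .map(config -> config.containsKey("delete.topic.enable")
              && Boolean.parseBoolean(config.get("delete.topic.enable").getValue()));
    }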
@@ -0,0 +1,103 @@
package com.provectus.kafka.ui.service;

import com.provectus.kafka.ui.exception.IllegalEntityStateException;
import com.provectus.kafka.ui.exception.NotFoundException;
import com.provectus.kafka.ui.model.Broker;
import com.provectus.kafka.ui.model.ExtendedAdminClient;
import com.provectus.kafka.ui.model.InternalBrokerConfig;
import com.provectus.kafka.ui.model.KafkaCluster;
import com.provectus.kafka.ui.util.ClusterUtil;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import lombok.RequiredArgsConstructor;
import lombok.extern.log4j.Log4j2;
import org.apache.kafka.clients.admin.ConfigEntry;
import org.apache.kafka.clients.admin.DescribeConfigsOptions;
import org.apache.kafka.common.Node;
import org.apache.kafka.common.config.ConfigResource;
import org.springframework.stereotype.Service;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;

@Service
@RequiredArgsConstructor
@Log4j2
public class BrokerServiceImpl implements BrokerService {

  private final AdminClientService adminClientService;

  private Mono<Map<Integer, List<ConfigEntry>>> loadBrokersConfig(
      KafkaCluster cluster, List<Integer> brokersIds) {
    List<ConfigResource> resources = brokersIds.stream()
        .map(brokerId -> new ConfigResource(ConfigResource.Type.BROKER, Integer.toString(brokerId)))
        .collect(Collectors.toList());

    return adminClientService.getOrCreateAdminClient(cluster)
        .map(ExtendedAdminClient::getAdminClient)
        .flatMap(adminClient ->
            ClusterUtil.toMono(adminClient.describeConfigs(resources,
                new DescribeConfigsOptions().includeSynonyms(true)).all())
                .map(config -> config.entrySet().stream()
                    .collect(Collectors.toMap(
                        c -> Integer.valueOf(c.getKey().name()),
                        c -> new ArrayList<>(c.getValue().entries())))));
  }

  private Mono<List<ConfigEntry>> loadBrokersConfig(
      KafkaCluster cluster, Integer brokerId) {
    return loadBrokersConfig(cluster, Collections.singletonList(brokerId))
        .map(map -> map.values().stream()
            .findFirst()
            .orElseThrow(() -> new IllegalEntityStateException(
                String.format("Config for broker %s not found", brokerId)))
        );
  }

  @Override
  public Mono<Map<String, InternalBrokerConfig>> getBrokerConfigMap(KafkaCluster cluster,
                                                                    Integer brokerId) {
    return loadBrokersConfig(cluster, brokerId)
        .map(list -> list.stream()
            .collect(Collectors.toMap(
                ConfigEntry::name,
                ClusterUtil::mapToInternalBrokerConfig)));
  }

  @Override
  public Flux<InternalBrokerConfig> getBrokersConfig(KafkaCluster cluster, Integer brokerId) {
    if (!cluster.getBrokers().contains(brokerId)) {
      return Flux.error(
          new NotFoundException(String.format("Broker with id %s not found", brokerId)));
    }
    return loadBrokersConfig(cluster, brokerId)
        .map(list -> list.stream()
            .map(ClusterUtil::mapToInternalBrokerConfig)
            .collect(Collectors.toList()))
        .flatMapMany(Flux::fromIterable);
  }

  @Override
  public Flux<Broker> getBrokers(KafkaCluster cluster) {
    return adminClientService
        .getOrCreateAdminClient(cluster)
        .flatMap(client -> ClusterUtil.toMono(client.getAdminClient().describeCluster().nodes())
            .map(n -> n.stream().map(node -> {
              Broker broker = new Broker();
              broker.setId(node.id());
              broker.setHost(node.host());
              return broker;
            }).collect(Collectors.toList())))
        .flatMapMany(Flux::fromIterable);
  }

  @Override
  public Mono<Node> getController(KafkaCluster cluster) {
    return adminClientService
        .getOrCreateAdminClient(cluster)
        .map(ExtendedAdminClient::getAdminClient)
        .flatMap(adminClient -> ClusterUtil.toMono(adminClient.describeCluster().controller()));
  }
}
@@ -1,5 +1,7 @@
 package com.provectus.kafka.ui.service;
 
+import static com.provectus.kafka.ui.util.Constants.DELETE_TOPIC_ENABLE;
+
 import com.provectus.kafka.ui.exception.ClusterNotFoundException;
 import com.provectus.kafka.ui.exception.IllegalEntityStateException;
 import com.provectus.kafka.ui.exception.NotFoundException;

@@ -20,6 +22,7 @@ import com.provectus.kafka.ui.model.ConsumerGroupDetails;
 import com.provectus.kafka.ui.model.ConsumerPosition;
 import com.provectus.kafka.ui.model.CreateTopicMessage;
 import com.provectus.kafka.ui.model.ExtendedAdminClient;
+import com.provectus.kafka.ui.model.Feature;
 import com.provectus.kafka.ui.model.InternalTopic;
 import com.provectus.kafka.ui.model.KafkaCluster;
 import com.provectus.kafka.ui.model.PartitionsIncrease;

@@ -65,6 +68,8 @@ public class ClusterService {
   private final ClustersStorage clustersStorage;
   private final ClusterMapper clusterMapper;
   private final KafkaService kafkaService;
+  private final AdminClientService adminClientService;
+  private final BrokerService brokerService;
   private final ConsumingService consumingService;
   private final DeserializationService deserializationService;
   private final DescribeLogDirsMapper describeLogDirsMapper;

@@ -212,23 +217,16 @@ public class ClusterService {
   }
 
   public Flux<Broker> getBrokers(String clusterName) {
-    return kafkaService
-        .getOrCreateAdminClient(clustersStorage.getClusterByName(clusterName).orElseThrow())
-        .flatMap(client -> ClusterUtil.toMono(client.getAdminClient().describeCluster().nodes())
-            .map(n -> n.stream().map(node -> {
-              Broker broker = new Broker();
-              broker.setId(node.id());
-              broker.setHost(node.host());
-              return broker;
-            }).collect(Collectors.toList())))
-        .flatMapMany(Flux::fromIterable);
-  }
-
-  public Mono<List<BrokerConfig>> getBrokerConfig(String clusterName, Integer brokerId) {
     return Mono.justOrEmpty(clustersStorage.getClusterByName(clusterName))
         .switchIfEmpty(Mono.error(ClusterNotFoundException::new))
-        .flatMap(c -> kafkaService.getBrokerConfigs(c, brokerId))
-        .map(c -> c.stream().map(clusterMapper::toBrokerConfig).collect(Collectors.toList()));
+        .flatMapMany(brokerService::getBrokers);
+  }
+
+  public Flux<BrokerConfig> getBrokerConfig(String clusterName, Integer brokerId) {
+    return Mono.justOrEmpty(clustersStorage.getClusterByName(clusterName))
+        .switchIfEmpty(Mono.error(ClusterNotFoundException::new))
+        .flatMapMany(c -> brokerService.getBrokersConfig(c, brokerId))
+        .map(clusterMapper::toBrokerConfig);
   }
 
   @SneakyThrows

@@ -247,8 +245,12 @@ public class ClusterService {
         .orElseThrow(ClusterNotFoundException::new);
     var topic = getTopicDetails(clusterName, topicName)
         .orElseThrow(TopicNotFoundException::new);
-    return kafkaService.deleteTopic(cluster, topic.getName())
-        .doOnNext(t -> updateCluster(topicName, clusterName, cluster));
+    if (cluster.getFeatures().contains(Feature.TOPIC_DELETION)) {
+      return kafkaService.deleteTopic(cluster, topic.getName())
+          .doOnNext(t -> updateCluster(topicName, clusterName, cluster));
+    } else {
+      return Mono.error(new ValidationException("Topic deletion restricted"));
+    }
   }
 
   private KafkaCluster updateCluster(InternalTopic topic, String clusterName,

@@ -311,7 +313,7 @@ public class ClusterService {
   public Mono<Void> deleteConsumerGroupById(String clusterName,
                                             String groupId) {
     return clustersStorage.getClusterByName(clusterName)
-        .map(cluster -> kafkaService.getOrCreateAdminClient(cluster)
+        .map(cluster -> adminClientService.getOrCreateAdminClient(cluster)
             .map(ExtendedAdminClient::getAdminClient)
             .map(adminClient -> adminClient.deleteConsumerGroups(List.of(groupId)))
             .map(DeleteConsumerGroupsResult::all)
@@ -23,6 +23,7 @@ public class ClustersStorage {
   private final ClustersProperties clusterProperties;
 
   private final ClusterMapper clusterMapper = Mappers.getMapper(ClusterMapper.class);
+  private final FeatureService featureService;
 
   @PostConstruct
   public void init() {

@@ -36,7 +37,7 @@ public class ClustersStorage {
             clusterProperties.getName(),
             cluster.toBuilder()
                 .topics(new HashMap<>())
-                .features(Feature.getEnabledFeatures(cluster))
+                .features(featureService.getAvailableFeatures(cluster))
                 .build()
         );
   }
@@ -0,0 +1,15 @@
package com.provectus.kafka.ui.service;

import com.provectus.kafka.ui.model.Feature;
import com.provectus.kafka.ui.model.KafkaCluster;
import java.util.List;

public interface FeatureService {
  /**
   * Get available features.
   *
   * @param cluster - cluster
   * @return List of Feature
   */
  List<Feature> getAvailableFeatures(KafkaCluster cluster);
}
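A short sketch of how this interface is consumed (both call sites appear elsewhere in this diff, in ClustersStorage and ClusterService; the snippet itself is illustrative):

    // Sketch: features are computed once per cluster at startup, then checked where needed.
    List<Feature> features = featureService.getAvailableFeatures(cluster);
    KafkaCluster updated = cluster.toBuilder().features(features).build();

    boolean deletionAllowed = updated.getFeatures().contains(Feature.TOPIC_DELETION);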
@@ -0,0 +1,59 @@
package com.provectus.kafka.ui.service;

import static com.provectus.kafka.ui.util.Constants.DELETE_TOPIC_ENABLE;

import com.provectus.kafka.ui.model.Feature;
import com.provectus.kafka.ui.model.KafkaCluster;
import java.util.ArrayList;
import java.util.List;
import java.util.Optional;
import java.util.function.Predicate;
import lombok.RequiredArgsConstructor;
import lombok.extern.log4j.Log4j2;
import org.apache.kafka.common.Node;
import org.springframework.stereotype.Service;

@Service
@RequiredArgsConstructor
@Log4j2
public class FeatureServiceImpl implements FeatureService {

  private final BrokerService brokerService;

  @Override
  public List<Feature> getAvailableFeatures(KafkaCluster cluster) {
    List<Feature> features = new ArrayList<>();

    if (Optional.ofNullable(cluster.getKafkaConnect())
        .filter(Predicate.not(List::isEmpty))
        .isPresent()) {
      features.add(Feature.KAFKA_CONNECT);
    }

    if (cluster.getKsqldbServer() != null) {
      features.add(Feature.KSQL_DB);
    }

    if (cluster.getSchemaRegistry() != null) {
      features.add(Feature.SCHEMA_REGISTRY);
    }

    if (topicDeletionCheck(cluster)) {
      features.add(Feature.TOPIC_DELETION);
    }

    return features;
  }

  private boolean topicDeletionCheck(KafkaCluster cluster) {
    return brokerService.getController(cluster)
        .map(Node::id)
        .flatMap(broker -> brokerService.getBrokerConfigMap(cluster, broker))
        .map(config -> {
          if (config != null && config.get(DELETE_TOPIC_ENABLE) != null) {
            return Boolean.parseBoolean(config.get(DELETE_TOPIC_ENABLE).getValue());
          }
          return false;
        }).blockOptional().orElse(false);
  }
}
@@ -47,15 +47,12 @@ import java.util.Optional;
 import java.util.Properties;
 import java.util.UUID;
 import java.util.concurrent.CompletableFuture;
-import java.util.concurrent.ConcurrentHashMap;
 import java.util.stream.Collectors;
 import java.util.stream.Stream;
 import lombok.RequiredArgsConstructor;
-import lombok.Setter;
 import lombok.SneakyThrows;
 import lombok.extern.log4j.Log4j2;
 import org.apache.kafka.clients.admin.AdminClient;
-import org.apache.kafka.clients.admin.AdminClientConfig;
 import org.apache.kafka.clients.admin.AlterConfigOp;
 import org.apache.kafka.clients.admin.Config;
 import org.apache.kafka.clients.admin.ConfigEntry;

@@ -88,7 +85,6 @@ import org.apache.kafka.common.requests.DescribeLogDirsResponse;
 import org.apache.kafka.common.serialization.ByteArraySerializer;
 import org.apache.kafka.common.serialization.BytesDeserializer;
 import org.apache.kafka.common.utils.Bytes;
-import org.springframework.beans.factory.annotation.Value;
 import org.springframework.stereotype.Service;
 import reactor.core.publisher.Flux;
 import reactor.core.publisher.Mono;

@@ -104,13 +100,10 @@ public class KafkaService {
   private static final ListTopicsOptions LIST_TOPICS_OPTIONS =
       new ListTopicsOptions().listInternal(true);
   private final ZookeeperService zookeeperService;
-  private final Map<String, ExtendedAdminClient> adminClientCache = new ConcurrentHashMap<>();
   private final JmxClusterUtil jmxClusterUtil;
   private final ClustersStorage clustersStorage;
   private final DeserializationService deserializationService;
-  @Setter // used in tests
-  @Value("${kafka.admin-client-timeout}")
-  private int clientTimeout;
+  private final AdminClientService adminClientService;
 
   public KafkaCluster getUpdatedCluster(KafkaCluster cluster, InternalTopic updatedTopic) {
     final Map<String, InternalTopic> topics =

@@ -129,7 +122,7 @@ public class KafkaService {
 
   @SneakyThrows
   public Mono<KafkaCluster> getUpdatedCluster(KafkaCluster cluster) {
-    return getOrCreateAdminClient(cluster)
+    return adminClientService.getOrCreateAdminClient(cluster)
         .flatMap(
             ac -> ClusterUtil.getClusterVersion(ac.getAdminClient()).flatMap(
                 version ->

@@ -306,36 +299,17 @@ public class KafkaService {
   }
 
   public Mono<InternalTopic> createTopic(KafkaCluster cluster, Mono<TopicCreation> topicCreation) {
-    return getOrCreateAdminClient(cluster)
+    return adminClientService.getOrCreateAdminClient(cluster)
         .flatMap(ac -> createTopic(ac.getAdminClient(), topicCreation));
   }
 
   public Mono<Void> deleteTopic(KafkaCluster cluster, String topicName) {
-    return getOrCreateAdminClient(cluster)
+    return adminClientService.getOrCreateAdminClient(cluster)
         .map(ExtendedAdminClient::getAdminClient)
         .map(adminClient -> adminClient.deleteTopics(List.of(topicName)))
         .then();
   }
 
-
-  @SneakyThrows
-  public Mono<ExtendedAdminClient> getOrCreateAdminClient(KafkaCluster cluster) {
-    return Mono.justOrEmpty(adminClientCache.get(cluster.getName()))
-        .switchIfEmpty(createAdminClient(cluster))
-        .map(e -> adminClientCache.computeIfAbsent(cluster.getName(), key -> e));
-  }
-
-  public Mono<ExtendedAdminClient> createAdminClient(KafkaCluster kafkaCluster) {
-    return Mono.fromSupplier(() -> {
-      Properties properties = new Properties();
-      properties.putAll(kafkaCluster.getProperties());
-      properties
-          .put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaCluster.getBootstrapServers());
-      properties.put(AdminClientConfig.REQUEST_TIMEOUT_MS_CONFIG, clientTimeout);
-      return AdminClient.create(properties);
-    }).flatMap(ExtendedAdminClient::extendedAdminClient);
-  }
-
   @SneakyThrows
   private Mono<Map<String, List<InternalTopicConfig>>> loadTopicsConfig(
       AdminClient adminClient, Collection<String> topicNames) {

@@ -353,45 +327,9 @@ public class KafkaService {
             .collect(Collectors.toList()))));
   }
 
-  private Mono<Map<String, List<InternalBrokerConfig>>> loadBrokersConfig(
-      AdminClient adminClient, List<Integer> brokersIds) {
-    List<ConfigResource> resources = brokersIds.stream()
-        .map(brokerId -> new ConfigResource(ConfigResource.Type.BROKER, Integer.toString(brokerId)))
-        .collect(Collectors.toList());
-
-    return ClusterUtil.toMono(adminClient.describeConfigs(resources,
-        new DescribeConfigsOptions().includeSynonyms(true)).all())
-        .map(configs ->
-            configs.entrySet().stream().collect(Collectors.toMap(
-                c -> c.getKey().name(),
-                c -> c.getValue().entries().stream()
-                    .map(ClusterUtil::mapToInternalBrokerConfig)
-                    .collect(Collectors.toList()))));
-  }
-
-  private Mono<List<InternalBrokerConfig>> loadBrokersConfig(
-      AdminClient adminClient, Integer brokerId) {
-    return loadBrokersConfig(adminClient, Collections.singletonList(brokerId))
-        .map(map -> map.values().stream()
-            .findFirst()
-            .orElseThrow(() -> new IllegalEntityStateException(
-                String.format("Config for broker %s not found", brokerId))));
-  }
-
-  public Mono<List<InternalBrokerConfig>> getBrokerConfigs(KafkaCluster cluster, Integer brokerId) {
-    return getOrCreateAdminClient(cluster)
-        .flatMap(adminClient -> {
-          if (!cluster.getBrokers().contains(brokerId)) {
-            return Mono.error(
-                new NotFoundException(String.format("Broker with id %s not found", brokerId)));
-          }
-          return loadBrokersConfig(adminClient.getAdminClient(), brokerId);
-        });
-  }
-
   public Mono<List<InternalConsumerGroup>> getConsumerGroupsInternal(
       KafkaCluster cluster) {
-    return getOrCreateAdminClient(cluster).flatMap(ac ->
+    return adminClientService.getOrCreateAdminClient(cluster).flatMap(ac ->
         ClusterUtil.toMono(ac.getAdminClient().listConsumerGroups().all())
             .flatMap(s ->
                 getConsumerGroupsInternal(

@@ -404,7 +342,7 @@ public class KafkaService {
   public Mono<List<InternalConsumerGroup>> getConsumerGroupsInternal(
       KafkaCluster cluster, List<String> groupIds) {
 
-    return getOrCreateAdminClient(cluster).flatMap(ac ->
+    return adminClientService.getOrCreateAdminClient(cluster).flatMap(ac ->
         ClusterUtil.toMono(
             ac.getAdminClient().describeConsumerGroups(groupIds).all()
         ).map(Map::values)

@@ -446,7 +384,7 @@ public class KafkaService {
 
   public Mono<Map<TopicPartition, OffsetAndMetadata>> groupMetadata(KafkaCluster cluster,
                                                                     String consumerGroupId) {
-    return getOrCreateAdminClient(cluster).map(ac ->
+    return adminClientService.getOrCreateAdminClient(cluster).map(ac ->
         ac.getAdminClient()
             .listConsumerGroupOffsets(consumerGroupId)
             .partitionsToOffsetAndMetadata()

@@ -482,7 +420,7 @@ public class KafkaService {
   public Mono<InternalTopic> updateTopic(KafkaCluster cluster, String topicName,
                                          TopicUpdate topicUpdate) {
     ConfigResource topicCr = new ConfigResource(ConfigResource.Type.TOPIC, topicName);
-    return getOrCreateAdminClient(cluster)
+    return adminClientService.getOrCreateAdminClient(cluster)
         .flatMap(ac -> {
           if (ac.getSupportedFeatures()
               .contains(ExtendedAdminClient.SupportedFeature.INCREMENTAL_ALTER_CONFIGS)) {

@@ -727,7 +665,8 @@ public class KafkaService {
     var records = offsets.entrySet().stream()
         .map(entry -> Map.entry(entry.getKey(), RecordsToDelete.beforeOffset(entry.getValue())))
         .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
-    return getOrCreateAdminClient(cluster).map(ExtendedAdminClient::getAdminClient)
+    return adminClientService.getOrCreateAdminClient(cluster)
+        .map(ExtendedAdminClient::getAdminClient)
         .map(ac -> ac.deleteRecords(records)).then();
   }
 

@@ -788,7 +727,7 @@ public class KafkaService {
       KafkaCluster cluster,
       String topicName,
       PartitionsIncrease partitionsIncrease) {
-    return getOrCreateAdminClient(cluster)
+    return adminClientService.getOrCreateAdminClient(cluster)
         .flatMap(ac -> {
           Integer actualCount = cluster.getTopics().get(topicName).getPartitionCount();
           Integer requestedCount = partitionsIncrease.getTotalPartitionsCount();

@@ -830,7 +769,7 @@ public class KafkaService {
       KafkaCluster cluster,
       String topicName,
       ReplicationFactorChange replicationFactorChange) {
-    return getOrCreateAdminClient(cluster)
+    return adminClientService.getOrCreateAdminClient(cluster)
         .flatMap(ac -> {
           Integer actual = cluster.getTopics().get(topicName).getReplicationFactor();
           Integer requested = replicationFactorChange.getTotalReplicationFactor();

@@ -855,7 +794,7 @@ public class KafkaService {
 
   public Mono<Map<Integer, Map<String, DescribeLogDirsResponse.LogDirInfo>>> getClusterLogDirs(
       KafkaCluster cluster, List<Integer> reqBrokers) {
-    return getOrCreateAdminClient(cluster)
+    return adminClientService.getOrCreateAdminClient(cluster)
         .map(admin -> {
           List<Integer> brokers = new ArrayList<>(cluster.getBrokers());
           if (reqBrokers != null && !reqBrokers.isEmpty()) {

@@ -971,7 +910,7 @@ public class KafkaService {
 
   public Mono<Void> updateBrokerLogDir(KafkaCluster cluster, Integer broker,
                                        BrokerLogdirUpdate brokerLogDir) {
-    return getOrCreateAdminClient(cluster)
+    return adminClientService.getOrCreateAdminClient(cluster)
         .flatMap(ac -> updateBrokerLogDir(ac, brokerLogDir, broker));
   }
 

@@ -996,7 +935,7 @@ public class KafkaService {
       Integer broker,
       String name,
       String value) {
-    return getOrCreateAdminClient(cluster)
+    return adminClientService.getOrCreateAdminClient(cluster)
         .flatMap(ac -> updateBrokerConfigByName(ac, broker, name, value));
   }
 
@@ -0,0 +1,5 @@
package com.provectus.kafka.ui.util;

public class Constants {
  public static final String DELETE_TOPIC_ENABLE = "delete.topic.enable";
}
@@ -51,8 +51,9 @@ public class OffsetsResetServiceTest extends AbstractBaseTest {
 
   @BeforeEach
   void init() {
-    kafkaService = new KafkaService(null, null, null, null);
-    kafkaService.setClientTimeout(5_000);
+    AdminClientServiceImpl adminClientService = new AdminClientServiceImpl();
+    adminClientService.setClientTimeout(5_000);
+    kafkaService = new KafkaService(null, null, null, null, adminClientService);
     offsetsResetService = new OffsetsResetService(kafkaService);
 
     createTopic(new NewTopic(topic, PARTITIONS, (short) 1));
@@ -1522,6 +1522,7 @@ components:
         - SCHEMA_REGISTRY
         - KAFKA_CONNECT
         - KSQL_DB
+        - TOPIC_DELETION
       required:
         - id
         - name
@@ -34,6 +34,9 @@ const Cluster: React.FC = () => {
   const hasSchemaRegistryConfigured = features.includes(
     ClusterFeaturesEnum.SCHEMA_REGISTRY
   );
+  const isTopicDeletionAllowed = features.includes(
+    ClusterFeaturesEnum.TOPIC_DELETION
+  );
   const hasKsqlDbConfigured = features.includes(ClusterFeaturesEnum.KSQL_DB);
 
   const contextValue = React.useMemo(

@@ -41,6 +44,7 @@ const Cluster: React.FC = () => {
       isReadOnly,
       hasKafkaConnectConfigured,
       hasSchemaRegistryConfigured,
+      isTopicDeletionAllowed,
     }),
     [features]
   );
@@ -168,6 +168,7 @@ describe('Details', () => {
             isReadOnly: true,
             hasKafkaConnectConfigured: true,
             hasSchemaRegistryConfigured: true,
+            isTopicDeletionAllowed: true,
           }}
         >
           {setupWrapper({ versions })}
@@ -100,6 +100,7 @@ describe('List', () => {
           isReadOnly: true,
           hasKafkaConnectConfigured: true,
           hasSchemaRegistryConfigured: true,
+          isTopicDeletionAllowed: true,
         }}
       >
         {setupWrapper({ schemas: [] })}
@@ -36,7 +36,8 @@ const ListItem: React.FC<ListItemProps> = ({
   clusterName,
   clearTopicMessages,
 }) => {
-  const { isReadOnly } = React.useContext(ClusterContext);
+  const { isReadOnly, isTopicDeletionAllowed } =
+    React.useContext(ClusterContext);
 
   const [isDeleteTopicConfirmationVisible, setDeleteTopicConfirmationVisible] =
     React.useState(false);

@@ -125,11 +126,13 @@ const ListItem: React.FC<ListItemProps> = ({
         <DropdownItem onClick={clearTopicMessagesHandler}>
           <span className="has-text-danger">Clear Messages</span>
         </DropdownItem>
-        <DropdownItem
-          onClick={() => setDeleteTopicConfirmationVisible(true)}
-        >
-          <span className="has-text-danger">Remove Topic</span>
-        </DropdownItem>
+        {isTopicDeletionAllowed && (
+          <DropdownItem
+            onClick={() => setDeleteTopicConfirmationVisible(true)}
+          >
+            <span className="has-text-danger">Remove Topic</span>
+          </DropdownItem>
+        )}
       </Dropdown>
     </div>
     <ConfirmationModal
@@ -50,6 +50,7 @@ describe('List', () => {
           isReadOnly: true,
           hasKafkaConnectConfigured: true,
           hasSchemaRegistryConfigured: true,
+          isTopicDeletionAllowed: true,
           ...contextProps,
         }}
       >

@@ -90,6 +91,7 @@ describe('List', () => {
           isReadOnly: false,
           hasKafkaConnectConfigured: true,
           hasSchemaRegistryConfigured: true,
+          isTopicDeletionAllowed: true,
         }}
       >
         {setupComponent()}

@@ -147,6 +149,7 @@ describe('List', () => {
           isReadOnly: false,
           hasKafkaConnectConfigured: true,
           hasSchemaRegistryConfigured: true,
+          isTopicDeletionAllowed: true,
         }}
       >
         {setupComponent({
@@ -17,6 +17,11 @@ jest.mock(
   () => 'mock-ConfirmationModal'
 );
 
+jest.mock('react-redux', () => ({
+  ...jest.requireActual('react-redux'),
+  useSelector: () => ['TOPIC_DELETION'],
+}));
+
 describe('ListItem', () => {
   const setupComponent = (props: Partial<ListItemProps> = {}) => (
     <ListItem
@@ -35,7 +35,8 @@ const Details: React.FC<Props> = ({
   clearTopicMessages,
 }) => {
   const history = useHistory();
-  const { isReadOnly } = React.useContext(ClusterContext);
+  const { isReadOnly, isTopicDeletionAllowed } =
+    React.useContext(ClusterContext);
   const [isDeleteTopicConfirmationVisible, setDeleteTopicConfirmationVisible] =
     React.useState(false);
   const deleteTopicHandler = React.useCallback(() => {

@@ -95,13 +96,15 @@ const Details: React.FC<Props> = ({
               >
                 Clear All Messages
               </button>
-              <button
-                className="button is-danger"
-                type="button"
-                onClick={() => setDeleteTopicConfirmationVisible(true)}
-              >
-                Delete Topic
-              </button>
+              {isTopicDeletionAllowed && (
+                <button
+                  className="button is-danger"
+                  type="button"
+                  onClick={() => setDeleteTopicConfirmationVisible(true)}
+                >
+                  Delete Topic
+                </button>
+              )}
 
               <Link
                 to={clusterTopicSendMessagePath(clusterName, topicName)}
@@ -7,6 +7,10 @@ import {
   internalTopicPayload,
   externalTopicPayload,
 } from 'redux/reducers/topics/__test__/fixtures';
+import { Provider } from 'react-redux';
+import configureStore from 'redux/store/configureStore';
+
+const store = configureStore();
 
 describe('Details', () => {
   const mockDelete = jest.fn();

@@ -18,24 +22,27 @@ describe('Details', () => {
   describe('when it has readonly flag', () => {
     it('does not render the Action button a Topic', () => {
       const component = mount(
-        <StaticRouter>
-          <ClusterContext.Provider
-            value={{
-              isReadOnly: true,
-              hasKafkaConnectConfigured: true,
-              hasSchemaRegistryConfigured: true,
-            }}
-          >
-            <Details
-              clusterName={mockClusterName}
-              topicName={internalTopicPayload.name}
-              name={internalTopicPayload.name}
-              isInternal={mockInternalTopicPayload}
-              deleteTopic={mockDelete}
-              clearTopicMessages={mockClearTopicMessages}
-            />
-          </ClusterContext.Provider>
-        </StaticRouter>
+        <Provider store={store}>
+          <StaticRouter>
+            <ClusterContext.Provider
+              value={{
+                isReadOnly: true,
+                hasKafkaConnectConfigured: true,
+                hasSchemaRegistryConfigured: true,
+                isTopicDeletionAllowed: true,
+              }}
+            >
+              <Details
+                clusterName={mockClusterName}
+                topicName={internalTopicPayload.name}
+                name={internalTopicPayload.name}
+                isInternal={mockInternalTopicPayload}
+                deleteTopic={mockDelete}
+                clearTopicMessages={mockClearTopicMessages}
+              />
+            </ClusterContext.Provider>
+          </StaticRouter>
+        </Provider>
       );
 
       expect(component.exists('button')).toBeFalsy();

@@ -45,24 +52,27 @@ describe('Details', () => {
   describe('when it does not have readonly flag', () => {
     it('renders the Action button a Topic', () => {
       const component = mount(
-        <StaticRouter>
-          <ClusterContext.Provider
-            value={{
-              isReadOnly: false,
-              hasKafkaConnectConfigured: true,
-              hasSchemaRegistryConfigured: true,
-            }}
-          >
-            <Details
-              clusterName={mockClusterName}
-              topicName={internalTopicPayload.name}
-              name={internalTopicPayload.name}
-              isInternal={mockExternalTopicPayload}
-              deleteTopic={mockDelete}
-              clearTopicMessages={mockClearTopicMessages}
-            />
-          </ClusterContext.Provider>
-        </StaticRouter>
+        <Provider store={store}>
+          <StaticRouter>
+            <ClusterContext.Provider
+              value={{
+                isReadOnly: false,
+                hasKafkaConnectConfigured: true,
+                hasSchemaRegistryConfigured: true,
+                isTopicDeletionAllowed: true,
+              }}
+            >
+              <Details
+                clusterName={mockClusterName}
+                topicName={internalTopicPayload.name}
+                name={internalTopicPayload.name}
+                isInternal={mockExternalTopicPayload}
+                deleteTopic={mockDelete}
+                clearTopicMessages={mockClearTopicMessages}
+              />
+            </ClusterContext.Provider>
+          </StaticRouter>
+        </Provider>
       );
 
       expect(component.exists('button')).toBeTruthy();
@@ -4,12 +4,14 @@ export interface ContextProps {
   isReadOnly: boolean;
   hasKafkaConnectConfigured: boolean;
   hasSchemaRegistryConfigured: boolean;
+  isTopicDeletionAllowed: boolean;
 }
 
 export const initialValue: ContextProps = {
   isReadOnly: false,
   hasKafkaConnectConfigured: false,
   hasSchemaRegistryConfigured: false,
+  isTopicDeletionAllowed: true,
 };
 const ClusterContext = React.createContext(initialValue);
 