[BE Refactoring] Reactive admin client (#882)
* All AdminClient interactions moved to ReactiveAdminClient
This commit is contained in: parent da477b2896, commit b05da3373e
14 changed files with 519 additions and 480 deletions
@@ -108,7 +108,7 @@ public class ConsumerGroupsController implements ConsumerGroupsApi {
               new ValidationException("Unknown resetType " + reset.getResetType())
           );
         }
-      }).map(o -> ResponseEntity.ok().build());
+      }).thenReturn(ResponseEntity.ok().build());
   }
 
 }
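Note: the switch from map(...) to thenReturn(...) above matters because the upstream reset call now completes without emitting a value, and a mapper on an empty Mono is never invoked. A minimal Reactor sketch of the difference (class and variable names are illustrative, not from this commit):

import reactor.core.publisher.Mono;

public class ThenReturnVsMapDemo {
  public static void main(String[] args) {
    // Models an admin operation that only signals completion (e.g. a Mono<Void>).
    Mono<Void> completion = Mono.empty();

    // map is skipped because no element is ever emitted:
    Mono<String> viaMap = completion.map(v -> "done");
    System.out.println(viaMap.blockOptional().isPresent()); // false

    // thenReturn emits its value once the upstream completes:
    Mono<String> viaThenReturn = completion.thenReturn("done");
    System.out.println(viaThenReturn.block()); // done
  }
}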
@@ -1,27 +0,0 @@
-package com.provectus.kafka.ui.model;
-
-import com.provectus.kafka.ui.util.ClusterUtil;
-import java.util.Set;
-import lombok.Data;
-import lombok.RequiredArgsConstructor;
-import org.apache.kafka.clients.admin.AdminClient;
-import reactor.core.publisher.Mono;
-
-@Data
-@RequiredArgsConstructor
-public class ExtendedAdminClient {
-
-  private final AdminClient adminClient;
-  private final Set<SupportedFeature> supportedFeatures;
-
-  public static Mono<ExtendedAdminClient> extendedAdminClient(AdminClient adminClient) {
-
-    return ClusterUtil.getSupportedFeatures(adminClient)
-        .map(s -> new ExtendedAdminClient(adminClient, s));
-  }
-
-  public enum SupportedFeature {
-    INCREMENTAL_ALTER_CONFIGS,
-    ALTER_CONFIGS
-  }
-}
@@ -1,23 +1,10 @@
 package com.provectus.kafka.ui.service;
 
-import com.provectus.kafka.ui.model.ExtendedAdminClient;
 import com.provectus.kafka.ui.model.KafkaCluster;
 import reactor.core.publisher.Mono;
 
 public interface AdminClientService {
-  /**
-   * Get ExtendedAdminClient from cache if exists or create new if not.
-   *
-   * @param cluster - cluster
-   * @return The Mono of ExtendedAdminClient
-   */
-  Mono<ExtendedAdminClient> getOrCreateAdminClient(KafkaCluster cluster);
-
-  /**
-   * Create new ExtendedAdminClient.
-   *
-   * @param cluster - cluster
-   * @return The Mono of ExtendedAdminClient
-   */
-  Mono<ExtendedAdminClient> createAdminClient(KafkaCluster cluster);
+
+  Mono<ReactiveAdminClient> get(KafkaCluster cluster);
+
 }
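Note: the interface is narrowed to a single get() method that hands out a cached, already-initialized ReactiveAdminClient. An illustrative caller (not part of this commit) composing on the returned Mono, using methods that appear elsewhere in this diff:

import com.provectus.kafka.ui.model.KafkaCluster;
import com.provectus.kafka.ui.service.AdminClientService;
import com.provectus.kafka.ui.service.ReactiveAdminClient;
import reactor.core.publisher.Mono;

class BrokerCountExample {
  // Hypothetical helper: counts brokers by chaining describeCluster() on the shared client.
  Mono<Integer> brokerCount(AdminClientService adminClientService, KafkaCluster cluster) {
    return adminClientService.get(cluster)
        .flatMap(ReactiveAdminClient::describeCluster)
        .map(description -> description.getNodes().size());
  }
}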
@@ -1,7 +1,7 @@
 package com.provectus.kafka.ui.service;
 
-import com.provectus.kafka.ui.model.ExtendedAdminClient;
 import com.provectus.kafka.ui.model.KafkaCluster;
+import java.io.Closeable;
 import java.util.Map;
 import java.util.Properties;
 import java.util.concurrent.ConcurrentHashMap;
@@ -17,21 +17,20 @@ import reactor.core.publisher.Mono;
 @Service
 @RequiredArgsConstructor
 @Log4j2
-public class AdminClientServiceImpl implements AdminClientService {
-  private final Map<String, ExtendedAdminClient> adminClientCache = new ConcurrentHashMap<>();
+public class AdminClientServiceImpl implements AdminClientService, Closeable {
+  private final Map<String, ReactiveAdminClient> adminClientCache = new ConcurrentHashMap<>();
   @Setter // used in tests
   @Value("${kafka.admin-client-timeout}")
   private int clientTimeout;
 
   @Override
-  public Mono<ExtendedAdminClient> getOrCreateAdminClient(KafkaCluster cluster) {
+  public Mono<ReactiveAdminClient> get(KafkaCluster cluster) {
     return Mono.justOrEmpty(adminClientCache.get(cluster.getName()))
         .switchIfEmpty(createAdminClient(cluster))
         .map(e -> adminClientCache.computeIfAbsent(cluster.getName(), key -> e));
   }
 
-  @Override
-  public Mono<ExtendedAdminClient> createAdminClient(KafkaCluster cluster) {
+  private Mono<ReactiveAdminClient> createAdminClient(KafkaCluster cluster) {
     return Mono.fromSupplier(() -> {
       Properties properties = new Properties();
       properties.putAll(cluster.getProperties());
@@ -39,6 +38,11 @@ public class AdminClientServiceImpl implements AdminClientService {
           .put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, cluster.getBootstrapServers());
       properties.put(AdminClientConfig.REQUEST_TIMEOUT_MS_CONFIG, clientTimeout);
       return AdminClient.create(properties);
-    }).flatMap(ExtendedAdminClient::extendedAdminClient);
+    }).flatMap(ReactiveAdminClient::create);
+  }
+
+  @Override
+  public void close() {
+    adminClientCache.values().forEach(ReactiveAdminClient::close);
   }
 }
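Note: the get() implementation above follows a reactive get-or-create cache idiom: check the cache, lazily build on a miss, then publish via computeIfAbsent. A standalone sketch of the same pattern with the project-specific types removed (names are illustrative):

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import reactor.core.publisher.Mono;

class ReactiveCacheSketch<V> {
  private final Map<String, V> cache = new ConcurrentHashMap<>();

  // Returns the cached value if present; otherwise subscribes to the factory,
  // stores the produced value under the key, and emits whatever ends up cached.
  Mono<V> get(String key, Mono<V> factory) {
    return Mono.justOrEmpty(cache.get(key))
        .switchIfEmpty(factory)
        .map(v -> cache.computeIfAbsent(key, k -> v));
  }
}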
@@ -3,22 +3,17 @@ package com.provectus.kafka.ui.service;
 import com.provectus.kafka.ui.exception.IllegalEntityStateException;
 import com.provectus.kafka.ui.exception.NotFoundException;
 import com.provectus.kafka.ui.model.BrokerDTO;
-import com.provectus.kafka.ui.model.ExtendedAdminClient;
 import com.provectus.kafka.ui.model.InternalBrokerConfig;
 import com.provectus.kafka.ui.model.KafkaCluster;
 import com.provectus.kafka.ui.util.ClusterUtil;
 import java.util.Collections;
-import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.stream.Collectors;
 import lombok.RequiredArgsConstructor;
 import lombok.extern.log4j.Log4j2;
 import org.apache.kafka.clients.admin.ConfigEntry;
-import org.apache.kafka.clients.admin.DescribeConfigsOptions;
 import org.apache.kafka.common.Node;
-import org.apache.kafka.common.config.ConfigResource;
-import org.apache.kafka.common.errors.UnsupportedVersionException;
 import org.springframework.stereotype.Service;
 import reactor.core.publisher.Flux;
 import reactor.core.publisher.Mono;
@@ -32,23 +27,8 @@ public class BrokerServiceImpl implements BrokerService {
 
   private Mono<Map<Integer, List<ConfigEntry>>> loadBrokersConfig(
       KafkaCluster cluster, List<Integer> brokersIds) {
-    List<ConfigResource> resources = brokersIds.stream()
-        .map(brokerId -> new ConfigResource(ConfigResource.Type.BROKER, Integer.toString(brokerId)))
-        .collect(Collectors.toList());
-
-    return adminClientService.getOrCreateAdminClient(cluster)
-        .map(ExtendedAdminClient::getAdminClient)
-        .flatMap(adminClient ->
-            ClusterUtil.toMono(adminClient.describeConfigs(resources,
-                new DescribeConfigsOptions().includeSynonyms(true)).all())
-                .map(config -> config.entrySet()
-                    .stream()
-                    .collect(Collectors.toMap(
-                        c -> Integer.valueOf(c.getKey().name()),
-                        c -> List.copyOf(c.getValue().entries())
-                    ))
-                ))
-        .onErrorResume(UnsupportedVersionException.class, (e) -> Mono.just(new HashMap<>()));
+    return adminClientService.get(cluster)
+        .flatMap(ac -> ac.loadBrokersConfig(brokersIds));
   }
 
   private Mono<List<ConfigEntry>> loadBrokersConfig(
@@ -87,22 +67,23 @@ public class BrokerServiceImpl implements BrokerService {
   @Override
   public Flux<BrokerDTO> getBrokers(KafkaCluster cluster) {
     return adminClientService
-        .getOrCreateAdminClient(cluster)
-        .flatMap(client -> ClusterUtil.toMono(client.getAdminClient().describeCluster().nodes())
-            .map(n -> n.stream().map(node -> {
+        .get(cluster)
+        .flatMap(ReactiveAdminClient::describeCluster)
+        .map(description -> description.getNodes().stream()
+            .map(node -> {
              BrokerDTO broker = new BrokerDTO();
              broker.setId(node.id());
              broker.setHost(node.host());
              return broker;
-            }).collect(Collectors.toList())))
+            }).collect(Collectors.toList()))
        .flatMapMany(Flux::fromIterable);
   }
 
   @Override
   public Mono<Node> getController(KafkaCluster cluster) {
     return adminClientService
-        .getOrCreateAdminClient(cluster)
-        .map(ExtendedAdminClient::getAdminClient)
-        .flatMap(adminClient -> ClusterUtil.toMono(adminClient.describeCluster().controller()));
+        .get(cluster)
+        .flatMap(ReactiveAdminClient::describeCluster)
+        .map(ReactiveAdminClient.ClusterDescription::getController);
   }
 }
@@ -19,7 +19,6 @@ import com.provectus.kafka.ui.model.ConsumerGroupDTO;
 import com.provectus.kafka.ui.model.ConsumerGroupDetailsDTO;
 import com.provectus.kafka.ui.model.ConsumerPosition;
 import com.provectus.kafka.ui.model.CreateTopicMessageDTO;
-import com.provectus.kafka.ui.model.ExtendedAdminClient;
 import com.provectus.kafka.ui.model.Feature;
 import com.provectus.kafka.ui.model.InternalTopic;
 import com.provectus.kafka.ui.model.KafkaCluster;
@@ -310,13 +309,8 @@ public class ClusterService {
   public Mono<Void> deleteConsumerGroupById(String clusterName,
                                             String groupId) {
     return clustersStorage.getClusterByName(clusterName)
-        .map(cluster -> adminClientService.getOrCreateAdminClient(cluster)
-            .map(ExtendedAdminClient::getAdminClient)
-            .flatMap(adminClient ->
-                ClusterUtil.toMono(
-                    adminClient.deleteConsumerGroups(List.of(groupId)).all()
-                )
-            )
+        .map(cluster -> adminClientService.get(cluster)
+            .flatMap(adminClient -> adminClient.deleteConsumerGroups(List.of(groupId)))
            .onErrorResume(this::reThrowCustomException)
        )
        .orElse(Mono.empty());
@@ -8,7 +8,6 @@ import com.provectus.kafka.ui.exception.ValidationException;
 import com.provectus.kafka.ui.model.BrokerLogdirUpdateDTO;
 import com.provectus.kafka.ui.model.CleanupPolicy;
 import com.provectus.kafka.ui.model.CreateTopicMessageDTO;
-import com.provectus.kafka.ui.model.ExtendedAdminClient;
 import com.provectus.kafka.ui.model.InternalBrokerDiskUsage;
 import com.provectus.kafka.ui.model.InternalBrokerMetrics;
 import com.provectus.kafka.ui.model.InternalClusterMetrics;
@@ -31,7 +30,6 @@ import com.provectus.kafka.ui.util.ClusterUtil;
 import com.provectus.kafka.ui.util.JmxClusterUtil;
 import com.provectus.kafka.ui.util.JmxMetricsName;
 import com.provectus.kafka.ui.util.JmxMetricsValueName;
-import com.provectus.kafka.ui.util.MapUtil;
 import java.math.BigDecimal;
 import java.util.ArrayList;
 import java.util.Collection;
@@ -46,24 +44,13 @@ import java.util.Properties;
 import java.util.UUID;
 import java.util.concurrent.CompletableFuture;
 import java.util.stream.Collectors;
-import java.util.stream.Stream;
 import lombok.RequiredArgsConstructor;
 import lombok.SneakyThrows;
 import lombok.extern.log4j.Log4j2;
-import org.apache.kafka.clients.admin.AdminClient;
-import org.apache.kafka.clients.admin.AlterConfigOp;
-import org.apache.kafka.clients.admin.Config;
-import org.apache.kafka.clients.admin.ConfigEntry;
-import org.apache.kafka.clients.admin.ConsumerGroupListing;
-import org.apache.kafka.clients.admin.DescribeConfigsOptions;
-import org.apache.kafka.clients.admin.ListTopicsOptions;
 import org.apache.kafka.clients.admin.NewPartitionReassignment;
 import org.apache.kafka.clients.admin.NewPartitions;
-import org.apache.kafka.clients.admin.NewTopic;
-import org.apache.kafka.clients.admin.RecordsToDelete;
 import org.apache.kafka.clients.consumer.ConsumerConfig;
 import org.apache.kafka.clients.consumer.KafkaConsumer;
-import org.apache.kafka.clients.consumer.OffsetAndMetadata;
 import org.apache.kafka.clients.producer.KafkaProducer;
 import org.apache.kafka.clients.producer.ProducerConfig;
 import org.apache.kafka.clients.producer.ProducerRecord;
@@ -71,7 +58,6 @@ import org.apache.kafka.clients.producer.RecordMetadata;
 import org.apache.kafka.common.Node;
 import org.apache.kafka.common.TopicPartition;
 import org.apache.kafka.common.TopicPartitionReplica;
-import org.apache.kafka.common.config.ConfigResource;
 import org.apache.kafka.common.errors.InvalidRequestException;
 import org.apache.kafka.common.errors.LogDirNotFoundException;
 import org.apache.kafka.common.errors.TimeoutException;
@@ -95,8 +81,6 @@ import reactor.util.function.Tuples;
 @Log4j2
 public class KafkaService {
 
-  private static final ListTopicsOptions LIST_TOPICS_OPTIONS =
-      new ListTopicsOptions().listInternal(true);
   private final ZookeeperService zookeeperService;
   private final JmxClusterUtil jmxClusterUtil;
   private final ClustersStorage clustersStorage;
@@ -122,18 +106,17 @@ public class KafkaService {
 
   @SneakyThrows
   public Mono<KafkaCluster> getUpdatedCluster(KafkaCluster cluster) {
-    return adminClientService.getOrCreateAdminClient(cluster)
+    return adminClientService.get(cluster)
        .flatMap(
-            ac -> ClusterUtil.getClusterVersion(ac.getAdminClient()).flatMap(
+            ac -> ac.getClusterVersion().flatMap(
                version ->
-                    getClusterMetrics(ac.getAdminClient())
-                        .flatMap(i -> fillJmxMetrics(i, cluster.getName(), ac.getAdminClient()))
+                    getClusterMetrics(ac)
+                        .flatMap(i -> fillJmxMetrics(i, cluster.getName(), ac))
                        .flatMap(clusterMetrics ->
-                            getTopicsData(ac.getAdminClient()).flatMap(it -> {
+                            getTopicsData(ac).flatMap(it -> {
                              if (cluster.getDisableLogDirsCollection() == null
                                  || !cluster.getDisableLogDirsCollection()) {
-                                return updateSegmentMetrics(
-                                    ac.getAdminClient(), clusterMetrics, it
+                                return updateSegmentMetrics(ac, clusterMetrics, it
                                );
                              } else {
                                return emptySegmentMetrics(clusterMetrics, it);
@@ -250,16 +233,16 @@ public class KafkaService {
   }
 
   @SneakyThrows
-  private Mono<List<InternalTopic>> getTopicsData(AdminClient adminClient) {
-    return ClusterUtil.toMono(adminClient.listTopics(LIST_TOPICS_OPTIONS).names())
-        .flatMap(topics -> getTopicsData(adminClient, topics).collectList());
+  private Mono<List<InternalTopic>> getTopicsData(ReactiveAdminClient client) {
+    return client.listTopics(true)
+        .flatMap(topics -> getTopicsData(client, topics).collectList());
   }
 
-  private Flux<InternalTopic> getTopicsData(AdminClient adminClient, Collection<String> topics) {
+  private Flux<InternalTopic> getTopicsData(ReactiveAdminClient client, Collection<String> topics) {
     final Mono<Map<String, List<InternalTopicConfig>>> configsMono =
-        loadTopicsConfig(adminClient, topics);
+        loadTopicsConfig(client, topics);
 
-    return ClusterUtil.toMono(adminClient.describeTopics(topics).all())
+    return client.describeTopics(topics)
        .map(m -> m.values().stream()
            .map(ClusterUtil::mapToInternalTopic).collect(Collectors.toList()))
        .flatMap(internalTopics -> configsMono
@@ -268,104 +251,72 @@ public class KafkaService {
   }
 
 
-  private Mono<InternalClusterMetrics> getClusterMetrics(AdminClient client) {
-    return ClusterUtil.toMono(client.describeCluster().nodes())
-        .flatMap(brokers ->
-            ClusterUtil.toMono(client.describeCluster().controller()).map(
-                c -> {
-                  InternalClusterMetrics.InternalClusterMetricsBuilder metricsBuilder =
-                      InternalClusterMetrics.builder();
-                  metricsBuilder.brokerCount(brokers.size()).activeControllers(c != null ? 1 : 0);
-                  return metricsBuilder.build();
-                }
-            )
+  private Mono<InternalClusterMetrics> getClusterMetrics(ReactiveAdminClient client) {
+    return client.describeCluster().map(desc ->
+        InternalClusterMetrics.builder()
+            .brokerCount(desc.getNodes().size())
+            .activeControllers(desc.getController() != null ? 1 : 0)
+            .build()
    );
   }
 
   @SneakyThrows
-  private Mono<String> createTopic(AdminClient adminClient, NewTopic newTopic) {
-    return ClusterUtil.toMono(adminClient.createTopics(Collections.singletonList(newTopic)).all(),
-        newTopic.name());
-  }
-
-  @SneakyThrows
-  public Mono<InternalTopic> createTopic(AdminClient adminClient,
+  public Mono<InternalTopic> createTopic(ReactiveAdminClient adminClient,
                                          Mono<TopicCreationDTO> topicCreation) {
-    return topicCreation.flatMap(
-        topicData -> {
-          NewTopic newTopic = new NewTopic(topicData.getName(), topicData.getPartitions(),
-              topicData.getReplicationFactor().shortValue());
-          newTopic.configs(topicData.getConfigs());
-          return createTopic(adminClient, newTopic).map(v -> topicData);
-        })
+    return topicCreation.flatMap(topicData ->
+        adminClient.createTopic(
+            topicData.getName(),
+            topicData.getPartitions(),
+            topicData.getReplicationFactor().shortValue(),
+            topicData.getConfigs()
+        ).thenReturn(topicData)
+    )
        .onErrorResume(t -> Mono.error(new TopicMetadataException(t.getMessage())))
-        .flatMap(
-            topicData ->
-                getTopicsData(adminClient, Collections.singleton(topicData.getName()))
-                    .next()
-        ).switchIfEmpty(Mono.error(new RuntimeException("Can't find created topic")));
+        .flatMap(topicData -> getUpdatedTopic(adminClient, topicData.getName()))
+        .switchIfEmpty(Mono.error(new RuntimeException("Can't find created topic")));
   }
 
   public Mono<InternalTopic> createTopic(
      KafkaCluster cluster, Mono<TopicCreationDTO> topicCreation) {
-    return adminClientService.getOrCreateAdminClient(cluster)
-        .flatMap(ac -> createTopic(ac.getAdminClient(), topicCreation));
+    return adminClientService.get(cluster).flatMap(ac -> createTopic(ac, topicCreation));
   }
 
   public Mono<Void> deleteTopic(KafkaCluster cluster, String topicName) {
-    return adminClientService.getOrCreateAdminClient(cluster)
-        .map(ExtendedAdminClient::getAdminClient)
-        .flatMap(adminClient ->
-            ClusterUtil.toMono(adminClient.deleteTopics(List.of(topicName)).all())
-        );
+    return adminClientService.get(cluster).flatMap(c -> c.deleteTopic(topicName));
   }
 
   @SneakyThrows
   private Mono<Map<String, List<InternalTopicConfig>>> loadTopicsConfig(
-      AdminClient adminClient, Collection<String> topicNames) {
-    List<ConfigResource> resources = topicNames.stream()
-        .map(topicName -> new ConfigResource(ConfigResource.Type.TOPIC, topicName))
-        .collect(Collectors.toList());
-
-    return ClusterUtil.toMono(adminClient.describeConfigs(resources,
-        new DescribeConfigsOptions().includeSynonyms(true)).all())
+      ReactiveAdminClient client, Collection<String> topicNames) {
+    return client.getTopicsConfig(topicNames)
        .map(configs ->
            configs.entrySet().stream().collect(Collectors.toMap(
-                c -> c.getKey().name(),
-                c -> c.getValue().entries().stream()
+                Map.Entry::getKey,
+                c -> c.getValue().stream()
                    .map(ClusterUtil::mapToInternalTopicConfig)
                    .collect(Collectors.toList()))));
   }
 
-  public Mono<List<InternalConsumerGroup>> getConsumerGroupsInternal(
-      KafkaCluster cluster) {
-    return adminClientService.getOrCreateAdminClient(cluster).flatMap(ac ->
-        ClusterUtil.toMono(ac.getAdminClient().listConsumerGroups().all())
-            .flatMap(s ->
-                getConsumerGroupsInternal(
-                    cluster,
-                    s.stream().map(ConsumerGroupListing::groupId).collect(Collectors.toList()))
-            )
-    );
+  public Mono<List<InternalConsumerGroup>> getConsumerGroupsInternal(KafkaCluster cluster) {
+    return adminClientService.get(cluster).flatMap(ac ->
+        ac.listConsumerGroups()
+            .flatMap(groupIds -> getConsumerGroupsInternal(cluster, groupIds)));
   }
 
-  public Mono<List<InternalConsumerGroup>> getConsumerGroupsInternal(
-      KafkaCluster cluster, List<String> groupIds) {
-
-    return adminClientService.getOrCreateAdminClient(cluster).flatMap(ac ->
-        ClusterUtil.toMono(
-            ac.getAdminClient().describeConsumerGroups(groupIds).all()
-        ).map(Map::values)
-    ).flatMap(descriptions ->
-        Flux.fromIterable(descriptions)
-            .parallel()
-            .flatMap(d ->
-                groupMetadata(cluster, d.groupId())
-                    .map(offsets -> ClusterUtil.convertToInternalConsumerGroup(d, offsets))
-            )
-            .sequential()
-            .collectList()
-    );
+  public Mono<List<InternalConsumerGroup>> getConsumerGroupsInternal(KafkaCluster cluster,
+                                                                     List<String> groupIds) {
+    return adminClientService.get(cluster).flatMap(ac ->
+        ac.describeConsumerGroups(groupIds)
+            .map(Map::values)
+            .flatMap(descriptions ->
+                Flux.fromIterable(descriptions)
+                    .parallel()
+                    .flatMap(d ->
+                        ac.listConsumerGroupOffsets(d.groupId())
+                            .map(offsets -> ClusterUtil.convertToInternalConsumerGroup(d, offsets))
+                    )
+                    .sequential()
+                    .collectList()));
   }
 
   public Mono<List<InternalConsumerGroup>> getConsumerGroups(
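Note: the consumer-group lookup above keeps the existing parallel().flatMap().sequential().collectList() shape while swapping the per-group offset call onto the reactive client. A generic sketch of that Reactor pattern, independent of this commit (names are illustrative); parallel() only splits the stream into rails, so pair it with runOn(...) when the per-item work should execute on scheduler threads:

import java.util.List;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;
import reactor.core.scheduler.Schedulers;

class ParallelCollectSketch {
  static Mono<List<String>> describeAll(List<String> ids) {
    return Flux.fromIterable(ids)
        .parallel()
        .runOn(Schedulers.boundedElastic())      // run each rail on a worker thread
        .flatMap(id -> Mono.just("description-of-" + id)) // stand-in for a per-id admin call
        .sequential()                            // merge rails back into one Flux
        .collectList();
  }
}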
@@ -392,15 +343,6 @@ public class KafkaService {
    );
   }
 
-  public Mono<Map<TopicPartition, OffsetAndMetadata>> groupMetadata(KafkaCluster cluster,
-                                                                    String consumerGroupId) {
-    return adminClientService.getOrCreateAdminClient(cluster).map(ac ->
-        ac.getAdminClient()
-            .listConsumerGroupOffsets(consumerGroupId)
-            .partitionsToOffsetAndMetadata()
-    ).flatMap(ClusterUtil::toMono).map(MapUtil::removeNullValues);
-  }
-
   public Map<TopicPartition, Long> topicPartitionsEndOffsets(
      KafkaCluster cluster, Collection<TopicPartition> topicPartitions) {
     try (KafkaConsumer<Bytes, Bytes> consumer = createConsumer(cluster)) {
@@ -427,47 +369,17 @@ public class KafkaService {
   }
 
   @SneakyThrows
-  public Mono<InternalTopic> updateTopic(KafkaCluster cluster, String topicName,
+  public Mono<InternalTopic> updateTopic(KafkaCluster cluster,
+                                         String topicName,
                                         TopicUpdateDTO topicUpdate) {
-    ConfigResource topicCr = new ConfigResource(ConfigResource.Type.TOPIC, topicName);
-    return adminClientService.getOrCreateAdminClient(cluster)
-        .flatMap(ac -> {
-          if (ac.getSupportedFeatures()
-              .contains(ExtendedAdminClient.SupportedFeature.INCREMENTAL_ALTER_CONFIGS)) {
-            return incrementalAlterConfig(topicUpdate, topicCr, ac)
-                .flatMap(c -> getUpdatedTopic(ac, topicName));
-          } else {
-            return alterConfig(topicUpdate, topicCr, ac)
-                .flatMap(c -> getUpdatedTopic(ac, topicName));
-          }
-        });
+    return adminClientService.get(cluster)
+        .flatMap(ac ->
+            ac.updateTopicConfig(topicName,
+                topicUpdate.getConfigs()).then(getUpdatedTopic(ac, topicName)));
   }
 
-  private Mono<InternalTopic> getUpdatedTopic(ExtendedAdminClient ac, String topicName) {
-    return getTopicsData(ac.getAdminClient())
-        .map(s -> s.stream()
-            .filter(t -> t.getName().equals(topicName)).findFirst().orElseThrow());
-  }
-
-  private Mono<String> incrementalAlterConfig(TopicUpdateDTO topicUpdate, ConfigResource topicCr,
-                                              ExtendedAdminClient ac) {
-    List<AlterConfigOp> listOp = topicUpdate.getConfigs().entrySet().stream()
-        .flatMap(cfg -> Stream.of(new AlterConfigOp(new ConfigEntry(cfg.getKey(), cfg.getValue()),
-            AlterConfigOp.OpType.SET))).collect(Collectors.toList());
-    return ClusterUtil.toMono(
-        ac.getAdminClient().incrementalAlterConfigs(Collections.singletonMap(topicCr, listOp))
-            .all(), topicCr.name());
-  }
-
-  @SuppressWarnings("deprecation")
-  private Mono<String> alterConfig(TopicUpdateDTO topicUpdate, ConfigResource topicCr,
-                                   ExtendedAdminClient ac) {
-    List<ConfigEntry> configEntries = topicUpdate.getConfigs().entrySet().stream()
-        .flatMap(cfg -> Stream.of(new ConfigEntry(cfg.getKey(), cfg.getValue())))
-        .collect(Collectors.toList());
-    Config config = new Config(configEntries);
-    Map<ConfigResource, Config> map = Collections.singletonMap(topicCr, config);
-    return ClusterUtil.toMono(ac.getAdminClient().alterConfigs(map).all(), topicCr.name());
+  private Mono<InternalTopic> getUpdatedTopic(ReactiveAdminClient ac, String topicName) {
+    return getTopicsData(ac, List.of(topicName)).next();
   }
 
   private InternalTopic mergeWithStats(InternalTopic topic,
@@ -520,18 +432,12 @@ public class KafkaService {
    );
   }
 
-  private Mono<InternalSegmentSizeDto> updateSegmentMetrics(AdminClient ac,
+  private Mono<InternalSegmentSizeDto> updateSegmentMetrics(ReactiveAdminClient ac,
                                                            InternalClusterMetrics clusterMetrics,
                                                            List<InternalTopic> internalTopics) {
-    List<String> names =
-        internalTopics.stream().map(InternalTopic::getName).collect(Collectors.toList());
-    return ClusterUtil.toMono(ac.describeTopics(names).all()).flatMap(topic ->
-        ClusterUtil.toMono(ac.describeCluster().nodes()).flatMap(nodes ->
-
-            ClusterUtil.toMono(
-                ac.describeLogDirs(
-                    nodes.stream().map(Node::id).collect(Collectors.toList())).all()
-            ).map(log -> {
+    return ac.describeCluster().flatMap(
+        clusterDescription ->
+            ac.describeLogDirs().map(log -> {
              final List<Tuple3<Integer, TopicPartition, Long>> topicPartitions =
                  log.entrySet().stream().flatMap(b ->
                      b.getValue().entrySet().stream().flatMap(topicMap ->
@@ -598,7 +504,6 @@ public class KafkaService {
                )
                .internalTopicWithSegmentSize(resultTopics).build();
            })
-        )
    );
   }
 
@@ -612,15 +517,15 @@ public class KafkaService {
   }
 
   private Mono<InternalClusterMetrics> fillJmxMetrics(InternalClusterMetrics internalClusterMetrics,
-                                                      String clusterName, AdminClient ac) {
+                                                      String clusterName, ReactiveAdminClient ac) {
     return fillBrokerMetrics(internalClusterMetrics, clusterName, ac)
        .map(this::calculateClusterMetrics);
   }
 
   private Mono<InternalClusterMetrics> fillBrokerMetrics(
-      InternalClusterMetrics internalClusterMetrics, String clusterName, AdminClient ac) {
-    return ClusterUtil.toMono(ac.describeCluster().nodes())
-        .flatMapIterable(nodes -> nodes)
+      InternalClusterMetrics internalClusterMetrics, String clusterName, ReactiveAdminClient ac) {
+    return ac.describeCluster()
+        .flatMapIterable(desc -> desc.getNodes())
        .map(broker ->
            Map.of(broker.id(), InternalBrokerMetrics.builder()
                .metrics(getJmxMetric(clusterName, broker)).build())
@@ -699,14 +604,7 @@ public class KafkaService {
   }
 
   public Mono<Void> deleteTopicMessages(KafkaCluster cluster, Map<TopicPartition, Long> offsets) {
-    var records = offsets.entrySet().stream()
-        .map(entry -> Map.entry(entry.getKey(), RecordsToDelete.beforeOffset(entry.getValue())))
-        .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
-    return adminClientService.getOrCreateAdminClient(cluster)
-        .map(ExtendedAdminClient::getAdminClient)
-        .flatMap(ac ->
-            ClusterUtil.toMono(ac.deleteRecords(records).all())
-        );
+    return adminClientService.get(cluster).flatMap(ac -> ac.deleteRecords(offsets));
   }
 
   public Mono<RecordMetadata> sendMessage(KafkaCluster cluster, String topic,
@@ -754,19 +652,11 @@ public class KafkaService {
     return headers;
   }
 
-  private Mono<InternalTopic> increaseTopicPartitions(AdminClient adminClient,
-                                                      String topicName,
-                                                      Map<String, NewPartitions> newPartitionsMap
-  ) {
-    return ClusterUtil.toMono(adminClient.createPartitions(newPartitionsMap).all(), topicName)
-        .flatMap(topic -> getTopicsData(adminClient, Collections.singleton(topic)).next());
-  }
-
   public Mono<InternalTopic> increaseTopicPartitions(
      KafkaCluster cluster,
      String topicName,
      PartitionsIncreaseDTO partitionsIncrease) {
-    return adminClientService.getOrCreateAdminClient(cluster)
+    return adminClientService.get(cluster)
        .flatMap(ac -> {
          Integer actualCount = cluster.getTopics().get(topicName).getPartitionCount();
          Integer requestedCount = partitionsIncrease.getTotalPartitionsCount();
@@ -787,18 +677,18 @@ public class KafkaService {
              topicName,
              NewPartitions.increaseTo(partitionsIncrease.getTotalPartitionsCount())
          );
-          return increaseTopicPartitions(ac.getAdminClient(), topicName, newPartitionsMap);
+          return ac.createPartitions(newPartitionsMap)
+              .then(getUpdatedTopic(ac, topicName));
        });
   }
 
   private Mono<InternalTopic> changeReplicationFactor(
-      AdminClient adminClient,
+      ReactiveAdminClient adminClient,
      String topicName,
      Map<TopicPartition, Optional<NewPartitionReassignment>> reassignments
   ) {
-    return ClusterUtil.toMono(adminClient
-        .alterPartitionReassignments(reassignments).all(), topicName)
-        .flatMap(topic -> getTopicsData(adminClient, Collections.singleton(topic)).next());
+    return adminClient.alterPartitionReassignments(reassignments)
+        .then(getUpdatedTopic(adminClient, topicName));
   }
 
   /**
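Note: deleteTopicMessages now passes the raw partition-to-offset map straight to the wrapper, so the RecordsToDelete.beforeOffset(...) conversion that used to live here is assumed to happen inside ReactiveAdminClient.deleteRecords(...). A blocking sketch of that underlying AdminClient call, written as an assumption about the wrapper's internals rather than a copy of them:

import java.util.Map;
import java.util.stream.Collectors;
import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.RecordsToDelete;
import org.apache.kafka.common.TopicPartition;

class DeleteRecordsSketch {
  // Deletes everything before the given offset on each partition.
  static void deleteBefore(AdminClient admin, Map<TopicPartition, Long> offsets) throws Exception {
    admin.deleteRecords(offsets.entrySet().stream()
            .collect(Collectors.toMap(Map.Entry::getKey,
                e -> RecordsToDelete.beforeOffset(e.getValue()))))
        .all()
        .get();
  }
}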
@@ -808,7 +698,7 @@ public class KafkaService {
      KafkaCluster cluster,
      String topicName,
      ReplicationFactorChangeDTO replicationFactorChange) {
-    return adminClientService.getOrCreateAdminClient(cluster)
+    return adminClientService.get(cluster)
        .flatMap(ac -> {
          Integer actual = cluster.getTopics().get(topicName).getReplicationFactor();
          Integer requested = replicationFactorChange.getTotalReplicationFactor();
@@ -825,7 +715,7 @@ public class KafkaService {
                String.format("Requested replication factor %s more than brokers count %s.",
                    requested, brokersCount)));
          }
-          return changeReplicationFactor(ac.getAdminClient(), topicName,
+          return changeReplicationFactor(ac, topicName,
              getPartitionsReassignments(cluster, topicName,
                  replicationFactorChange));
        });
@@ -833,15 +723,14 @@ public class KafkaService {
 
   public Mono<Map<Integer, Map<String, DescribeLogDirsResponse.LogDirInfo>>> getClusterLogDirs(
      KafkaCluster cluster, List<Integer> reqBrokers) {
-    return adminClientService.getOrCreateAdminClient(cluster)
-        .map(admin -> {
+    return adminClientService.get(cluster)
+        .flatMap(admin -> {
          List<Integer> brokers = new ArrayList<>(cluster.getBrokers());
          if (reqBrokers != null && !reqBrokers.isEmpty()) {
            brokers.retainAll(reqBrokers);
          }
-          return admin.getAdminClient().describeLogDirs(brokers);
+          return admin.describeLogDirs(brokers);
        })
-        .flatMap(result -> ClusterUtil.toMono(result.all()))
        .onErrorResume(TimeoutException.class, (TimeoutException e) -> {
          log.error("Error during fetching log dirs", e);
          return Mono.just(new HashMap<>());
@@ -949,20 +838,18 @@ public class KafkaService {
 
   public Mono<Void> updateBrokerLogDir(KafkaCluster cluster, Integer broker,
                                       BrokerLogdirUpdateDTO brokerLogDir) {
-    return adminClientService.getOrCreateAdminClient(cluster)
+    return adminClientService.get(cluster)
        .flatMap(ac -> updateBrokerLogDir(ac, brokerLogDir, broker));
   }
 
-  private Mono<Void> updateBrokerLogDir(ExtendedAdminClient adminMono,
+  private Mono<Void> updateBrokerLogDir(ReactiveAdminClient admin,
                                        BrokerLogdirUpdateDTO b,
                                        Integer broker) {
 
     Map<TopicPartitionReplica, String> req = Map.of(
        new TopicPartitionReplica(b.getTopic(), b.getPartition(), broker),
        b.getLogDir());
-    return Mono.just(adminMono)
-        .map(admin -> admin.getAdminClient().alterReplicaLogDirs(req))
-        .flatMap(result -> ClusterUtil.toMono(result.all()))
+    return admin.alterReplicaLogDirs(req)
        .onErrorResume(UnknownTopicOrPartitionException.class,
            e -> Mono.error(new TopicOrPartitionNotFoundException()))
        .onErrorResume(LogDirNotFoundException.class,
@@ -974,20 +861,8 @@ public class KafkaService {
                                           Integer broker,
                                           String name,
                                           String value) {
-    return adminClientService.getOrCreateAdminClient(cluster)
-        .flatMap(ac -> updateBrokerConfigByName(ac, broker, name, value));
-  }
-
-  private Mono<Void> updateBrokerConfigByName(ExtendedAdminClient admin,
-                                              Integer broker,
-                                              String name,
-                                              String value) {
-    ConfigResource cr = new ConfigResource(ConfigResource.Type.BROKER, String.valueOf(broker));
-    AlterConfigOp op = new AlterConfigOp(new ConfigEntry(name, value), AlterConfigOp.OpType.SET);
-
-    return Mono.just(admin)
-        .map(a -> a.getAdminClient().incrementalAlterConfigs(Map.of(cr, List.of(op))))
-        .flatMap(result -> ClusterUtil.toMono(result.all()))
+    return adminClientService.get(cluster)
+        .flatMap(ac -> ac.updateBrokerConfigByName(broker, name, value))
        .onErrorResume(InvalidRequestException.class,
            e -> Mono.error(new InvalidRequestApiException(e.getMessage())))
        .doOnError(log::error);
@ -1,28 +1,23 @@
|
||||||
package com.provectus.kafka.ui.service;
|
package com.provectus.kafka.ui.service;
|
||||||
|
|
||||||
import static com.google.common.base.Preconditions.checkArgument;
|
|
||||||
import static com.provectus.kafka.ui.util.ClusterUtil.toMono;
|
|
||||||
import static java.util.stream.Collectors.toMap;
|
import static java.util.stream.Collectors.toMap;
|
||||||
import static java.util.stream.Collectors.toSet;
|
import static java.util.stream.Collectors.toSet;
|
||||||
import static org.apache.kafka.common.ConsumerGroupState.DEAD;
|
import static org.apache.kafka.common.ConsumerGroupState.DEAD;
|
||||||
import static org.apache.kafka.common.ConsumerGroupState.EMPTY;
|
import static org.apache.kafka.common.ConsumerGroupState.EMPTY;
|
||||||
|
|
||||||
|
import com.google.common.base.Preconditions;
|
||||||
import com.provectus.kafka.ui.exception.NotFoundException;
|
import com.provectus.kafka.ui.exception.NotFoundException;
|
||||||
import com.provectus.kafka.ui.exception.ValidationException;
|
import com.provectus.kafka.ui.exception.ValidationException;
|
||||||
import com.provectus.kafka.ui.model.KafkaCluster;
|
import com.provectus.kafka.ui.model.KafkaCluster;
|
||||||
import java.util.Collection;
|
import java.util.Collection;
|
||||||
import java.util.HashMap;
|
import java.util.HashMap;
|
||||||
import java.util.HashSet;
|
|
||||||
import java.util.List;
|
import java.util.List;
|
||||||
import java.util.Map;
|
import java.util.Map;
|
||||||
import java.util.Set;
|
import java.util.Set;
|
||||||
|
import javax.annotation.Nullable;
|
||||||
import lombok.RequiredArgsConstructor;
|
import lombok.RequiredArgsConstructor;
|
||||||
import lombok.extern.log4j.Log4j2;
|
import lombok.extern.log4j.Log4j2;
|
||||||
import org.apache.kafka.clients.admin.ConsumerGroupDescription;
|
import org.apache.kafka.clients.admin.OffsetSpec;
|
||||||
import org.apache.kafka.clients.consumer.Consumer;
|
|
||||||
import org.apache.kafka.clients.consumer.ConsumerConfig;
|
|
||||||
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
|
|
||||||
import org.apache.kafka.clients.consumer.OffsetAndTimestamp;
|
|
||||||
import org.apache.kafka.common.TopicPartition;
|
import org.apache.kafka.common.TopicPartition;
|
||||||
import org.springframework.stereotype.Component;
|
import org.springframework.stereotype.Component;
|
||||||
import reactor.core.publisher.Mono;
|
import reactor.core.publisher.Mono;
|
||||||
|
@ -37,71 +32,74 @@ import reactor.core.publisher.Mono;
|
||||||
@RequiredArgsConstructor
|
@RequiredArgsConstructor
|
||||||
public class OffsetsResetService {
|
public class OffsetsResetService {
|
||||||
|
|
||||||
private final KafkaService kafkaService;
|
|
||||||
private final AdminClientService adminClientService;
|
private final AdminClientService adminClientService;
|
||||||
|
|
||||||
public Mono<Map<TopicPartition, OffsetAndMetadata>> resetToEarliest(
|
public Mono<Void> resetToEarliest(
|
||||||
KafkaCluster cluster, String group, String topic, Collection<Integer> partitions) {
|
KafkaCluster cluster, String group, String topic, Collection<Integer> partitions) {
|
||||||
return checkGroupCondition(cluster, group)
|
return checkGroupCondition(cluster, group)
|
||||||
.flatMap(g -> {
|
.flatMap(ac ->
|
||||||
try (var consumer = getConsumer(cluster, group)) {
|
offsets(ac, topic, partitions, OffsetSpec.earliest())
|
||||||
var targetPartitions = getTargetPartitions(consumer, topic, partitions);
|
.flatMap(offsets -> resetOffsets(ac, group, offsets)));
|
||||||
var offsets = consumer.beginningOffsets(targetPartitions);
|
|
||||||
return commitOffsets(consumer, offsets);
|
|
||||||
}
|
|
||||||
});
|
|
||||||
}
|
}
|
||||||
|
|
||||||
public Mono<Map<TopicPartition, OffsetAndMetadata>> resetToLatest(
|
private Mono<Map<TopicPartition, Long>> offsets(ReactiveAdminClient client,
|
||||||
KafkaCluster cluster, String group, String topic, Collection<Integer> partitions) {
|
String topic,
|
||||||
return checkGroupCondition(cluster, group).flatMap(
|
@Nullable Collection<Integer> partitions,
|
||||||
g -> {
|
OffsetSpec spec) {
|
||||||
try (var consumer = getConsumer(cluster, group)) {
|
if (partitions == null) {
|
||||||
var targetPartitions = getTargetPartitions(consumer, topic, partitions);
|
return client.listOffsets(topic, spec);
|
||||||
var offsets = consumer.endOffsets(targetPartitions);
|
|
||||||
return commitOffsets(consumer, offsets);
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
return client.listOffsets(
|
||||||
|
partitions.stream().map(idx -> new TopicPartition(topic, idx)).collect(toSet()),
|
||||||
|
spec
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
public Mono<Map<TopicPartition, OffsetAndMetadata>> resetToTimestamp(
|
public Mono<Void> resetToLatest(
|
||||||
|
KafkaCluster cluster, String group, String topic, Collection<Integer> partitions) {
|
||||||
|
return checkGroupCondition(cluster, group)
|
||||||
|
.flatMap(ac ->
|
||||||
|
offsets(ac, topic, partitions, OffsetSpec.latest())
|
||||||
|
.flatMap(offsets -> resetOffsets(ac, group, offsets)));
|
||||||
|
}
|
||||||
|
|
||||||
|
public Mono<Void> resetToTimestamp(
|
||||||
KafkaCluster cluster, String group, String topic, Collection<Integer> partitions,
|
KafkaCluster cluster, String group, String topic, Collection<Integer> partitions,
|
||||||
long targetTimestamp) {
|
long targetTimestamp) {
|
||||||
return checkGroupCondition(cluster, group).flatMap(
|
return checkGroupCondition(cluster, group)
|
||||||
g -> {
|
.flatMap(ac ->
|
||||||
try (var consumer = getConsumer(cluster, group)) {
|
offsets(ac, topic, partitions, OffsetSpec.forTimestamp(targetTimestamp))
|
||||||
var targetPartitions = getTargetPartitions(consumer, topic, partitions);
|
.flatMap(
|
||||||
var offsets = offsetsByTimestamp(consumer, targetPartitions, targetTimestamp);
|
foundOffsets -> offsets(ac, topic, partitions, OffsetSpec.latest())
|
||||||
return commitOffsets(consumer, offsets);
|
.map(endOffsets -> editTsOffsets(foundOffsets, endOffsets))
|
||||||
}
|
)
|
||||||
}
|
.flatMap(offsets -> resetOffsets(ac, group, offsets))
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
public Mono<Map<TopicPartition, OffsetAndMetadata>> resetToOffsets(
|
public Mono<Void> resetToOffsets(
|
||||||
KafkaCluster cluster, String group, String topic, Map<Integer, Long> targetOffsets) {
|
KafkaCluster cluster, String group, String topic, Map<Integer, Long> targetOffsets) {
|
||||||
return checkGroupCondition(cluster, group).flatMap(
|
Preconditions.checkNotNull(targetOffsets);
|
||||||
g -> {
|
var partitionOffsets = targetOffsets.entrySet().stream()
|
||||||
try (var consumer = getConsumer(cluster, group)) {
|
|
||||||
var offsets = targetOffsets.entrySet().stream()
|
|
||||||
.collect(toMap(e -> new TopicPartition(topic, e.getKey()), Map.Entry::getValue));
|
.collect(toMap(e -> new TopicPartition(topic, e.getKey()), Map.Entry::getValue));
|
||||||
offsets = editOffsetsIfNeeded(consumer, offsets);
|
return checkGroupCondition(cluster, group).flatMap(
|
||||||
return commitOffsets(consumer, offsets);
|
ac ->
|
||||||
}
|
ac.listOffsets(partitionOffsets.keySet(), OffsetSpec.earliest())
|
||||||
}
|
.flatMap(earliest ->
|
||||||
|
ac.listOffsets(partitionOffsets.keySet(), OffsetSpec.latest())
|
||||||
|
.map(latest -> editOffsetsBounds(partitionOffsets, earliest, latest))
|
||||||
|
.flatMap(offsetsToCommit -> resetOffsets(ac, group, offsetsToCommit)))
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
private Mono<ConsumerGroupDescription> checkGroupCondition(KafkaCluster cluster, String groupId) {
|
private Mono<ReactiveAdminClient> checkGroupCondition(KafkaCluster cluster, String groupId) {
|
||||||
return adminClientService.getOrCreateAdminClient(cluster)
|
return adminClientService.get(cluster)
|
||||||
.flatMap(ac ->
|
.flatMap(ac ->
|
||||||
// we need to call listConsumerGroups() to check group existence, because
|
// we need to call listConsumerGroups() to check group existence, because
|
||||||
// describeConsumerGroups() will return consumer group even if it doesn't exist
|
// describeConsumerGroups() will return consumer group even if it doesn't exist
|
||||||
toMono(ac.getAdminClient().listConsumerGroups().all())
|
ac.listConsumerGroups()
|
||||||
.filter(cgs -> cgs.stream().anyMatch(g -> g.groupId().equals(groupId)))
|
.filter(cgs -> cgs.stream().anyMatch(g -> g.equals(groupId)))
|
||||||
.flatMap(cgs -> toMono(
|
.flatMap(cgs -> ac.describeConsumerGroups(List.of(groupId)))
|
||||||
ac.getAdminClient().describeConsumerGroups(List.of(groupId)).all()))
|
|
||||||
.filter(cgs -> cgs.containsKey(groupId))
|
.filter(cgs -> cgs.containsKey(groupId))
|
||||||
.map(cgs -> cgs.get(groupId))
|
.map(cgs -> cgs.get(groupId))
|
||||||
.flatMap(cg -> {
|
.flatMap(cg -> {
|
||||||
|
@ -116,47 +114,18 @@ public class OffsetsResetService {
|
||||||
)
|
)
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
return Mono.just(cg);
|
return Mono.just(ac);
|
||||||
})
|
})
|
||||||
.switchIfEmpty(Mono.error(new NotFoundException("Consumer group not found")))
|
        .switchIfEmpty(Mono.error(new NotFoundException("Consumer group not found")))
        );
  }

-  private Map<TopicPartition, Long> offsetsByTimestamp(Consumer<?, ?> consumer,
-                                                       Set<TopicPartition> partitions,
-                                                       long timestamp) {
-    Map<TopicPartition, OffsetAndTimestamp> timestampedOffsets = consumer
-        .offsetsForTimes(partitions.stream().collect(toMap(p -> p, p -> timestamp)));
-
-    var foundOffsets = timestampedOffsets.entrySet().stream()
-        .filter(e -> e.getValue() != null)
-        .collect(toMap(Map.Entry::getKey, e -> e.getValue().offset()));
-
-    // for partitions where we didnt find offset by timestamp, we use end offsets
-    Set<TopicPartition> endOffsets = new HashSet<>(partitions);
-    endOffsets.removeAll(foundOffsets.keySet());
-    foundOffsets.putAll(consumer.endOffsets(endOffsets));
-
-    return foundOffsets;
-  }
-
-  private Set<TopicPartition> getTargetPartitions(Consumer<?, ?> consumer, String topic,
-                                                  Collection<Integer> partitions) {
-    var allPartitions = allTopicPartitions(consumer, topic);
-    if (partitions == null || partitions.isEmpty()) {
-      return allPartitions;
-    } else {
-      return partitions.stream()
-          .map(idx -> new TopicPartition(topic, idx))
-          .peek(tp -> checkArgument(allPartitions.contains(tp), "Invalid partition %s", tp))
-          .collect(toSet());
-    }
-  }
-
-  private Set<TopicPartition> allTopicPartitions(Consumer<?, ?> consumer, String topic) {
-    return consumer.partitionsFor(topic).stream()
-        .map(info -> new TopicPartition(topic, info.partition()))
-        .collect(toSet());
-  }
+  private Map<TopicPartition, Long> editTsOffsets(Map<TopicPartition, Long> foundTsOffsets,
+                                                  Map<TopicPartition, Long> endOffsets) {
+    // for partitions where we didnt find offset by timestamp, we use end offsets
+    Map<TopicPartition, Long> result = new HashMap<>(endOffsets);
+    result.putAll(foundTsOffsets);
+    return result;
+  }

  /**
@@ -164,10 +133,9 @@ public class OffsetsResetService {
   * fail we reset offset to either earliest or latest offsets (To follow logic from
   * kafka.admin.ConsumerGroupCommand.scala)
   */
-  private Map<TopicPartition, Long> editOffsetsIfNeeded(Consumer<?, ?> consumer,
-                                                        Map<TopicPartition, Long> offsetsToCheck) {
-    var earliestOffsets = consumer.beginningOffsets(offsetsToCheck.keySet());
-    var latestOffsets = consumer.endOffsets(offsetsToCheck.keySet());
+  private Map<TopicPartition, Long> editOffsetsBounds(Map<TopicPartition, Long> offsetsToCheck,
+                                                      Map<TopicPartition, Long> earliestOffsets,
+                                                      Map<TopicPartition, Long> latestOffsets) {
    var result = new HashMap<TopicPartition, Long>();
    offsetsToCheck.forEach((tp, offset) -> {
      if (earliestOffsets.get(tp) > offset) {
@@ -184,17 +152,10 @@ public class OffsetsResetService {
    return result;
  }

-  private Mono<Map<TopicPartition, OffsetAndMetadata>> commitOffsets(
-      Consumer<?, ?> consumer, Map<TopicPartition, Long> offsets
-  ) {
-    var toCommit = offsets.entrySet().stream()
-        .collect(toMap(Map.Entry::getKey, e -> new OffsetAndMetadata(e.getValue())));
-    consumer.commitSync(toCommit);
-    return Mono.just(toCommit);
-  }
-
-  private Consumer<?, ?> getConsumer(KafkaCluster cluster, String groupId) {
-    return kafkaService.createConsumer(cluster, Map.of(ConsumerConfig.GROUP_ID_CONFIG, groupId));
+  private Mono<Void> resetOffsets(ReactiveAdminClient adminClient,
+                                  String groupId,
+                                  Map<TopicPartition, Long> offsets) {
+    return adminClient.alterConsumerGroupOffsets(groupId, offsets);
  }

}
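Editor's note: with commitOffsets and getConsumer removed, a reset in the service layer reduces to composing calls on the ReactiveAdminClient introduced in the next hunk. A minimal sketch of that composition, assuming a hypothetical helper name (resetToEarliest is not part of this commit) and using only methods the new client exposes:

  // Hypothetical composition, not part of the commit: reset a consumer group to the
  // earliest offsets of a topic using only ReactiveAdminClient operations.
  private Mono<Void> resetToEarliest(ReactiveAdminClient adminClient, String groupId, String topic) {
    return adminClient.listOffsets(topic, OffsetSpec.earliest())   // Mono<Map<TopicPartition, Long>>
        .flatMap(offsets -> adminClient.alterConsumerGroupOffsets(groupId, offsets));
  }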
@@ -0,0 +1,311 @@
+package com.provectus.kafka.ui.service;
+
+import static com.google.common.util.concurrent.Uninterruptibles.getUninterruptibly;
+import static java.util.stream.Collectors.toList;
+import static java.util.stream.Collectors.toMap;
+
+import com.provectus.kafka.ui.util.MapUtil;
+import com.provectus.kafka.ui.util.NumberUtil;
+import java.io.Closeable;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import java.util.Set;
+import java.util.concurrent.ExecutionException;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+import lombok.RequiredArgsConstructor;
+import lombok.Value;
+import lombok.extern.log4j.Log4j2;
+import org.apache.kafka.clients.admin.AdminClient;
+import org.apache.kafka.clients.admin.AlterConfigOp;
+import org.apache.kafka.clients.admin.Config;
+import org.apache.kafka.clients.admin.ConfigEntry;
+import org.apache.kafka.clients.admin.ConsumerGroupDescription;
+import org.apache.kafka.clients.admin.ConsumerGroupListing;
+import org.apache.kafka.clients.admin.DescribeClusterOptions;
+import org.apache.kafka.clients.admin.DescribeConfigsOptions;
+import org.apache.kafka.clients.admin.ListTopicsOptions;
+import org.apache.kafka.clients.admin.NewPartitionReassignment;
+import org.apache.kafka.clients.admin.NewPartitions;
+import org.apache.kafka.clients.admin.NewTopic;
+import org.apache.kafka.clients.admin.OffsetSpec;
+import org.apache.kafka.clients.admin.RecordsToDelete;
+import org.apache.kafka.clients.admin.TopicDescription;
+import org.apache.kafka.clients.consumer.OffsetAndMetadata;
+import org.apache.kafka.common.KafkaFuture;
+import org.apache.kafka.common.Node;
+import org.apache.kafka.common.TopicPartition;
+import org.apache.kafka.common.TopicPartitionReplica;
+import org.apache.kafka.common.acl.AclOperation;
+import org.apache.kafka.common.config.ConfigResource;
+import org.apache.kafka.common.requests.DescribeLogDirsResponse;
+import reactor.core.publisher.Mono;
+
+
+@Log4j2
+@RequiredArgsConstructor
+public class ReactiveAdminClient implements Closeable {
+
+  private enum SupportedFeature {
+    INCREMENTAL_ALTER_CONFIGS,
+    ALTER_CONFIGS
+  }
+
+  @Value
+  public static class ClusterDescription {
+    Node controller;
+    String clusterId;
+    Collection<Node> nodes;
+    Set<AclOperation> authorizedOperations;
+  }
+
+  public static Mono<ReactiveAdminClient> create(AdminClient adminClient) {
+    return getClusterVersionImpl(adminClient)
+        .map(ver ->
+            new ReactiveAdminClient(
+                adminClient,
+                Set.of(getSupportedUpdateFeatureForVersion(ver))));
+  }
+
+  private static SupportedFeature getSupportedUpdateFeatureForVersion(String versionStr) {
+    float version = NumberUtil.parserClusterVersion(versionStr);
+    return version <= 2.3f
+        ? SupportedFeature.ALTER_CONFIGS
+        : SupportedFeature.INCREMENTAL_ALTER_CONFIGS;
+  }
+
+  //TODO: discuss - maybe we should map kafka-library's exceptions to our exceptions here
+  private static <T> Mono<T> toMono(KafkaFuture<T> future) {
+    return Mono.create(sink -> future.whenComplete((res, ex) -> {
+      if (ex != null) {
+        sink.error(ex);
+      } else {
+        sink.success(res);
+      }
+    }));
+  }
+
+  //---------------------------------------------------------------------------------
+
+  private final AdminClient client;
+  private final Set<SupportedFeature> features;
+
+  public Mono<Set<String>> listTopics(boolean listInternal) {
+    return toMono(client.listTopics(new ListTopicsOptions().listInternal(listInternal)).names());
+  }
+
+  public Mono<Void> deleteTopic(String topicName) {
+    return toMono(client.deleteTopics(List.of(topicName)).all());
+  }
+
+  public Mono<Map<String, List<ConfigEntry>>> getTopicsConfig(Collection<String> topicNames) {
+    List<ConfigResource> resources = topicNames.stream()
+        .map(topicName -> new ConfigResource(ConfigResource.Type.TOPIC, topicName))
+        .collect(toList());
+
+    return toMono(
+        client.describeConfigs(
+            resources,
+            new DescribeConfigsOptions().includeSynonyms(true)
+        ).all())
+        .map(config -> config.entrySet().stream()
+            .collect(toMap(
+                c -> c.getKey().name(),
+                c -> new ArrayList<>(c.getValue().entries()))));
+  }
+
+  public Mono<Map<Integer, List<ConfigEntry>>> loadBrokersConfig(List<Integer> brokerIds) {
+    List<ConfigResource> resources = brokerIds.stream()
+        .map(brokerId -> new ConfigResource(ConfigResource.Type.BROKER, Integer.toString(brokerId)))
+        .collect(toList());
+    return toMono(client.describeConfigs(resources).all())
+        .map(config -> config.entrySet().stream()
+            .collect(toMap(
+                c -> Integer.valueOf(c.getKey().name()),
+                c -> new ArrayList<>(c.getValue().entries()))));
+  }
+
+  public Mono<Map<String, TopicDescription>> describeTopics(Collection<String> topics) {
+    return toMono(client.describeTopics(topics).all());
+  }
+
+  public Mono<Map<Integer, Map<String, DescribeLogDirsResponse.LogDirInfo>>> describeLogDirs() {
+    return describeCluster()
+        .map(d -> d.getNodes().stream().map(Node::id).collect(toList()))
+        .flatMap(this::describeLogDirs);
+  }
+
+  public Mono<Map<Integer, Map<String, DescribeLogDirsResponse.LogDirInfo>>> describeLogDirs(
+      Collection<Integer> brokerIds) {
+    return toMono(client.describeLogDirs(brokerIds).all());
+  }
+
+  public Mono<ClusterDescription> describeCluster() {
+    var r = client.describeCluster(new DescribeClusterOptions().includeAuthorizedOperations(true));
+    var all = KafkaFuture.allOf(r.nodes(), r.clusterId(), r.controller(), r.authorizedOperations());
+    return Mono.create(sink -> all.whenComplete((res, ex) -> {
+      if (ex != null) {
+        sink.error(ex);
+      } else {
+        try {
+          sink.success(
+              new ClusterDescription(
+                  getUninterruptibly(r.controller()),
+                  getUninterruptibly(r.clusterId()),
+                  getUninterruptibly(r.nodes()),
+                  getUninterruptibly(r.authorizedOperations())
+              )
+          );
+        } catch (ExecutionException e) {
+          // can't be here, because all futures already completed
+        }
+      }
+    }));
+  }
+
+  private static Mono<String> getClusterVersionImpl(AdminClient client) {
+    return toMono(client.describeCluster().controller()).flatMap(controller ->
+        toMono(client.describeConfigs(
+            List.of(new ConfigResource(
+                ConfigResource.Type.BROKER, String.valueOf(controller.id()))))
+            .all()
+            .thenApply(configs ->
+                configs.values().stream()
+                    .map(Config::entries)
+                    .flatMap(Collection::stream)
+                    .filter(entry -> entry.name().contains("inter.broker.protocol.version"))
+                    .findFirst().map(ConfigEntry::value)
+                    .orElse("1.0-UNKNOWN")
+            )));
+  }
+
+  public Mono<String> getClusterVersion() {
+    return getClusterVersionImpl(client);
+  }
+
+  public Mono<Void> deleteConsumerGroups(Collection<String> groupIds) {
+    return toMono(client.deleteConsumerGroups(groupIds).all());
+  }
+
+  public Mono<Void> createTopic(String name,
+                                int numPartitions,
+                                short replicationFactor,
+                                Map<String, String> configs) {
+    return toMono(client.createTopics(
+        List.of(new NewTopic(name, numPartitions, replicationFactor).configs(configs))).all());
+  }
+
+  public Mono<Void> alterPartitionReassignments(
+      Map<TopicPartition, Optional<NewPartitionReassignment>> reassignments) {
+    return toMono(client.alterPartitionReassignments(reassignments).all());
+  }
+
+  public Mono<Void> createPartitions(Map<String, NewPartitions> newPartitionsMap) {
+    return toMono(client.createPartitions(newPartitionsMap).all());
+  }
+
+  public Mono<Void> updateTopicConfig(String topicName, Map<String, String> configs) {
+    if (features.contains(SupportedFeature.INCREMENTAL_ALTER_CONFIGS)) {
+      return incrementalAlterConfig(topicName, configs);
+    } else {
+      return alterConfig(topicName, configs);
+    }
+  }
+
+  public Mono<List<String>> listConsumerGroups() {
+    return toMono(client.listConsumerGroups().all())
+        .map(lst -> lst.stream().map(ConsumerGroupListing::groupId).collect(toList()));
+  }
+
+  public Mono<Map<String, ConsumerGroupDescription>> describeConsumerGroups(List<String> groupIds) {
+    return toMono(client.describeConsumerGroups(groupIds).all());
+  }
+
+  public Mono<Map<TopicPartition, OffsetAndMetadata>> listConsumerGroupOffsets(String groupId) {
+    return toMono(client.listConsumerGroupOffsets(groupId).partitionsToOffsetAndMetadata())
+        .map(MapUtil::removeNullValues);
+  }
+
+  public Mono<Void> alterConsumerGroupOffsets(String groupId, Map<TopicPartition, Long> offsets) {
+    return toMono(client.alterConsumerGroupOffsets(
+        groupId,
+        offsets.entrySet().stream()
+            .collect(toMap(Map.Entry::getKey, e -> new OffsetAndMetadata(e.getValue()))))
+        .all());
+  }
+
+  public Mono<Map<TopicPartition, Long>> listOffsets(String topic,
+                                                     OffsetSpec offsetSpec) {
+    return topicPartitions(topic).flatMap(tps -> listOffsets(tps, offsetSpec));
+  }
+
+  public Mono<Map<TopicPartition, Long>> listOffsets(Set<TopicPartition> partitions,
+                                                     OffsetSpec offsetSpec) {
+    return toMono(
+        client.listOffsets(partitions.stream().collect(toMap(tp -> tp, tp -> offsetSpec))).all())
+        .map(offsets -> offsets.entrySet()
+            .stream()
+            // filtering partitions for which offsets were not found
+            .filter(e -> e.getValue().offset() >= 0)
+            .collect(toMap(Map.Entry::getKey, e -> e.getValue().offset())));
+  }
+
+  private Mono<Set<TopicPartition>> topicPartitions(String topic) {
+    return toMono(client.describeTopics(List.of(topic)).all())
+        .map(r -> r.values().stream()
+            .findFirst()
+            .stream()
+            .flatMap(d -> d.partitions().stream())
+            .map(p -> new TopicPartition(topic, p.partition()))
+            .collect(Collectors.toSet())
+        );
+  }
+
+  public Mono<Void> updateBrokerConfigByName(Integer brokerId, String name, String value) {
+    ConfigResource cr = new ConfigResource(ConfigResource.Type.BROKER, String.valueOf(brokerId));
+    AlterConfigOp op = new AlterConfigOp(new ConfigEntry(name, value), AlterConfigOp.OpType.SET);
+    return toMono(client.incrementalAlterConfigs(Map.of(cr, List.of(op))).all());
+  }
+
+  public Mono<Void> deleteRecords(Map<TopicPartition, Long> offsets) {
+    var records = offsets.entrySet().stream()
+        .map(entry -> Map.entry(entry.getKey(), RecordsToDelete.beforeOffset(entry.getValue())))
+        .collect(toMap(Map.Entry::getKey, Map.Entry::getValue));
+    return toMono(client.deleteRecords(records).all());
+  }
+
+  public Mono<Void> alterReplicaLogDirs(Map<TopicPartitionReplica, String> replicaAssignment) {
+    return toMono(client.alterReplicaLogDirs(replicaAssignment).all());
+  }
+
+  private Mono<Void> incrementalAlterConfig(String topicName, Map<String, String> configs) {
+    var config = configs.entrySet().stream()
+        .flatMap(cfg -> Stream.of(
+            new AlterConfigOp(
+                new ConfigEntry(
+                    cfg.getKey(),
+                    cfg.getValue()),
+                AlterConfigOp.OpType.SET)))
+        .collect(toList());
+    var topicResource = new ConfigResource(ConfigResource.Type.TOPIC, topicName);
+    return toMono(client.incrementalAlterConfigs(Map.of(topicResource, config)).all());
+  }
+
+  @SuppressWarnings("deprecation")
+  private Mono<Void> alterConfig(String topicName, Map<String, String> configs) {
+    List<ConfigEntry> configEntries = configs.entrySet().stream()
+        .flatMap(cfg -> Stream.of(new ConfigEntry(cfg.getKey(), cfg.getValue())))
+        .collect(toList());
+    Config config = new Config(configEntries);
+    var topicResource = new ConfigResource(ConfigResource.Type.TOPIC, topicName);
+    return toMono(client.alterConfigs(Map.of(topicResource, config)).all());
+  }
+
+  @Override
+  public void close() {
+    client.close();
+  }
+}
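Editor's note: a rough usage sketch of the ReactiveAdminClient introduced above, for orientation only. The bootstrap address, the blocking block() calls, and the AdminClientConfig/Properties setup are illustrative assumptions (assuming the usual org.apache.kafka.clients.admin imports), not part of the commit:

  Properties props = new Properties();
  props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // assumed address
  try (AdminClient rawClient = AdminClient.create(props)) {
    // create(...) probes the cluster version once to decide between
    // ALTER_CONFIGS and INCREMENTAL_ALTER_CONFIGS support
    ReactiveAdminClient client = ReactiveAdminClient.create(rawClient).block();
    Set<String> topics = client.listTopics(false).block();
    String version = client.getClusterVersion().block();
  }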
@@ -8,7 +8,6 @@ import com.provectus.kafka.ui.model.ConsumerGroupDTO;
import com.provectus.kafka.ui.model.ConsumerGroupDetailsDTO;
import com.provectus.kafka.ui.model.ConsumerGroupStateDTO;
import com.provectus.kafka.ui.model.ConsumerGroupTopicPartitionDTO;
-import com.provectus.kafka.ui.model.ExtendedAdminClient;
import com.provectus.kafka.ui.model.InternalBrokerConfig;
import com.provectus.kafka.ui.model.InternalConsumerGroup;
import com.provectus.kafka.ui.model.InternalPartition;
@@ -24,7 +23,6 @@ import java.time.OffsetDateTime;
import java.time.ZoneId;
import java.util.ArrayList;
import java.util.Collection;
-import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
@@ -33,48 +31,21 @@ import java.util.Set;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import lombok.extern.log4j.Log4j2;
-import org.apache.kafka.clients.admin.AdminClient;
-import org.apache.kafka.clients.admin.Config;
import org.apache.kafka.clients.admin.ConfigEntry;
import org.apache.kafka.clients.admin.ConsumerGroupDescription;
import org.apache.kafka.clients.admin.TopicDescription;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
-import org.apache.kafka.common.KafkaFuture;
import org.apache.kafka.common.Node;
import org.apache.kafka.common.TopicPartition;
-import org.apache.kafka.common.config.ConfigResource;
import org.apache.kafka.common.record.TimestampType;
import org.apache.kafka.common.utils.Bytes;
-import reactor.core.publisher.Mono;

@Log4j2
public class ClusterUtil {

-  private static final String CLUSTER_VERSION_PARAM_KEY = "inter.broker.protocol.version";
-
  private static final ZoneId UTC_ZONE_ID = ZoneId.of("UTC");

-  public static <T> Mono<T> toMono(KafkaFuture<T> future) {
-    return Mono.create(sink -> future.whenComplete((res, ex) -> {
-      if (ex != null) {
-        sink.error(ex);
-      } else {
-        sink.success(res);
-      }
-    }));
-  }
-
-  public static Mono<String> toMono(KafkaFuture<Void> future, String topicName) {
-    return Mono.create(sink -> future.whenComplete((res, ex) -> {
-      if (ex != null) {
-        sink.error(ex);
-      } else {
-        sink.success(topicName);
-      }
-    }));
-  }
-
  public static InternalConsumerGroup convertToInternalConsumerGroup(
      ConsumerGroupDescription description, Map<TopicPartition, OffsetAndMetadata> offsets) {

@@ -334,46 +305,6 @@ public class ClusterUtil {
    }
  }

-  public static Mono<Set<ExtendedAdminClient.SupportedFeature>> getSupportedFeatures(
-      AdminClient adminClient) {
-    return getClusterVersion(adminClient)
-        .map(ClusterUtil::getSupportedUpdateFeature)
-        .map(Collections::singleton);
-  }
-
-  private static ExtendedAdminClient.SupportedFeature getSupportedUpdateFeature(String version) {
-    try {
-      final String[] parts = version.split("\\.");
-      if (parts.length > 2) {
-        version = parts[0] + "." + parts[1];
-      }
-      return Float.parseFloat(version.split("-")[0]) <= 2.3f
-          ? ExtendedAdminClient.SupportedFeature.ALTER_CONFIGS :
-          ExtendedAdminClient.SupportedFeature.INCREMENTAL_ALTER_CONFIGS;
-    } catch (Exception e) {
-      log.error("Conversion clusterVersion {} to float value failed", version);
-      throw e;
-    }
-  }
-
-  public static Mono<String> getClusterVersion(AdminClient adminClient) {
-    return ClusterUtil.toMono(adminClient.describeCluster().controller())
-        .map(Node::id)
-        .map(id -> Collections
-            .singletonList(new ConfigResource(ConfigResource.Type.BROKER, id.toString())))
-        .map(brokerCR -> adminClient.describeConfigs(brokerCR).all())
-        .flatMap(ClusterUtil::toMono)
-        .map(ClusterUtil::getClusterVersion);
-  }
-
-  public static String getClusterVersion(Map<ConfigResource, Config> configs) {
-    return configs.values().stream()
-        .map(Config::entries)
-        .flatMap(Collection::stream)
-        .filter(entry -> entry.name().contains(CLUSTER_VERSION_PARAM_KEY))
-        .findFirst().map(ConfigEntry::value).orElse("1.0-UNKNOWN");
-  }
-
-
  public static <T, R> Map<T, R> toSingleMap(Stream<Map<T, R>> streamOfMaps) {
    return streamOfMaps
@@ -1,7 +1,9 @@
package com.provectus.kafka.ui.util;

+import lombok.extern.log4j.Log4j2;
import org.apache.commons.lang3.math.NumberUtils;

+@Log4j2
public class NumberUtil {

  private NumberUtil() {
@@ -10,4 +12,17 @@ public class NumberUtil {
  public static boolean isNumeric(Object value) {
    return value != null && NumberUtils.isCreatable(value.toString());
  }
+
+  public static float parserClusterVersion(String version) {
+    try {
+      final String[] parts = version.split("\\.");
+      if (parts.length > 2) {
+        version = parts[0] + "." + parts[1];
+      }
+      return Float.parseFloat(version.split("-")[0]);
+    } catch (Exception e) {
+      log.error("Conversion clusterVersion {} to float value failed", version);
+      throw e;
+    }
+  }
}
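Editor's note: worked examples of parserClusterVersion, derived from the code above. Only the major.minor part is kept and any "-..." suffix is dropped before parsing:

  NumberUtil.parserClusterVersion("2.3.1");       // -> 2.3f  ("2.3.1" is trimmed to "2.3")
  NumberUtil.parserClusterVersion("2.8-IV1");     // -> 2.8f  (the "-IV1" suffix is ignored)
  NumberUtil.parserClusterVersion("1.0-UNKNOWN"); // -> 1.0f  (the fallback version string still parses)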
@@ -61,19 +61,21 @@ public class ReadOnlyModeTests extends AbstractBaseTest {
            .name(topicName)
            .partitions(1)
            .replicationFactor(1)
-            .configs(Map.of())
        )
        .exchange()
        .expectStatus()
        .isOk();

    webTestClient.patch()
        .uri("/api/clusters/{clusterName}/topics/{topicName}", LOCAL, topicName)
        .bodyValue(new TopicUpdateDTO()
-            .configs(Map.of())
+            .configs(Map.of("cleanup.policy", "compact"))
        )
        .exchange()
        .expectStatus()
-        .isOk();
+        .isOk()
+        .expectBody()
+        .jsonPath("$.cleanUpPolicy").isEqualTo("COMPACT");
  }

  @Test
@@ -19,11 +19,13 @@ import java.util.stream.Stream;
import org.apache.kafka.clients.admin.NewTopic;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerConfig;
+import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.TopicPartition;
+import org.apache.kafka.common.serialization.BytesDeserializer;
import org.apache.kafka.common.serialization.BytesSerializer;
import org.apache.kafka.common.utils.Bytes;
import org.junit.jupiter.api.AfterEach;
@@ -46,17 +48,13 @@ public class OffsetsResetServiceTest extends AbstractBaseTest {
  private final String groupId = "OffsetsResetServiceTestGroup-" + UUID.randomUUID();
  private final String topic = "OffsetsResetServiceTestTopic-" + UUID.randomUUID();

-  private KafkaService kafkaService;
  private OffsetsResetService offsetsResetService;

  @BeforeEach
  void init() {
    AdminClientServiceImpl adminClientService = new AdminClientServiceImpl();
-    BrokerService brokerService = new BrokerServiceImpl(adminClientService);
-    FeatureService featureService = new FeatureServiceImpl(brokerService);
    adminClientService.setClientTimeout(5_000);
-    kafkaService = new KafkaService(null, null, null, null, adminClientService, featureService);
-    offsetsResetService = new OffsetsResetService(kafkaService, adminClientService);
+    offsetsResetService = new OffsetsResetService(adminClientService);

    createTopic(new NewTopic(topic, PARTITIONS, (short) 1));
    createConsumerGroup();
@@ -228,7 +226,14 @@ public class OffsetsResetServiceTest extends AbstractBaseTest {
  }

  private Consumer<?, ?> groupConsumer() {
-    return kafkaService.createConsumer(CLUSTER, Map.of(ConsumerConfig.GROUP_ID_CONFIG, groupId));
+    Properties props = new Properties();
+    props.put(ConsumerConfig.CLIENT_ID_CONFIG, "kafka-ui-" + UUID.randomUUID());
+    props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.getBootstrapServers());
+    props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, BytesDeserializer.class);
+    props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, BytesDeserializer.class);
+    props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
+    props.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
+    return new KafkaConsumer<>(props);
  }

}
@@ -200,7 +200,7 @@ public class SendAndReadTests extends AbstractBaseTest {
        .withKeySchema(AVRO_SCHEMA_PRIMITIVE_STRING)
        .withValueSchema(AVRO_SCHEMA_PRIMITIVE_INT)
        .withMsgToSend(
-            new CreateTopicMessage()
+            new CreateTopicMessageDTO()
                .key("\"some string\"")
                .content("123")
        )