MSK serverless support (BE) (#2737)
MSK serverless support:
1. ReactiveAdminClient.loadBrokersConfig returns an empty map if configs retrieval is not supported by the Kafka backend
2. ReactiveAdminClient.toMono exception unwrapping added
3. FeatureService: topic deletion feature enabled by default
4. TopicCreationDTO.replicationFactor made optional

Co-authored-by: iliax <ikuramshin@provectus.com>
parent 0939d6140f
commit 19e38fb1bf

6 changed files with 50 additions and 37 deletions
@@ -6,6 +6,10 @@ public class TopicMetadataException extends CustomBaseException {
     super(message);
   }
 
+  public TopicMetadataException(String message, Throwable cause) {
+    super(message, cause);
+  }
+
   @Override
   public ErrorCode getErrorCode() {
     return ErrorCode.INVALID_ENTITY_STATE;
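
The new constructor keeps the underlying failure attached, so callers can rethrow a domain exception without losing the original Kafka error. A minimal sketch of the difference (the nested class is a stand-in with the same two constructors, not the project's class):

public class CausePreservingExceptionExample {
  // Stand-in for TopicMetadataException: message-only vs message-plus-cause constructors.
  static class TopicMetadataException extends RuntimeException {
    TopicMetadataException(String message) { super(message); }
    TopicMetadataException(String message, Throwable cause) { super(message, cause); }
  }

  public static void main(String[] args) {
    Throwable kafkaError = new IllegalStateException("Replication factor: 3 larger than available brokers: 1");

    // Before: the underlying error is dropped.
    System.out.println(new TopicMetadataException(kafkaError.getMessage()).getCause()); // null

    // After: the underlying error stays available for logging and error reporting.
    System.out.println(new TopicMetadataException(kafkaError.getMessage(), kafkaError).getCause());
  }
}
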
@@ -47,10 +47,7 @@ public class BrokerService {
   private Mono<List<ConfigEntry>> loadBrokersConfig(
       KafkaCluster cluster, Integer brokerId) {
     return loadBrokersConfig(cluster, Collections.singletonList(brokerId))
-        .map(map -> map.values().stream()
-            .findFirst()
-            .orElseThrow(() -> new NotFoundException(
-                String.format("Config for broker %s not found", brokerId))));
+        .map(map -> map.values().stream().findFirst().orElse(List.of()));
   }
 
   private Flux<InternalBrokerConfig> getBrokersConfig(KafkaCluster cluster, Integer brokerId) {
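
The single-broker lookup now degrades to an empty config list instead of failing with NotFoundException when no configs come back. A minimal, self-contained sketch of the new stream (plain strings stand in for ConfigEntry values):

import java.util.List;
import java.util.Map;

public class SingleBrokerConfigExample {
  public static void main(String[] args) {
    // e.g. MSK Serverless: no per-broker configs are returned at all.
    Map<Integer, List<String>> perBroker = Map.of();
    List<String> configs = perBroker.values().stream().findFirst().orElse(List.of());
    System.out.println(configs); // [] instead of a NotFoundException
  }
}
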
@@ -81,13 +78,6 @@ public class BrokerService {
         .flatMapMany(Flux::fromIterable);
   }
 
-  public Mono<Node> getController(KafkaCluster cluster) {
-    return adminClientService
-        .get(cluster)
-        .flatMap(ReactiveAdminClient::describeCluster)
-        .map(ReactiveAdminClient.ClusterDescription::getController);
-  }
-
   public Mono<Void> updateBrokerLogDir(KafkaCluster cluster,
                                        Integer broker,
                                        BrokerLogdirUpdateDTO brokerLogDir) {
@@ -60,6 +60,6 @@ public class FeatureService {
             .filter(e -> e.name().equals(DELETE_TOPIC_ENABLED_SERVER_PROPERTY))
             .map(e -> Boolean.parseBoolean(e.value()))
             .findFirst()
-            .orElse(false));
+            .orElse(true));
   }
 }
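
When the brokers expose no configs at all (the MSK Serverless case), the topic-deletion feature flag now resolves to enabled instead of disabled. A minimal sketch of the decision, assuming DELETE_TOPIC_ENABLED_SERVER_PROPERTY refers to the standard Kafka property name delete.topic.enable:

import java.util.Map;
import java.util.stream.Stream;

public class DeleteTopicFlagExample {
  public static void main(String[] args) {
    // An empty config stream models a backend that does not report broker configs.
    boolean enabled = Stream.<Map.Entry<String, String>>empty()
        .filter(e -> e.getKey().equals("delete.topic.enable"))
        .map(e -> Boolean.parseBoolean(e.getValue()))
        .findFirst()
        .orElse(true);
    System.out.println(enabled); // true -> the "delete topic" action stays available
  }
}
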
@@ -18,6 +18,7 @@ import java.util.List;
 import java.util.Map;
 import java.util.Optional;
 import java.util.Set;
+import java.util.concurrent.CompletionException;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.atomic.AtomicInteger;
@@ -54,6 +55,7 @@ import org.apache.kafka.common.acl.AclOperation;
 import org.apache.kafka.common.config.ConfigResource;
 import org.apache.kafka.common.errors.GroupIdNotFoundException;
 import org.apache.kafka.common.errors.GroupNotEmptyException;
+import org.apache.kafka.common.errors.InvalidRequestException;
 import org.apache.kafka.common.errors.UnknownTopicOrPartitionException;
 import org.apache.kafka.common.requests.DescribeLogDirsResponse;
 import reactor.core.publisher.Mono;
@@ -81,7 +83,7 @@ public class ReactiveAdminClient implements Closeable {
   }
 
   public static Mono<ReactiveAdminClient> create(AdminClient adminClient) {
-    return getClusterVersionImpl(adminClient)
+    return getClusterVersion(adminClient)
         .map(ver ->
             new ReactiveAdminClient(
                 adminClient,
@@ -105,7 +107,13 @@ public class ReactiveAdminClient implements Closeable {
   private static <T> Mono<T> toMono(KafkaFuture<T> future) {
     return Mono.<T>create(sink -> future.whenComplete((res, ex) -> {
       if (ex != null) {
-        sink.error(ex);
+        // KafkaFuture doc is unclear about what exception wrapper will be used
+        // (from docs it should be ExecutionException, but we actually see CompletionException, so checking both)
+        if (ex instanceof CompletionException || ex instanceof ExecutionException) {
+          sink.error(ex.getCause()); // unwrapping exception
+        } else {
+          sink.error(ex);
+        }
       } else {
         sink.success(res);
       }
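
The unwrapping matters because downstream operators match on exception type, and a CompletionException wrapper would defeat checks such as onErrorResume(InvalidRequestException.class, ...). A minimal sketch of the same adapter written against CompletableFuture instead of KafkaFuture, so it runs without a broker:

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionException;
import java.util.concurrent.ExecutionException;
import reactor.core.publisher.Mono;

public class UnwrapExample {
  // Same idea as ReactiveAdminClient.toMono, applied to a CompletableFuture.
  static <T> Mono<T> toMono(CompletableFuture<T> future) {
    return Mono.<T>create(sink -> future.whenComplete((res, ex) -> {
      if (ex != null) {
        if (ex instanceof CompletionException || ex instanceof ExecutionException) {
          sink.error(ex.getCause()); // surface the real failure, not the wrapper
        } else {
          sink.error(ex);
        }
      } else {
        sink.success(res);
      }
    }));
  }

  public static void main(String[] args) {
    // supplyAsync stores the thrown exception wrapped in a CompletionException.
    CompletableFuture<String> failed = CompletableFuture.supplyAsync(() -> {
      throw new IllegalStateException("boom");
    });

    String result = toMono(failed)
        .onErrorResume(IllegalStateException.class, e -> Mono.just("recovered: " + e.getMessage()))
        .block();
    System.out.println(result); // recovered: boom
  }
}
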
@@ -166,17 +174,29 @@ public class ReactiveAdminClient implements Closeable {
             c -> List.copyOf(c.getValue().entries()))));
   }
 
-  public Mono<Map<Integer, List<ConfigEntry>>> loadBrokersConfig(List<Integer> brokerIds) {
+  private static Mono<Map<Integer, List<ConfigEntry>>> loadBrokersConfig(AdminClient client, List<Integer> brokerIds) {
     List<ConfigResource> resources = brokerIds.stream()
         .map(brokerId -> new ConfigResource(ConfigResource.Type.BROKER, Integer.toString(brokerId)))
         .collect(toList());
     return toMono(client.describeConfigs(resources).all())
+        .doOnError(InvalidRequestException.class,
+            th -> log.trace("Error while getting broker {} configs", brokerIds, th))
+        // some kafka backends (like MSK serverless) do not support broker's configs retrieval,
+        // in that case InvalidRequestException will be thrown
+        .onErrorResume(InvalidRequestException.class, th -> Mono.just(Map.of()))
         .map(config -> config.entrySet().stream()
             .collect(toMap(
                 c -> Integer.valueOf(c.getKey().name()),
                 c -> new ArrayList<>(c.getValue().entries()))));
   }
 
+  /**
+   * Return per-broker configs or empty map if broker's configs retrieval not supported.
+   */
+  public Mono<Map<Integer, List<ConfigEntry>>> loadBrokersConfig(List<Integer> brokerIds) {
+    return loadBrokersConfig(client, brokerIds);
+  }
+
   public Mono<Map<String, TopicDescription>> describeTopics() {
     return listTopics(true).flatMap(this::describeTopics);
   }
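
Backends such as MSK Serverless reject DescribeConfigs for broker resources; after the unwrapping above this surfaces as an InvalidRequestException, which is now logged at trace level and replaced with an empty map. A minimal sketch of that fallback (plain string lists stand in for ConfigEntry):

import java.util.List;
import java.util.Map;
import org.apache.kafka.common.errors.InvalidRequestException;
import reactor.core.publisher.Mono;

public class BrokersConfigFallbackExample {
  public static void main(String[] args) {
    // Simulate a backend that does not support broker config retrieval.
    Mono<Map<Integer, List<String>>> fromBackend =
        Mono.error(new InvalidRequestException("Broker config lookups are not supported"));

    Map<Integer, List<String>> configs = fromBackend
        .onErrorResume(InvalidRequestException.class, th -> Mono.just(Map.of()))
        .block();

    System.out.println(configs); // {} -> callers see "no configs" instead of an error
  }
}
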
@@ -280,20 +300,16 @@ public class ReactiveAdminClient implements Closeable {
         }));
   }
 
-  private static Mono<String> getClusterVersionImpl(AdminClient client) {
-    return toMono(client.describeCluster().controller()).flatMap(controller ->
-        toMono(client.describeConfigs(
-                List.of(new ConfigResource(
-                    ConfigResource.Type.BROKER, String.valueOf(controller.id()))))
-            .all()
-            .thenApply(configs ->
-                configs.values().stream()
-                    .map(Config::entries)
-                    .flatMap(Collection::stream)
-                    .filter(entry -> entry.name().contains("inter.broker.protocol.version"))
-                    .findFirst().map(ConfigEntry::value)
-                    .orElse("1.0-UNKNOWN")
-            )));
+  private static Mono<String> getClusterVersion(AdminClient client) {
+    return toMono(client.describeCluster().controller())
+        .flatMap(controller -> loadBrokersConfig(client, List.of(controller.id())))
+        .map(configs -> configs.values().stream()
+            .flatMap(Collection::stream)
+            .filter(entry -> entry.name().contains("inter.broker.protocol.version"))
+            .findFirst()
+            .map(ConfigEntry::value)
+            .orElse("1.0-UNKNOWN")
+        );
   }
 
   public Mono<Void> deleteConsumerGroups(Collection<String> groupIds) {
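
Because getClusterVersion now goes through the shared broker-config helper, the empty-map fallback applies to the version probe too: with no configs available the version simply degrades to the "1.0-UNKNOWN" placeholder instead of failing client creation. A minimal sketch of the extraction step (Map.Entry stands in for ConfigEntry):

import java.util.Collection;
import java.util.List;
import java.util.Map;

public class ClusterVersionFallbackExample {
  static String version(Map<Integer, List<Map.Entry<String, String>>> configs) {
    return configs.values().stream()
        .flatMap(Collection::stream)
        .filter(e -> e.getKey().contains("inter.broker.protocol.version"))
        .findFirst()
        .map(Map.Entry::getValue)
        .orElse("1.0-UNKNOWN");
  }

  public static void main(String[] args) {
    Map<Integer, List<Map.Entry<String, String>>> configs =
        Map.of(1, List.of(Map.entry("inter.broker.protocol.version", "3.2")));
    System.out.println(version(configs)); // 3.2
    System.out.println(version(Map.of())); // 1.0-UNKNOWN
  }
}
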
@@ -306,10 +322,14 @@ public class ReactiveAdminClient implements Closeable {
 
   public Mono<Void> createTopic(String name,
                                 int numPartitions,
-                                short replicationFactor,
+                                @Nullable Integer replicationFactor,
                                 Map<String, String> configs) {
-    return toMono(client.createTopics(
-        List.of(new NewTopic(name, numPartitions, replicationFactor).configs(configs))).all());
+    var newTopic = new NewTopic(
+        name,
+        Optional.of(numPartitions),
+        Optional.ofNullable(replicationFactor).map(Integer::shortValue)
+    ).configs(configs);
+    return toMono(client.createTopics(List.of(newTopic)).all());
   }
 
   public Mono<Void> alterPartitionReassignments(
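
NewTopic has an Optional-based constructor (in recent Kafka client versions) where an empty replication factor means "let the broker apply its own default", which is the mode MSK Serverless requires. A minimal sketch of how the nullable Integer maps onto it (topic names and the config entry are made up):

import java.util.Map;
import java.util.Optional;
import org.apache.kafka.clients.admin.NewTopic;

public class OptionalReplicationFactorExample {
  static NewTopic newTopic(String name, int partitions, Integer replicationFactor) {
    return new NewTopic(
        name,
        Optional.of(partitions),
        Optional.ofNullable(replicationFactor).map(Integer::shortValue) // empty -> broker default
    ).configs(Map.of("cleanup.policy", "compact"));
  }

  public static void main(String[] args) {
    System.out.println(newTopic("events", 6, null));          // replication factor left to the broker
    System.out.println(newTopic("events-replicated", 6, 3));  // explicit replication factor 3
  }
}
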
@@ -170,11 +170,11 @@ public class TopicsService {
             adminClient.createTopic(
                 topicData.getName(),
                 topicData.getPartitions(),
-                topicData.getReplicationFactor().shortValue(),
+                topicData.getReplicationFactor(),
                 topicData.getConfigs()
             ).thenReturn(topicData)
         )
-        .onErrorResume(t -> Mono.error(new TopicMetadataException(t.getMessage())))
+        .onErrorMap(t -> new TopicMetadataException(t.getMessage(), t))
        .flatMap(topicData -> loadTopicAfterCreation(c, topicData.getName()));
   }
 
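
Switching from onErrorResume(t -> Mono.error(...)) to onErrorMap signals the same error downstream, but together with the new two-argument constructor it preserves the original exception as the cause. A minimal sketch of the operator (a generic RuntimeException stands in for TopicMetadataException):

import reactor.core.publisher.Mono;

public class OnErrorMapExample {
  public static void main(String[] args) {
    Mono.error(new IllegalStateException("Topic 'events' already exists."))
        .onErrorMap(t -> new RuntimeException("Failed to create topic", t))
        .subscribe(
            ok -> { },
            err -> System.out.println(err.getMessage() + " <- caused by " + err.getCause()));
  }
}
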
@@ -194,7 +194,7 @@ public class TopicsService {
       ac.createTopic(
           topic.getName(),
           topic.getPartitionCount(),
-          (short) topic.getReplicationFactor(),
+          topic.getReplicationFactor(),
           topic.getTopicConfigs()
               .stream()
               .collect(Collectors.toMap(InternalTopicConfig::getName,
@@ -430,7 +430,7 @@ public class TopicsService {
       ac.createTopic(
           newTopicName,
           topic.getPartitionCount(),
-          (short) topic.getReplicationFactor(),
+          topic.getReplicationFactor(),
           topic.getTopicConfigs()
               .stream()
               .collect(Collectors
@@ -2261,7 +2261,6 @@ components:
       required:
         - name
         - partitions
-        - replicationFactor
 
     TopicUpdate:
       type: object