Merge branch 'master' into DISCUSSION-4109_sr_serde_serialize
Commit 433ba0df10. 44 changed files with 763 additions and 672 deletions.
.github/workflows/aws_publisher.yaml (vendored, 2 changes)
@@ -31,7 +31,7 @@ jobs:
           echo "Packer will be triggered in this dir $WORK_DIR"

       - name: Configure AWS credentials for Kafka-UI account
-        uses: aws-actions/configure-aws-credentials@v2
+        uses: aws-actions/configure-aws-credentials@v3
         with:
           aws-access-key-id: ${{ secrets.AWS_AMI_PUBLISH_KEY_ID }}
           aws-secret-access-key: ${{ secrets.AWS_AMI_PUBLISH_KEY_SECRET }}
.github/workflows/branch-deploy.yml (vendored, 2 changes)
@@ -45,7 +45,7 @@ jobs:
           restore-keys: |
             ${{ runner.os }}-buildx-
       - name: Configure AWS credentials for Kafka-UI account
-        uses: aws-actions/configure-aws-credentials@v2
+        uses: aws-actions/configure-aws-credentials@v3
         with:
           aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
           aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
.github/workflows/build-public-image.yml (vendored, 2 changes)
@@ -42,7 +42,7 @@ jobs:
           restore-keys: |
             ${{ runner.os }}-buildx-
       - name: Configure AWS credentials for Kafka-UI account
-        uses: aws-actions/configure-aws-credentials@v2
+        uses: aws-actions/configure-aws-credentials@v3
         with:
           aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
           aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
.github/workflows/delete-public-image.yml (vendored, 2 changes)
@@ -15,7 +15,7 @@ jobs:
           tag='${{ github.event.pull_request.number }}'
           echo "tag=${tag}" >> $GITHUB_OUTPUT
       - name: Configure AWS credentials for Kafka-UI account
-        uses: aws-actions/configure-aws-credentials@v2
+        uses: aws-actions/configure-aws-credentials@v3
         with:
           aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
           aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
.github/workflows/e2e-automation.yml (vendored, 2 changes)
@@ -24,7 +24,7 @@ jobs:
         with:
           ref: ${{ github.sha }}
       - name: Configure AWS credentials
-        uses: aws-actions/configure-aws-credentials@v2
+        uses: aws-actions/configure-aws-credentials@v3
         with:
           aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
           aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
.github/workflows/e2e-checks.yaml (vendored, 2 changes)
@@ -18,7 +18,7 @@ jobs:
         with:
           ref: ${{ github.event.pull_request.head.sha }}
       - name: Configure AWS credentials
-        uses: aws-actions/configure-aws-credentials@v2
+        uses: aws-actions/configure-aws-credentials@v3
         with:
           aws-access-key-id: ${{ secrets.S3_AWS_ACCESS_KEY_ID }}
           aws-secret-access-key: ${{ secrets.S3_AWS_SECRET_ACCESS_KEY }}
.github/workflows/e2e-weekly.yml (vendored, 2 changes)
@@ -11,7 +11,7 @@ jobs:
         with:
           ref: ${{ github.sha }}
       - name: Configure AWS credentials
-        uses: aws-actions/configure-aws-credentials@v2
+        uses: aws-actions/configure-aws-credentials@v3
         with:
           aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
           aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}

@@ -47,7 +47,7 @@ jobs:
           restore-keys: |
             ${{ runner.os }}-buildx-
       - name: Configure AWS credentials for Kafka-UI account
-        uses: aws-actions/configure-aws-credentials@v2
+        uses: aws-actions/configure-aws-credentials@v3
         with:
           aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
           aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
.github/workflows/terraform-deploy.yml (vendored, 2 changes)
@@ -26,7 +26,7 @@ jobs:
           echo "Terraform will be triggered in this dir $TF_DIR"

       - name: Configure AWS credentials for Kafka-UI account
-        uses: aws-actions/configure-aws-credentials@v2
+        uses: aws-actions/configure-aws-credentials@v3
         with:
           aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
           aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
@@ -7,8 +7,6 @@ import org.springframework.http.HttpMethod;
 import org.springframework.http.HttpStatus;
 import org.springframework.http.server.reactive.ServerHttpRequest;
 import org.springframework.http.server.reactive.ServerHttpResponse;
 import org.springframework.web.reactive.config.CorsRegistry;
 import org.springframework.web.reactive.config.WebFluxConfigurer;
 import org.springframework.web.server.ServerWebExchange;
 import org.springframework.web.server.WebFilter;
 import org.springframework.web.server.WebFilterChain;
@@ -1,7 +1,6 @@
 package com.provectus.kafka.ui.config;

 import com.provectus.kafka.ui.exception.ValidationException;
 import java.beans.Transient;
 import javax.annotation.PostConstruct;
 import lombok.Data;
 import org.springframework.boot.context.properties.ConfigurationProperties;
@@ -1,7 +1,6 @@
 package com.provectus.kafka.ui.config.auth;

 import java.util.Collection;
-import lombok.Value;

 public record AuthenticatedUser(String principal, Collection<String> groups) {
@@ -2,7 +2,6 @@ package com.provectus.kafka.ui.config.auth;

 import java.util.Collection;
 import java.util.Map;
 import lombok.Value;
 import org.springframework.security.core.GrantedAuthority;
 import org.springframework.security.oauth2.core.user.OAuth2User;
@@ -2,7 +2,6 @@ package com.provectus.kafka.ui.config.auth;

 import java.util.Collection;
 import java.util.Map;
 import lombok.Value;
 import org.springframework.security.core.GrantedAuthority;
 import org.springframework.security.oauth2.core.oidc.OidcIdToken;
 import org.springframework.security.oauth2.core.oidc.OidcUserInfo;
@@ -13,7 +13,6 @@ import java.util.Collection;
 import java.util.Collections;
 import java.util.List;
 import java.util.Objects;
-import java.util.stream.Collectors;
 import javax.annotation.Nullable;
 import lombok.RequiredArgsConstructor;
 import lombok.extern.slf4j.Slf4j;

@@ -38,7 +37,7 @@ public class AccessController implements AuthorizationApi {
             .filter(role -> user.groups().contains(role.getName()))
             .map(role -> mapPermissions(role.getPermissions(), role.getClusters()))
             .flatMap(Collection::stream)
-            .collect(Collectors.toList())
+            .toList()
         )
         .switchIfEmpty(Mono.just(Collections.emptyList()));

@@ -70,10 +69,10 @@ public class AccessController implements AuthorizationApi {
               .map(String::toUpperCase)
               .map(this::mapAction)
              .filter(Objects::nonNull)
-              .collect(Collectors.toList()));
+              .toList());
           return dto;
         })
-        .collect(Collectors.toList());
+        .toList();
   }

   @Nullable
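A note on the `.collect(Collectors.toList())` to `.toList()` refactor that recurs throughout this merge: the two are not strictly interchangeable. `Stream.toList()` (Java 16+) returns an unmodifiable list, while `Collectors.toList()` makes no such guarantee and in practice yields a mutable `ArrayList`. A minimal sketch of the behavioral difference (class and variable names are illustrative, not from the diff):

```java
import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.Stream;

public class ToListDemo {
  public static void main(String[] args) {
    // Collectors.toList(): no immutability guarantee; currently an ArrayList.
    List<String> mutable = Stream.of("a", "b").collect(Collectors.toList());
    mutable.add("c"); // fine

    // Stream.toList(): always unmodifiable.
    List<String> frozen = Stream.of("a", "b").toList();
    try {
      frozen.add("c"); // throws
    } catch (UnsupportedOperationException e) {
      System.out.println("Stream.toList() result is unmodifiable");
    }
  }
}
```

The swap is safe at these call sites because the collected lists are only read afterwards; any caller that mutated the result would need to keep the collector form.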
@@ -26,6 +26,8 @@ import reactor.core.publisher.Mono;
 @RequiredArgsConstructor
 @Slf4j
 public class BrokersController extends AbstractController implements BrokersApi {
+  private static final String BROKER_ID = "brokerId";
+
   private final BrokerService brokerService;
   private final ClusterMapper clusterMapper;

@@ -89,7 +91,7 @@ public class BrokersController extends AbstractController implements BrokersApi
         .cluster(clusterName)
         .clusterConfigActions(ClusterConfigAction.VIEW)
         .operationName("getBrokerConfig")
-        .operationParams(Map.of("brokerId", id))
+        .operationParams(Map.of(BROKER_ID, id))
         .build();

     return validateAccess(context).thenReturn(

@@ -108,7 +110,7 @@ public class BrokersController extends AbstractController implements BrokersApi
         .cluster(clusterName)
         .clusterConfigActions(ClusterConfigAction.VIEW, ClusterConfigAction.EDIT)
         .operationName("updateBrokerTopicPartitionLogDir")
-        .operationParams(Map.of("brokerId", id))
+        .operationParams(Map.of(BROKER_ID, id))
         .build();

     return validateAccess(context).then(

@@ -128,7 +130,7 @@ public class BrokersController extends AbstractController implements BrokersApi
         .cluster(clusterName)
         .clusterConfigActions(ClusterConfigAction.VIEW, ClusterConfigAction.EDIT)
         .operationName("updateBrokerConfigByName")
-        .operationParams(Map.of("brokerId", id))
+        .operationParams(Map.of(BROKER_ID, id))
         .build();

     return validateAccess(context).then(
@@ -22,7 +22,6 @@ import com.provectus.kafka.ui.service.OffsetsResetService;
 import java.util.Map;
 import java.util.Optional;
 import java.util.function.Supplier;
-import java.util.stream.Collectors;
 import lombok.RequiredArgsConstructor;
 import lombok.extern.slf4j.Slf4j;
 import org.springframework.beans.factory.annotation.Value;

@@ -200,7 +199,7 @@ public class ConsumerGroupsController extends AbstractController implements Cons
         .consumerGroups(consumerGroupConsumerGroupsPage.consumerGroups()
             .stream()
             .map(ConsumerGroupMapper::toDto)
-            .collect(Collectors.toList()));
+            .toList());
   }

 }
@@ -36,6 +36,7 @@ import reactor.core.publisher.Mono;
 public class KafkaConnectController extends AbstractController implements KafkaConnectApi {
   private static final Set<ConnectorActionDTO> RESTART_ACTIONS
       = Set.of(RESTART, RESTART_FAILED_TASKS, RESTART_ALL_TASKS);
+  private static final String CONNECTOR_NAME = "connectorName";

   private final KafkaConnectService kafkaConnectService;

@@ -112,7 +113,7 @@ public class KafkaConnectController extends AbstractController implements KafkaC
         .connect(connectName)
         .connectActions(ConnectAction.VIEW, ConnectAction.EDIT)
         .operationName("deleteConnector")
-        .operationParams(Map.of("connectorName", connectName))
+        .operationParams(Map.of(CONNECTOR_NAME, connectName))
         .build();

     return validateAccess(context).then(

@@ -180,7 +181,7 @@ public class KafkaConnectController extends AbstractController implements KafkaC
         .connect(connectName)
         .connectActions(ConnectAction.VIEW, ConnectAction.EDIT)
         .operationName("setConnectorConfig")
-        .operationParams(Map.of("connectorName", connectorName))
+        .operationParams(Map.of(CONNECTOR_NAME, connectorName))
         .build();

     return validateAccess(context).then(

@@ -207,7 +208,7 @@ public class KafkaConnectController extends AbstractController implements KafkaC
         .connect(connectName)
         .connectActions(connectActions)
         .operationName("updateConnectorState")
-        .operationParams(Map.of("connectorName", connectorName))
+        .operationParams(Map.of(CONNECTOR_NAME, connectorName))
         .build();

     return validateAccess(context).then(

@@ -227,7 +228,7 @@ public class KafkaConnectController extends AbstractController implements KafkaC
         .connect(connectName)
         .connectActions(ConnectAction.VIEW)
         .operationName("getConnectorTasks")
-        .operationParams(Map.of("connectorName", connectorName))
+        .operationParams(Map.of(CONNECTOR_NAME, connectorName))
         .build();

     return validateAccess(context).thenReturn(

@@ -247,7 +248,7 @@ public class KafkaConnectController extends AbstractController implements KafkaC
         .connect(connectName)
         .connectActions(ConnectAction.VIEW, ConnectAction.RESTART)
         .operationName("restartConnectorTask")
-        .operationParams(Map.of("connectorName", connectorName))
+        .operationParams(Map.of(CONNECTOR_NAME, connectorName))
         .build();

     return validateAccess(context).then(
@@ -15,7 +15,6 @@ import com.provectus.kafka.ui.model.rbac.permission.SchemaAction;
 import com.provectus.kafka.ui.service.SchemaRegistryService;
 import java.util.List;
 import java.util.Map;
-import java.util.stream.Collectors;
 import javax.validation.Valid;
 import lombok.RequiredArgsConstructor;
 import lombok.extern.slf4j.Slf4j;

@@ -235,7 +234,7 @@ public class SchemasController extends AbstractController implements SchemasApi
       List<String> subjectsToRender = filteredSubjects.stream()
           .skip(subjectToSkip)
           .limit(pageSize)
-          .collect(Collectors.toList());
+          .toList();
       return schemaRegistryService.getAllLatestVersionSchemas(getCluster(clusterName), subjectsToRender)
           .map(subjs -> subjs.stream().map(kafkaSrMapper::toDto).toList())
          .map(subjs -> new SchemaSubjectsResponseDTO().pageCount(totalPages).schemas(subjs));
@@ -22,6 +22,7 @@ import com.provectus.kafka.ui.model.TopicConfigDTO;
 import com.provectus.kafka.ui.model.TopicCreationDTO;
 import com.provectus.kafka.ui.model.TopicDTO;
 import com.provectus.kafka.ui.model.TopicDetailsDTO;
+import com.provectus.kafka.ui.model.TopicProducerStateDTO;
 import com.provectus.kafka.ui.model.TopicUpdateDTO;
 import com.provectus.kafka.ui.model.TopicsResponseDTO;
 import com.provectus.kafka.ui.model.rbac.AccessContext;

@@ -143,7 +144,7 @@ public class TopicsController extends AbstractController implements TopicsApi {
           .map(lst -> lst.stream()
               .map(InternalTopicConfig::from)
               .map(clusterMapper::toTopicConfig)
-              .collect(toList()))
+              .toList())
           .map(Flux::fromIterable)
           .map(ResponseEntity::ok)
       ).doOnEach(sig -> audit(context, sig));

@@ -207,7 +208,7 @@ public class TopicsController extends AbstractController implements TopicsApi {
     return topicsService.loadTopics(getCluster(clusterName), topicsPage)
         .map(topicsToRender ->
             new TopicsResponseDTO()
-                .topics(topicsToRender.stream().map(clusterMapper::toTopic).collect(toList()))
+                .topics(topicsToRender.stream().map(clusterMapper::toTopic).toList())
                 .pageCount(totalPages));
     })
     .map(ResponseEntity::ok)

@@ -327,6 +328,34 @@ public class TopicsController extends AbstractController implements TopicsApi {
         .doOnEach(sig -> audit(context, sig));
   }

+  @Override
+  public Mono<ResponseEntity<Flux<TopicProducerStateDTO>>> getActiveProducerStates(String clusterName,
+                                                                                   String topicName,
+                                                                                   ServerWebExchange exchange) {
+    var context = AccessContext.builder()
+        .cluster(clusterName)
+        .topic(topicName)
+        .topicActions(VIEW)
+        .operationName("getActiveProducerStates")
+        .build();
+
+    Comparator<TopicProducerStateDTO> ordering =
+        Comparator.comparingInt(TopicProducerStateDTO::getPartition)
+            .thenComparing(Comparator.comparing(TopicProducerStateDTO::getProducerId).reversed());
+
+    Flux<TopicProducerStateDTO> states = topicsService.getActiveProducersState(getCluster(clusterName), topicName)
+        .flatMapMany(statesMap ->
+            Flux.fromStream(
+                statesMap.entrySet().stream()
+                    .flatMap(e -> e.getValue().stream().map(p -> clusterMapper.map(e.getKey().partition(), p)))
+                    .sorted(ordering)));
+
+    return validateAccess(context)
+        .thenReturn(states)
+        .map(ResponseEntity::ok)
+        .doOnEach(sig -> audit(context, sig));
+  }
+
   private Comparator<InternalTopic> getComparatorForTopic(
       TopicColumnsToSortDTO orderBy) {
     var defaultComparator = Comparator.comparing(InternalTopic::getName);
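The ordering built in the new `getActiveProducerStates` sorts ascending by partition and, within a partition, descending by producer id. A small self-contained sketch of the same comparator composition (the `State` record here is illustrative, not the DTO from the diff):

```java
import java.util.Comparator;
import java.util.List;

public class OrderingDemo {
  record State(int partition, long producerId) {}

  public static void main(String[] args) {
    // Ascending by partition, then descending by producerId within a partition.
    Comparator<State> ordering =
        Comparator.comparingInt(State::partition)
            .thenComparing(Comparator.comparing(State::producerId).reversed());

    List<State> sorted = List.of(new State(1, 10L), new State(0, 5L), new State(0, 7L))
        .stream().sorted(ordering).toList();

    // Prints partition 0 / producer 7, partition 0 / producer 5, partition 1 / producer 10.
    System.out.println(sorted);
  }
}
```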
@@ -5,7 +5,6 @@ import java.util.Collection;
 import java.util.HashSet;
 import java.util.Map;
 import java.util.Set;
-import java.util.stream.Collectors;
 import lombok.Getter;
 import lombok.extern.slf4j.Slf4j;
 import org.apache.commons.lang3.mutable.MutableLong;

@@ -28,7 +27,7 @@ class OffsetsInfo {
     this(consumer,
         consumer.partitionsFor(topic).stream()
             .map(pi -> new TopicPartition(topic, pi.partition()))
-            .collect(Collectors.toList())
+            .toList()
     );
   }
@@ -106,7 +106,7 @@ public class GlobalErrorWebExceptionHandler extends AbstractErrorWebExceptionHan
           err.setFieldName(e.getKey());
           err.setRestrictions(List.copyOf(e.getValue()));
           return err;
-        }).collect(Collectors.toList());
+        }).toList();

     var message = fieldsErrors.isEmpty()
         ? exception.getMessage()
@@ -30,11 +30,12 @@ import com.provectus.kafka.ui.model.ReplicaDTO;
 import com.provectus.kafka.ui.model.TopicConfigDTO;
 import com.provectus.kafka.ui.model.TopicDTO;
 import com.provectus.kafka.ui.model.TopicDetailsDTO;
+import com.provectus.kafka.ui.model.TopicProducerStateDTO;
 import com.provectus.kafka.ui.service.metrics.RawMetric;
 import java.util.List;
 import java.util.Map;
-import java.util.stream.Collectors;
 import org.apache.kafka.clients.admin.ConfigEntry;
+import org.apache.kafka.clients.admin.ProducerState;
 import org.apache.kafka.common.acl.AccessControlEntry;
 import org.apache.kafka.common.acl.AclBinding;
 import org.apache.kafka.common.acl.AclOperation;

@@ -54,7 +55,7 @@ public interface ClusterMapper {

   default ClusterMetricsDTO toClusterMetrics(Metrics metrics) {
     return new ClusterMetricsDTO()
-        .items(metrics.getSummarizedMetrics().map(this::convert).collect(Collectors.toList()));
+        .items(metrics.getSummarizedMetrics().map(this::convert).toList());
   }

   private MetricDTO convert(RawMetric rawMetric) {

@@ -66,7 +67,7 @@ public interface ClusterMapper {

   default BrokerMetricsDTO toBrokerMetrics(List<RawMetric> metrics) {
     return new BrokerMetricsDTO()
-        .metrics(metrics.stream().map(this::convert).collect(Collectors.toList()));
+        .metrics(metrics.stream().map(this::convert).toList());
   }

   @Mapping(target = "isSensitive", source = "sensitive")

@@ -107,7 +108,7 @@ public interface ClusterMapper {
   List<ClusterDTO.FeaturesEnum> toFeaturesEnum(List<ClusterFeature> features);

   default List<PartitionDTO> map(Map<Integer, InternalPartition> map) {
-    return map.values().stream().map(this::toPartition).collect(Collectors.toList());
+    return map.values().stream().map(this::toPartition).toList();
   }

   default BrokerDiskUsageDTO map(Integer id, InternalBrokerDiskUsage internalBrokerDiskUsage) {

@@ -118,6 +119,17 @@ public interface ClusterMapper {
     return brokerDiskUsage;
   }

+  default TopicProducerStateDTO map(int partition, ProducerState state) {
+    return new TopicProducerStateDTO()
+        .partition(partition)
+        .producerId(state.producerId())
+        .producerEpoch(state.producerEpoch())
+        .lastSequence(state.lastSequence())
+        .lastTimestampMs(state.lastTimestamp())
+        .coordinatorEpoch(state.coordinatorEpoch().stream().boxed().findAny().orElse(null))
+        .currentTransactionStartOffset(state.currentTransactionStartOffset().stream().boxed().findAny().orElse(null));
+  }
+
   static KafkaAclDTO.OperationEnum mapAclOperation(AclOperation operation) {
     return switch (operation) {
       case ALL -> KafkaAclDTO.OperationEnum.ALL;
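In the new `map(int partition, ProducerState state)` default method, `coordinatorEpoch()` and `currentTransactionStartOffset()` return `OptionalInt` and `OptionalLong`, which have no direct way to collapse an absent value to `null`. The `.stream().boxed().findAny().orElse(null)` chain converts the primitive optional into a boxed `Optional`, so an absent value maps cleanly to a nullable DTO field. A minimal JDK-only sketch:

```java
import java.util.OptionalInt;

public class BoxedOptionalDemo {
  static Integer toNullable(OptionalInt value) {
    // OptionalInt -> IntStream (0 or 1 elements) -> Stream<Integer> -> Optional<Integer>
    return value.stream().boxed().findAny().orElse(null);
  }

  public static void main(String[] args) {
    System.out.println(toNullable(OptionalInt.of(42)));  // 42
    System.out.println(toNullable(OptionalInt.empty())); // null
  }
}
```

The same effect could be written as `value.isPresent() ? value.getAsInt() : null`; the stream form just keeps the builder chain in the mapper to a single expression.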
@@ -21,7 +21,7 @@ public class DescribeLogDirsMapper {
     return logDirsInfo.entrySet().stream().map(
         mapEntry -> mapEntry.getValue().entrySet().stream()
             .map(e -> toBrokerLogDirs(mapEntry.getKey(), e.getKey(), e.getValue()))
-            .collect(Collectors.toList())
+            .toList()
     ).flatMap(Collection::stream).collect(Collectors.toList());
   }

@@ -35,7 +35,7 @@ public class DescribeLogDirsMapper {
     var topics = logDirInfo.replicaInfos.entrySet().stream()
         .collect(Collectors.groupingBy(e -> e.getKey().topic())).entrySet().stream()
         .map(e -> toTopicLogDirs(broker, e.getKey(), e.getValue()))
-        .collect(Collectors.toList());
+        .toList();
     result.setTopics(topics);
     return result;
   }

@@ -48,7 +48,7 @@ public class DescribeLogDirsMapper {
     topic.setPartitions(
         partitions.stream().map(
             e -> topicPartitionLogDir(
-                broker, e.getKey().partition(), e.getValue())).collect(Collectors.toList())
+                broker, e.getKey().partition(), e.getValue())).toList()
     );
     return topic;
   }
@@ -44,7 +44,7 @@ public class InternalLogDirStats {
             topicMap.getValue().replicaInfos.entrySet().stream()
                 .map(e -> Tuples.of(b.getKey(), e.getKey(), e.getValue().size))
         )
-    ).collect(toList());
+    ).toList();

     partitionsStats = topicPartitions.stream().collect(
         groupingBy(
@@ -52,6 +52,8 @@ public class AccessContext {
   }

   public static final class AccessContextBuilder {
+    private static final String ACTIONS_NOT_PRESENT = "actions not present";
+
     private Collection<ApplicationConfigAction> applicationConfigActions = Collections.emptySet();
     private String cluster;
     private Collection<ClusterConfigAction> clusterConfigActions = Collections.emptySet();

@@ -75,7 +77,7 @@ public class AccessContext {
     }

     public AccessContextBuilder applicationConfigActions(ApplicationConfigAction... actions) {
-      Assert.isTrue(actions.length > 0, "actions not present");
+      Assert.isTrue(actions.length > 0, ACTIONS_NOT_PRESENT);
       this.applicationConfigActions = List.of(actions);
       return this;
     }

@@ -86,7 +88,7 @@ public class AccessContext {
     }

     public AccessContextBuilder clusterConfigActions(ClusterConfigAction... actions) {
-      Assert.isTrue(actions.length > 0, "actions not present");
+      Assert.isTrue(actions.length > 0, ACTIONS_NOT_PRESENT);
       this.clusterConfigActions = List.of(actions);
       return this;
     }

@@ -97,7 +99,7 @@ public class AccessContext {
     }

     public AccessContextBuilder topicActions(TopicAction... actions) {
-      Assert.isTrue(actions.length > 0, "actions not present");
+      Assert.isTrue(actions.length > 0, ACTIONS_NOT_PRESENT);
       this.topicActions = List.of(actions);
       return this;
     }

@@ -108,7 +110,7 @@ public class AccessContext {
     }

     public AccessContextBuilder consumerGroupActions(ConsumerGroupAction... actions) {
-      Assert.isTrue(actions.length > 0, "actions not present");
+      Assert.isTrue(actions.length > 0, ACTIONS_NOT_PRESENT);
       this.consumerGroupActions = List.of(actions);
       return this;
     }

@@ -119,7 +121,7 @@ public class AccessContext {
     }

     public AccessContextBuilder connectActions(ConnectAction... actions) {
-      Assert.isTrue(actions.length > 0, "actions not present");
+      Assert.isTrue(actions.length > 0, ACTIONS_NOT_PRESENT);
       this.connectActions = List.of(actions);
       return this;
     }

@@ -135,25 +137,25 @@ public class AccessContext {
     }

     public AccessContextBuilder schemaActions(SchemaAction... actions) {
-      Assert.isTrue(actions.length > 0, "actions not present");
+      Assert.isTrue(actions.length > 0, ACTIONS_NOT_PRESENT);
       this.schemaActions = List.of(actions);
       return this;
     }

     public AccessContextBuilder ksqlActions(KsqlAction... actions) {
-      Assert.isTrue(actions.length > 0, "actions not present");
+      Assert.isTrue(actions.length > 0, ACTIONS_NOT_PRESENT);
       this.ksqlActions = List.of(actions);
       return this;
     }

     public AccessContextBuilder aclActions(AclAction... actions) {
-      Assert.isTrue(actions.length > 0, "actions not present");
+      Assert.isTrue(actions.length > 0, ACTIONS_NOT_PRESENT);
       this.aclActions = List.of(actions);
       return this;
     }

     public AccessContextBuilder auditActions(AuditAction... actions) {
-      Assert.isTrue(actions.length > 0, "actions not present");
+      Assert.isTrue(actions.length > 0, ACTIONS_NOT_PRESENT);
       this.auditActions = List.of(actions);
       return this;
     }
@@ -6,7 +6,6 @@ import com.provectus.kafka.ui.serdes.BuiltInSerde;
 import java.util.Base64;
 import java.util.Map;
 import java.util.Optional;
 import org.apache.kafka.common.header.Headers;

 public class Base64Serde implements BuiltInSerde {
@@ -28,6 +28,23 @@ public class ConsumerOffsetsSerde implements BuiltInSerde {

   private static final JsonMapper JSON_MAPPER = createMapper();

+  private static final String ASSIGNMENT = "assignment";
+  private static final String CLIENT_HOST = "client_host";
+  private static final String CLIENT_ID = "client_id";
+  private static final String COMMIT_TIMESTAMP = "commit_timestamp";
+  private static final String CURRENT_STATE_TIMESTAMP = "current_state_timestamp";
+  private static final String GENERATION = "generation";
+  private static final String LEADER = "leader";
+  private static final String MEMBERS = "members";
+  private static final String MEMBER_ID = "member_id";
+  private static final String METADATA = "metadata";
+  private static final String OFFSET = "offset";
+  private static final String PROTOCOL = "protocol";
+  private static final String PROTOCOL_TYPE = "protocol_type";
+  private static final String REBALANCE_TIMEOUT = "rebalance_timeout";
+  private static final String SESSION_TIMEOUT = "session_timeout";
+  private static final String SUBSCRIPTION = "subscription";
+
   public static final String TOPIC = "__consumer_offsets";

   public static String name() {

@@ -116,128 +133,128 @@ public class ConsumerOffsetsSerde implements BuiltInSerde {
   private Deserializer valueDeserializer() {
     final Schema commitOffsetSchemaV0 =
         new Schema(
-            new Field("offset", Type.INT64, ""),
-            new Field("metadata", Type.STRING, ""),
-            new Field("commit_timestamp", Type.INT64, "")
+            new Field(OFFSET, Type.INT64, ""),
+            new Field(METADATA, Type.STRING, ""),
+            new Field(COMMIT_TIMESTAMP, Type.INT64, "")
         );

     final Schema commitOffsetSchemaV1 =
         new Schema(
-            new Field("offset", Type.INT64, ""),
-            new Field("metadata", Type.STRING, ""),
-            new Field("commit_timestamp", Type.INT64, ""),
+            new Field(OFFSET, Type.INT64, ""),
+            new Field(METADATA, Type.STRING, ""),
+            new Field(COMMIT_TIMESTAMP, Type.INT64, ""),
             new Field("expire_timestamp", Type.INT64, "")
         );

     final Schema commitOffsetSchemaV2 =
         new Schema(
-            new Field("offset", Type.INT64, ""),
-            new Field("metadata", Type.STRING, ""),
-            new Field("commit_timestamp", Type.INT64, "")
+            new Field(OFFSET, Type.INT64, ""),
+            new Field(METADATA, Type.STRING, ""),
+            new Field(COMMIT_TIMESTAMP, Type.INT64, "")
         );

     final Schema commitOffsetSchemaV3 =
         new Schema(
-            new Field("offset", Type.INT64, ""),
+            new Field(OFFSET, Type.INT64, ""),
             new Field("leader_epoch", Type.INT32, ""),
-            new Field("metadata", Type.STRING, ""),
-            new Field("commit_timestamp", Type.INT64, "")
+            new Field(METADATA, Type.STRING, ""),
+            new Field(COMMIT_TIMESTAMP, Type.INT64, "")
         );

     final Schema commitOffsetSchemaV4 = new Schema(
-        new Field("offset", Type.INT64, ""),
+        new Field(OFFSET, Type.INT64, ""),
         new Field("leader_epoch", Type.INT32, ""),
-        new Field("metadata", Type.COMPACT_STRING, ""),
-        new Field("commit_timestamp", Type.INT64, ""),
+        new Field(METADATA, Type.COMPACT_STRING, ""),
+        new Field(COMMIT_TIMESTAMP, Type.INT64, ""),
         Field.TaggedFieldsSection.of()
     );

     final Schema metadataSchema0 =
         new Schema(
-            new Field("protocol_type", Type.STRING, ""),
-            new Field("generation", Type.INT32, ""),
-            new Field("protocol", Type.NULLABLE_STRING, ""),
-            new Field("leader", Type.NULLABLE_STRING, ""),
-            new Field("members", new ArrayOf(new Schema(
-                new Field("member_id", Type.STRING, ""),
-                new Field("client_id", Type.STRING, ""),
-                new Field("client_host", Type.STRING, ""),
-                new Field("session_timeout", Type.INT32, ""),
-                new Field("subscription", Type.BYTES, ""),
-                new Field("assignment", Type.BYTES, "")
+            new Field(PROTOCOL_TYPE, Type.STRING, ""),
+            new Field(GENERATION, Type.INT32, ""),
+            new Field(PROTOCOL, Type.NULLABLE_STRING, ""),
+            new Field(LEADER, Type.NULLABLE_STRING, ""),
+            new Field(MEMBERS, new ArrayOf(new Schema(
+                new Field(MEMBER_ID, Type.STRING, ""),
+                new Field(CLIENT_ID, Type.STRING, ""),
+                new Field(CLIENT_HOST, Type.STRING, ""),
+                new Field(SESSION_TIMEOUT, Type.INT32, ""),
+                new Field(SUBSCRIPTION, Type.BYTES, ""),
+                new Field(ASSIGNMENT, Type.BYTES, "")
             )), "")
         );

     final Schema metadataSchema1 =
         new Schema(
-            new Field("protocol_type", Type.STRING, ""),
-            new Field("generation", Type.INT32, ""),
-            new Field("protocol", Type.NULLABLE_STRING, ""),
-            new Field("leader", Type.NULLABLE_STRING, ""),
-            new Field("members", new ArrayOf(new Schema(
-                new Field("member_id", Type.STRING, ""),
-                new Field("client_id", Type.STRING, ""),
-                new Field("client_host", Type.STRING, ""),
-                new Field("rebalance_timeout", Type.INT32, ""),
-                new Field("session_timeout", Type.INT32, ""),
-                new Field("subscription", Type.BYTES, ""),
-                new Field("assignment", Type.BYTES, "")
+            new Field(PROTOCOL_TYPE, Type.STRING, ""),
+            new Field(GENERATION, Type.INT32, ""),
+            new Field(PROTOCOL, Type.NULLABLE_STRING, ""),
+            new Field(LEADER, Type.NULLABLE_STRING, ""),
+            new Field(MEMBERS, new ArrayOf(new Schema(
+                new Field(MEMBER_ID, Type.STRING, ""),
+                new Field(CLIENT_ID, Type.STRING, ""),
+                new Field(CLIENT_HOST, Type.STRING, ""),
+                new Field(REBALANCE_TIMEOUT, Type.INT32, ""),
+                new Field(SESSION_TIMEOUT, Type.INT32, ""),
+                new Field(SUBSCRIPTION, Type.BYTES, ""),
+                new Field(ASSIGNMENT, Type.BYTES, "")
             )), "")
         );

     final Schema metadataSchema2 =
         new Schema(
-            new Field("protocol_type", Type.STRING, ""),
-            new Field("generation", Type.INT32, ""),
-            new Field("protocol", Type.NULLABLE_STRING, ""),
-            new Field("leader", Type.NULLABLE_STRING, ""),
-            new Field("current_state_timestamp", Type.INT64, ""),
-            new Field("members", new ArrayOf(new Schema(
-                new Field("member_id", Type.STRING, ""),
-                new Field("client_id", Type.STRING, ""),
-                new Field("client_host", Type.STRING, ""),
-                new Field("rebalance_timeout", Type.INT32, ""),
-                new Field("session_timeout", Type.INT32, ""),
-                new Field("subscription", Type.BYTES, ""),
-                new Field("assignment", Type.BYTES, "")
+            new Field(PROTOCOL_TYPE, Type.STRING, ""),
+            new Field(GENERATION, Type.INT32, ""),
+            new Field(PROTOCOL, Type.NULLABLE_STRING, ""),
+            new Field(LEADER, Type.NULLABLE_STRING, ""),
+            new Field(CURRENT_STATE_TIMESTAMP, Type.INT64, ""),
+            new Field(MEMBERS, new ArrayOf(new Schema(
+                new Field(MEMBER_ID, Type.STRING, ""),
+                new Field(CLIENT_ID, Type.STRING, ""),
+                new Field(CLIENT_HOST, Type.STRING, ""),
+                new Field(REBALANCE_TIMEOUT, Type.INT32, ""),
+                new Field(SESSION_TIMEOUT, Type.INT32, ""),
+                new Field(SUBSCRIPTION, Type.BYTES, ""),
+                new Field(ASSIGNMENT, Type.BYTES, "")
             )), "")
         );

     final Schema metadataSchema3 =
         new Schema(
-            new Field("protocol_type", Type.STRING, ""),
-            new Field("generation", Type.INT32, ""),
-            new Field("protocol", Type.NULLABLE_STRING, ""),
-            new Field("leader", Type.NULLABLE_STRING, ""),
-            new Field("current_state_timestamp", Type.INT64, ""),
-            new Field("members", new ArrayOf(new Schema(
-                new Field("member_id", Type.STRING, ""),
+            new Field(PROTOCOL_TYPE, Type.STRING, ""),
+            new Field(GENERATION, Type.INT32, ""),
+            new Field(PROTOCOL, Type.NULLABLE_STRING, ""),
+            new Field(LEADER, Type.NULLABLE_STRING, ""),
+            new Field(CURRENT_STATE_TIMESTAMP, Type.INT64, ""),
+            new Field(MEMBERS, new ArrayOf(new Schema(
+                new Field(MEMBER_ID, Type.STRING, ""),
                 new Field("group_instance_id", Type.NULLABLE_STRING, ""),
-                new Field("client_id", Type.STRING, ""),
-                new Field("client_host", Type.STRING, ""),
-                new Field("rebalance_timeout", Type.INT32, ""),
-                new Field("session_timeout", Type.INT32, ""),
-                new Field("subscription", Type.BYTES, ""),
-                new Field("assignment", Type.BYTES, "")
+                new Field(CLIENT_ID, Type.STRING, ""),
+                new Field(CLIENT_HOST, Type.STRING, ""),
+                new Field(REBALANCE_TIMEOUT, Type.INT32, ""),
+                new Field(SESSION_TIMEOUT, Type.INT32, ""),
+                new Field(SUBSCRIPTION, Type.BYTES, ""),
+                new Field(ASSIGNMENT, Type.BYTES, "")
             )), "")
         );

     final Schema metadataSchema4 =
         new Schema(
-            new Field("protocol_type", Type.COMPACT_STRING, ""),
-            new Field("generation", Type.INT32, ""),
-            new Field("protocol", Type.COMPACT_NULLABLE_STRING, ""),
-            new Field("leader", Type.COMPACT_NULLABLE_STRING, ""),
-            new Field("current_state_timestamp", Type.INT64, ""),
-            new Field("members", new CompactArrayOf(new Schema(
-                new Field("member_id", Type.COMPACT_STRING, ""),
+            new Field(PROTOCOL_TYPE, Type.COMPACT_STRING, ""),
+            new Field(GENERATION, Type.INT32, ""),
+            new Field(PROTOCOL, Type.COMPACT_NULLABLE_STRING, ""),
+            new Field(LEADER, Type.COMPACT_NULLABLE_STRING, ""),
+            new Field(CURRENT_STATE_TIMESTAMP, Type.INT64, ""),
+            new Field(MEMBERS, new CompactArrayOf(new Schema(
+                new Field(MEMBER_ID, Type.COMPACT_STRING, ""),
                 new Field("group_instance_id", Type.COMPACT_NULLABLE_STRING, ""),
-                new Field("client_id", Type.COMPACT_STRING, ""),
-                new Field("client_host", Type.COMPACT_STRING, ""),
-                new Field("rebalance_timeout", Type.INT32, ""),
-                new Field("session_timeout", Type.INT32, ""),
-                new Field("subscription", Type.COMPACT_BYTES, ""),
-                new Field("assignment", Type.COMPACT_BYTES, ""),
+                new Field(CLIENT_ID, Type.COMPACT_STRING, ""),
+                new Field(CLIENT_HOST, Type.COMPACT_STRING, ""),
+                new Field(REBALANCE_TIMEOUT, Type.INT32, ""),
+                new Field(SESSION_TIMEOUT, Type.INT32, ""),
+                new Field(SUBSCRIPTION, Type.COMPACT_BYTES, ""),
+                new Field(ASSIGNMENT, Type.COMPACT_BYTES, ""),
                 Field.TaggedFieldsSection.of()
             )), ""),
             Field.TaggedFieldsSection.of()

@@ -249,7 +266,7 @@ public class ConsumerOffsetsSerde implements BuiltInSerde {
       short version = bb.getShort();
       // ideally, we should distinguish if value is commit or metadata
       // by checking record's key, but our current serde structure doesn't allow that.
-      // so, we trying to parse into metadata first and after into commit msg
+      // so, we are trying to parse into metadata first and after into commit msg
       try {
         result = toJson(
             switch (version) {
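The corrected comment describes the deserializer's fallback strategy: without the record key, the serde cannot tell whether a `__consumer_offsets` value is a group-metadata message or an offset-commit message, so it attempts the metadata schema first and falls back to the commit schema on failure. A schematic version of that control flow; the `parseGroupMetadata`/`parseOffsetCommit` helpers are hypothetical stand-ins for the version-dispatched schema reads in the real serde:

```java
import java.nio.ByteBuffer;

public class FallbackParseDemo {
  // Hypothetical placeholder: in the serde this is a Schema.read() per version.
  static String parseGroupMetadata(ByteBuffer bb) {
    throw new RuntimeException("not a metadata record");
  }

  // Hypothetical placeholder for the commit-offset schema read.
  static String parseOffsetCommit(ByteBuffer bb) {
    return "offset-commit";
  }

  static String deserializeValue(ByteBuffer bb) {
    bb.mark();
    try {
      return parseGroupMetadata(bb);  // try the metadata layout first...
    } catch (RuntimeException e) {
      bb.reset();                     // ...then rewind and retry as a commit
      return parseOffsetCommit(bb);
    }
  }

  public static void main(String[] args) {
    System.out.println(deserializeValue(ByteBuffer.allocate(0)));
  }
}
```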
@@ -2,7 +2,6 @@ package com.provectus.kafka.ui.serdes.builtin;

 import com.google.common.primitives.Ints;
 import com.provectus.kafka.ui.serde.api.DeserializeResult;
 import com.provectus.kafka.ui.serde.api.PropertyResolver;
 import com.provectus.kafka.ui.serde.api.SchemaDescription;
 import com.provectus.kafka.ui.serdes.BuiltInSerde;
 import java.util.Map;
@@ -47,6 +47,8 @@ public class SchemaRegistrySerde implements BuiltInSerde {
     return "SchemaRegistry";
   }

+  private static final String SCHEMA_REGISTRY = "schemaRegistry";
+
   private SchemaRegistryClient schemaRegistryClient;
   private List<String> schemaRegistryUrls;
   private String valueSchemaNameTemplate;

@@ -58,7 +60,7 @@ public class SchemaRegistrySerde implements BuiltInSerde {
   @Override
   public boolean canBeAutoConfigured(PropertyResolver kafkaClusterProperties,
                                      PropertyResolver globalProperties) {
-    return kafkaClusterProperties.getListProperty("schemaRegistry", String.class)
+    return kafkaClusterProperties.getListProperty(SCHEMA_REGISTRY, String.class)
         .filter(lst -> !lst.isEmpty())
         .isPresent();
   }

@@ -66,7 +68,7 @@ public class SchemaRegistrySerde implements BuiltInSerde {
   @Override
   public void autoConfigure(PropertyResolver kafkaClusterProperties,
                             PropertyResolver globalProperties) {
-    var urls = kafkaClusterProperties.getListProperty("schemaRegistry", String.class)
+    var urls = kafkaClusterProperties.getListProperty(SCHEMA_REGISTRY, String.class)
         .filter(lst -> !lst.isEmpty())
         .orElseThrow(() -> new ValidationException("No urls provided for schema registry"));
     configure(

@@ -92,7 +94,7 @@ public class SchemaRegistrySerde implements BuiltInSerde {
                         PropertyResolver kafkaClusterProperties,
                         PropertyResolver globalProperties) {
     var urls = serdeProperties.getListProperty("url", String.class)
-        .or(() -> kafkaClusterProperties.getListProperty("schemaRegistry", String.class))
+        .or(() -> kafkaClusterProperties.getListProperty(SCHEMA_REGISTRY, String.class))
         .filter(lst -> !lst.isEmpty())
         .orElseThrow(() -> new ValidationException("No urls provided for schema registry"));
     configure(
@@ -31,6 +31,7 @@ import java.util.function.BiFunction;
 import java.util.function.Function;
 import java.util.function.Predicate;
 import java.util.stream.Collectors;
+import java.util.stream.IntStream;
 import java.util.stream.Stream;
 import javax.annotation.Nullable;
 import lombok.AccessLevel;

@@ -55,6 +56,7 @@ import org.apache.kafka.clients.admin.NewPartitionReassignment;
 import org.apache.kafka.clients.admin.NewPartitions;
 import org.apache.kafka.clients.admin.NewTopic;
 import org.apache.kafka.clients.admin.OffsetSpec;
+import org.apache.kafka.clients.admin.ProducerState;
 import org.apache.kafka.clients.admin.RecordsToDelete;
 import org.apache.kafka.clients.admin.TopicDescription;
 import org.apache.kafka.clients.consumer.OffsetAndMetadata;

@@ -658,6 +660,21 @@ public class ReactiveAdminClient implements Closeable {
     return toMono(client.alterReplicaLogDirs(replicaAssignment).all());
   }

+  // returns tp -> list of active producer's states (if any)
+  public Mono<Map<TopicPartition, List<ProducerState>>> getActiveProducersState(String topic) {
+    return describeTopic(topic)
+        .map(td -> client.describeProducers(
+                IntStream.range(0, td.partitions().size())
+                    .mapToObj(i -> new TopicPartition(topic, i))
+                    .toList()
+            ).all()
+        )
+        .flatMap(ReactiveAdminClient::toMono)
+        .map(map -> map.entrySet().stream()
+            .filter(e -> !e.getValue().activeProducers().isEmpty()) // skipping partitions without producers
+            .collect(toMap(Map.Entry::getKey, e -> e.getValue().activeProducers())));
+  }
+
   private Mono<Void> incrementalAlterConfig(String topicName,
                                             List<ConfigEntry> currentConfigs,
                                             Map<String, String> newConfigs) {
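`getActiveProducersState` wraps the Kafka Admin API's `describeProducers` call (KIP-664, available since Kafka 2.8) in the project's reactive plumbing. A sketch of the same call against a plain `Admin` client, outside the reactive wrapper; the bootstrap address and topic name are placeholders:

```java
import java.util.List;
import java.util.Properties;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.ProducerState;
import org.apache.kafka.common.TopicPartition;

public class DescribeProducersDemo {
  public static void main(String[] args) throws Exception {
    Properties props = new Properties();
    props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder

    try (Admin admin = Admin.create(props)) {
      // Ask for producer states of one partition; the diff instead derives the
      // full partition list from describeTopic() before issuing the call.
      var result = admin.describeProducers(List.of(new TopicPartition("my-topic", 0)));
      result.all().get().forEach((tp, partitionState) -> {
        List<ProducerState> producers = partitionState.activeProducers();
        if (!producers.isEmpty()) { // same filter as the diff: skip idle partitions
          System.out.println(tp + " -> " + producers);
        }
      });
    }
  }
}
```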
@@ -39,6 +39,7 @@ import org.apache.kafka.clients.admin.ConfigEntry;
 import org.apache.kafka.clients.admin.NewPartitionReassignment;
 import org.apache.kafka.clients.admin.NewPartitions;
 import org.apache.kafka.clients.admin.OffsetSpec;
+import org.apache.kafka.clients.admin.ProducerState;
 import org.apache.kafka.clients.admin.TopicDescription;
 import org.apache.kafka.common.Node;
 import org.apache.kafka.common.TopicPartition;

@@ -459,6 +460,11 @@ public class TopicsService {
     );
   }

+  public Mono<Map<TopicPartition, List<ProducerState>>> getActiveProducersState(KafkaCluster cluster, String topic) {
+    return adminClientService.get(cluster)
+        .flatMap(ac -> ac.getActiveProducersState(topic));
+  }
+
   private Mono<List<String>> filterExisting(KafkaCluster cluster, Collection<String> topics) {
     return adminClientService.get(cluster)
         .flatMap(ac -> ac.listTopics(true))
@@ -11,6 +11,9 @@ import org.apache.kafka.common.Node;

 class WellKnownMetrics {

+  private static final String BROKER_TOPIC_METRICS = "BrokerTopicMetrics";
+  private static final String FIFTEEN_MINUTE_RATE = "FifteenMinuteRate";
+
   // per broker
   final Map<Integer, BigDecimal> brokerBytesInFifteenMinuteRate = new HashMap<>();
   final Map<Integer, BigDecimal> brokerBytesOutFifteenMinuteRate = new HashMap<>();

@@ -36,15 +39,15 @@ class WellKnownMetrics {
     if (!brokerBytesInFifteenMinuteRate.containsKey(node.id())
         && rawMetric.labels().size() == 1
         && "BytesInPerSec".equalsIgnoreCase(rawMetric.labels().get("name"))
-        && containsIgnoreCase(name, "BrokerTopicMetrics")
-        && endsWithIgnoreCase(name, "FifteenMinuteRate")) {
+        && containsIgnoreCase(name, BROKER_TOPIC_METRICS)
+        && endsWithIgnoreCase(name, FIFTEEN_MINUTE_RATE)) {
       brokerBytesInFifteenMinuteRate.put(node.id(), rawMetric.value());
     }
     if (!brokerBytesOutFifteenMinuteRate.containsKey(node.id())
         && rawMetric.labels().size() == 1
         && "BytesOutPerSec".equalsIgnoreCase(rawMetric.labels().get("name"))
-        && containsIgnoreCase(name, "BrokerTopicMetrics")
-        && endsWithIgnoreCase(name, "FifteenMinuteRate")) {
+        && containsIgnoreCase(name, BROKER_TOPIC_METRICS)
+        && endsWithIgnoreCase(name, FIFTEEN_MINUTE_RATE)) {
       brokerBytesOutFifteenMinuteRate.put(node.id(), rawMetric.value());
     }
   }

@@ -53,8 +56,8 @@ class WellKnownMetrics {
     String name = rawMetric.name();
     String topic = rawMetric.labels().get("topic");
     if (topic != null
-        && containsIgnoreCase(name, "BrokerTopicMetrics")
-        && endsWithIgnoreCase(name, "FifteenMinuteRate")) {
+        && containsIgnoreCase(name, BROKER_TOPIC_METRICS)
+        && endsWithIgnoreCase(name, FIFTEEN_MINUTE_RATE)) {
       String nameProperty = rawMetric.labels().get("name");
       if ("BytesInPerSec".equalsIgnoreCase(nameProperty)) {
         bytesInFifteenMinuteRate.compute(topic, (k, v) -> v == null ? rawMetric.value() : v.add(rawMetric.value()));
@@ -52,6 +52,7 @@ import reactor.core.publisher.Mono;
 public class AccessControlService {

   private static final String ACCESS_DENIED = "Access denied";
+  private static final String ACTIONS_ARE_EMPTY = "actions are empty";

   @Nullable
   private final InMemoryReactiveClientRegistrationRepository clientRegistrationRepository;

@@ -206,7 +207,7 @@ public class AccessControlService {
     if (context.getTopic() == null && context.getTopicActions().isEmpty()) {
       return true;
     }
-    Assert.isTrue(!context.getTopicActions().isEmpty(), "actions are empty");
+    Assert.isTrue(!context.getTopicActions().isEmpty(), ACTIONS_ARE_EMPTY);

     Set<String> requiredActions = context.getTopicActions()
         .stream()

@@ -243,7 +244,7 @@ public class AccessControlService {
     if (context.getConsumerGroup() == null && context.getConsumerGroupActions().isEmpty()) {
       return true;
     }
-    Assert.isTrue(!context.getConsumerGroupActions().isEmpty(), "actions are empty");
+    Assert.isTrue(!context.getConsumerGroupActions().isEmpty(), ACTIONS_ARE_EMPTY);

     Set<String> requiredActions = context.getConsumerGroupActions()
         .stream()

@@ -276,7 +277,7 @@ public class AccessControlService {
     if (context.getSchema() == null && context.getSchemaActions().isEmpty()) {
       return true;
     }
-    Assert.isTrue(!context.getSchemaActions().isEmpty(), "actions are empty");
+    Assert.isTrue(!context.getSchemaActions().isEmpty(), ACTIONS_ARE_EMPTY);

     Set<String> requiredActions = context.getSchemaActions()
         .stream()

@@ -309,7 +310,7 @@ public class AccessControlService {
     if (context.getConnect() == null && context.getConnectActions().isEmpty()) {
       return true;
     }
-    Assert.isTrue(!context.getConnectActions().isEmpty(), "actions are empty");
+    Assert.isTrue(!context.getConnectActions().isEmpty(), ACTIONS_ARE_EMPTY);

     Set<String> requiredActions = context.getConnectActions()
         .stream()
@@ -59,8 +59,8 @@ public class CognitoAuthorityExtractor implements ProviderAuthorityExtractor {
         .stream()
         .filter(s -> s.getProvider().equals(Provider.OAUTH_COGNITO))
         .filter(s -> s.getType().equals("group"))
-        .anyMatch(subject -> Stream.of(groups)
-            .map(Object::toString)
+        .anyMatch(subject -> groups
+            .stream()
             .anyMatch(cognitoGroup -> cognitoGroup.equals(subject.getValue()))
         ))
         .map(Role::getName)
@@ -43,6 +43,8 @@ public class JsonAvroConversion {

   private static final JsonMapper MAPPER = new JsonMapper();
   private static final Schema NULL_SCHEMA = Schema.create(Schema.Type.NULL);
+  private static final String FORMAT = "format";
+  private static final String DATE_TIME = "date-time";

   // converts json into Object that is expected input for KafkaAvroSerializer
   // (with AVRO_USE_LOGICAL_TYPE_CONVERTERS flat enabled!)

@@ -347,7 +349,7 @@ public class JsonAvroConversion {
           new SimpleFieldSchema(
               new SimpleJsonType(
                   JsonType.Type.STRING,
-                  Map.of("format", new TextNode("uuid"))))
+                  Map.of(FORMAT, new TextNode("uuid"))))
   ),

   DECIMAL("decimal",

@@ -385,7 +387,7 @@ public class JsonAvroConversion {
           new SimpleFieldSchema(
               new SimpleJsonType(
                   JsonType.Type.STRING,
-                  Map.of("format", new TextNode("date"))))
+                  Map.of(FORMAT, new TextNode("date"))))
   ),

   TIME_MILLIS("time-millis",

@@ -406,7 +408,7 @@ public class JsonAvroConversion {
           new SimpleFieldSchema(
               new SimpleJsonType(
                   JsonType.Type.STRING,
-                  Map.of("format", new TextNode("time"))))
+                  Map.of(FORMAT, new TextNode("time"))))
   ),

   TIME_MICROS("time-micros",

@@ -427,7 +429,7 @@ public class JsonAvroConversion {
           new SimpleFieldSchema(
               new SimpleJsonType(
                   JsonType.Type.STRING,
-                  Map.of("format", new TextNode("time"))))
+                  Map.of(FORMAT, new TextNode("time"))))
   ),

   TIMESTAMP_MILLIS("timestamp-millis",

@@ -448,7 +450,7 @@ public class JsonAvroConversion {
           new SimpleFieldSchema(
               new SimpleJsonType(
                   JsonType.Type.STRING,
-                  Map.of("format", new TextNode("date-time"))))
+                  Map.of(FORMAT, new TextNode(DATE_TIME))))
   ),

   TIMESTAMP_MICROS("timestamp-micros",

@@ -473,7 +475,7 @@ public class JsonAvroConversion {
           new SimpleFieldSchema(
               new SimpleJsonType(
                   JsonType.Type.STRING,
-                  Map.of("format", new TextNode("date-time"))))
+                  Map.of(FORMAT, new TextNode(DATE_TIME))))
   ),

   LOCAL_TIMESTAMP_MILLIS("local-timestamp-millis",

@@ -491,7 +493,7 @@ public class JsonAvroConversion {
           new SimpleFieldSchema(
               new SimpleJsonType(
                   JsonType.Type.STRING,
-                  Map.of("format", new TextNode("date-time"))))
+                  Map.of(FORMAT, new TextNode(DATE_TIME))))
   ),

   LOCAL_TIMESTAMP_MICROS("local-timestamp-micros",

@@ -508,7 +510,7 @@ public class JsonAvroConversion {
           new SimpleFieldSchema(
               new SimpleJsonType(
                   JsonType.Type.STRING,
-                  Map.of("format", new TextNode("date-time"))))
+                  Map.of(FORMAT, new TextNode(DATE_TIME))))
   );

   private final String name;
@@ -37,6 +37,9 @@ import reactor.util.function.Tuples;

 public class ProtobufSchemaConverter implements JsonSchemaConverter<Descriptors.Descriptor> {

+  private static final String MAXIMUM = "maximum";
+  private static final String MINIMUM = "minimum";
+
   private final Set<String> simpleTypesWrapperNames = Set.of(
       BoolValue.getDescriptor().getFullName(),
       Int32Value.getDescriptor().getFullName(),

@@ -156,15 +159,15 @@ public class ProtobufSchemaConverter implements JsonSchemaConverter<Descriptors.
       case INT32, FIXED32, SFIXED32, SINT32 -> new SimpleJsonType(
           JsonType.Type.INTEGER,
           Map.of(
-              "maximum", IntNode.valueOf(Integer.MAX_VALUE),
-              "minimum", IntNode.valueOf(Integer.MIN_VALUE)
+              MAXIMUM, IntNode.valueOf(Integer.MAX_VALUE),
+              MINIMUM, IntNode.valueOf(Integer.MIN_VALUE)
           )
       );
       case UINT32 -> new SimpleJsonType(
           JsonType.Type.INTEGER,
           Map.of(
-              "maximum", LongNode.valueOf(UnsignedInteger.MAX_VALUE.longValue()),
-              "minimum", IntNode.valueOf(0)
+              MAXIMUM, LongNode.valueOf(UnsignedInteger.MAX_VALUE.longValue()),
+              MINIMUM, IntNode.valueOf(0)
           )
       );
       //TODO: actually all *64 types will be printed with quotes (as strings),

@@ -173,15 +176,15 @@ public class ProtobufSchemaConverter implements JsonSchemaConverter<Descriptors.
       case INT64, FIXED64, SFIXED64, SINT64 -> new SimpleJsonType(
           JsonType.Type.INTEGER,
           Map.of(
-              "maximum", LongNode.valueOf(Long.MAX_VALUE),
-              "minimum", LongNode.valueOf(Long.MIN_VALUE)
+              MAXIMUM, LongNode.valueOf(Long.MAX_VALUE),
+              MINIMUM, LongNode.valueOf(Long.MIN_VALUE)
           )
       );
       case UINT64 -> new SimpleJsonType(
           JsonType.Type.INTEGER,
           Map.of(
-              "maximum", new BigIntegerNode(UnsignedLong.MAX_VALUE.bigIntegerValue()),
-              "minimum", LongNode.valueOf(0)
+              MAXIMUM, new BigIntegerNode(UnsignedLong.MAX_VALUE.bigIntegerValue()),
+              MINIMUM, LongNode.valueOf(0)
          )
      );
      case MESSAGE, GROUP -> new SimpleJsonType(JsonType.Type.OBJECT);
@@ -763,6 +763,33 @@ paths:
         404:
           description: Not found

+  /api/clusters/{clusterName}/topics/{topicName}/activeproducers:
+    get:
+      tags:
+        - Topics
+      summary: get producer states for topic
+      operationId: getActiveProducerStates
+      parameters:
+        - name: clusterName
+          in: path
+          required: true
+          schema:
+            type: string
+        - name: topicName
+          in: path
+          required: true
+          schema:
+            type: string
+      responses:
+        200:
+          description: OK
+          content:
+            application/json:
+              schema:
+                type: array
+                items:
+                  $ref: '#/components/schemas/TopicProducerState'
+
   /api/clusters/{clusterName}/topics/{topicName}/consumer-groups:
     get:
       tags:

@@ -2619,6 +2646,31 @@ components:
         - PROTOBUF
         - UNKNOWN

+    TopicProducerState:
+      type: object
+      properties:
+        partition:
+          type: integer
+          format: int32
+        producerId:
+          type: integer
+          format: int64
+        producerEpoch:
+          type: integer
+          format: int32
+        lastSequence:
+          type: integer
+          format: int32
+        lastTimestampMs:
+          type: integer
+          format: int64
+        coordinatorEpoch:
+          type: integer
+          format: int32
+        currentTransactionStartOffset:
+          type: integer
+          format: int64
+
     ConsumerGroup:
       discriminator:
         propertyName: inherit
@@ -10,25 +10,27 @@ import lombok.experimental.Accessors;
 @Accessors(chain = true)
 public class Schema {

+  private static final String USER_DIR = "user.dir";
+
   private String name, valuePath;
   private SchemaType type;

   public static Schema createSchemaAvro() {
     return new Schema().setName("schema_avro-" + randomAlphabetic(5))
         .setType(SchemaType.AVRO)
-        .setValuePath(System.getProperty("user.dir") + "/src/main/resources/testData/schemas/schema_avro_value.json");
+        .setValuePath(System.getProperty(USER_DIR) + "/src/main/resources/testData/schemas/schema_avro_value.json");
   }

   public static Schema createSchemaJson() {
     return new Schema().setName("schema_json-" + randomAlphabetic(5))
         .setType(SchemaType.JSON)
-        .setValuePath(System.getProperty("user.dir") + "/src/main/resources/testData/schemas/schema_json_Value.json");
+        .setValuePath(System.getProperty(USER_DIR) + "/src/main/resources/testData/schemas/schema_json_Value.json");
   }

   public static Schema createSchemaProtobuf() {
     return new Schema().setName("schema_protobuf-" + randomAlphabetic(5))
         .setType(SchemaType.PROTOBUF)
         .setValuePath(
-            System.getProperty("user.dir") + "/src/main/resources/testData/schemas/schema_protobuf_value.txt");
+            System.getProperty(USER_DIR) + "/src/main/resources/testData/schemas/schema_protobuf_value.txt");
   }
 }
@@ -19,6 +19,8 @@ import io.qameta.allure.Step;

 public class TopicCreateEditForm extends BasePage {

+  private static final String RETENTION_BYTES = "retentionBytes";
+
   protected SelenideElement timeToRetainField = $x("//input[@id='timeToRetain']");
   protected SelenideElement partitionsField = $x("//input[@name='partitions']");
   protected SelenideElement nameField = $(id("topicFormName"));

@@ -138,12 +140,12 @@ public class TopicCreateEditForm extends BasePage {

   @Step
   public TopicCreateEditForm selectRetentionBytes(String visibleValue) {
-    return selectFromDropDownByVisibleText("retentionBytes", visibleValue);
+    return selectFromDropDownByVisibleText(RETENTION_BYTES, visibleValue);
   }

   @Step
   public TopicCreateEditForm selectRetentionBytes(Long optionValue) {
-    return selectFromDropDownByOptionValue("retentionBytes", optionValue.toString());
+    return selectFromDropDownByOptionValue(RETENTION_BYTES, optionValue.toString());
   }

   @Step

@@ -202,7 +204,7 @@ public class TopicCreateEditForm extends BasePage {

   @Step
   public String getMaxSizeOnDisk() {
-    return new KafkaUiSelectElement("retentionBytes").getCurrentValue();
+    return new KafkaUiSelectElement(RETENTION_BYTES).getCurrentValue();
   }

   @Step
kafka-ui-react-app/pnpm-lock.yaml (generated, 942 changes)
File diff suppressed because it is too large.
@@ -44,9 +44,11 @@ const Metrics: React.FC = () => {
   if (data.progress) {
     return (
       <S.ProgressContainer>
+        <S.ProgressPct>
+          {Math.floor(data.progress.completenessPercent || 0)}%
+        </S.ProgressPct>
         <S.ProgressBarWrapper>
           <ProgressBar completed={data.progress.completenessPercent || 0} />
-          <span> {Math.floor(data.progress.completenessPercent || 0)} %</span>
         </S.ProgressBarWrapper>
         <ActionButton
           onClick={async () => {
@@ -42,3 +42,10 @@ export const ProgressBarWrapper = styled.div`
   align-items: center;
   width: 280px;
 `;
+
+export const ProgressPct = styled.span`
+  font-size: 15px;
+  font-weight: bold;
+  line-height: 1.5;
+  color: ${({ theme }) => theme.statictics.progressPctColor};
+`;
@@ -291,6 +291,7 @@ const baseTheme = {
   },
   statictics: {
     createdAtColor: Colors.neutral[50],
+    progressPctColor: Colors.neutral[100],
   },
   progressBar: {
     backgroundColor: Colors.neutral[3],