Merge branch 'master' into issues/checkShowStreamsRequestExecution2

# Conflicts:
#	kafka-ui-e2e-checks/src/test/java/com/provectus/kafka/ui/manualsuite/backlog/SmokeBacklog.java
#	kafka-ui-e2e-checks/src/test/java/com/provectus/kafka/ui/smokesuite/ksqldb/KsqlDbTest.java
VladSenyuta 2023-04-24 12:59:37 +03:00
commit 3531c0cac7
51 changed files with 611 additions and 221 deletions

.github/ISSUE_TEMPLATE/config.yml (new file)
@ -0,0 +1,11 @@
blank_issues_enabled: false
contact_links:
- name: Official documentation
url: https://docs.kafka-ui.provectus.io/
about: Before reaching out for support, please refer to our documentation. Read the "FAQ" and "Common problems" sections, and try the search there.
- name: Community Discord
url: https://discord.gg/4DWzD7pGE5
about: Chat with other users, get support, or ask questions.
- name: GitHub Discussions
url: https://github.com/provectus/kafka-ui/discussions
about: An alternative place to ask questions or to get some support.

@ -1,16 +0,0 @@
---
name: "❓ Question"
about: Ask a question
title: ''
---
<!--
To ask a question, please either:
1. Open up a discussion (https://github.com/provectus/kafka-ui/discussions)
2. Join us on discord (https://discord.gg/4DWzD7pGE5) and ask there.
Don't forget to check/search for existing issues/discussions.
-->

@ -6,7 +6,7 @@ jobs:
block_merge:
runs-on: ubuntu-latest
steps:
- uses: mheap/github-action-required-labels@v3
- uses: mheap/github-action-required-labels@v4
with:
mode: exactly
count: 0

@ -86,7 +86,7 @@ jobs:
- name: make comment with private deployment link
if: ${{ github.event.label.name == 'status/feature_testing' }}
uses: peter-evans/create-or-update-comment@v2
uses: peter-evans/create-or-update-comment@v3
with:
issue-number: ${{ github.event.pull_request.number }}
body: |
@ -94,7 +94,7 @@ jobs:
- name: make comment with public deployment link
if: ${{ github.event.label.name == 'status/feature_testing_public' }}
uses: peter-evans/create-or-update-comment@v2
uses: peter-evans/create-or-update-comment@v3
with:
issue-number: ${{ github.event.pull_request.number }}
body: |

@ -21,7 +21,7 @@ jobs:
git add ../kafka-ui-from-branch/
git commit -m "removed env:${{ needs.build.outputs.deploy }}" && git push || true
- name: make comment with deployment link
uses: peter-evans/create-or-update-comment@v2
uses: peter-evans/create-or-update-comment@v3
with:
issue-number: ${{ github.event.pull_request.number }}
body: |

@ -65,7 +65,7 @@ jobs:
cache-from: type=local,src=/tmp/.buildx-cache
cache-to: type=local,dest=/tmp/.buildx-cache
- name: make comment with private deployment link
uses: peter-evans/create-or-update-comment@v2
uses: peter-evans/create-or-update-comment@v3
with:
issue-number: ${{ github.event.pull_request.number }}
body: |

@ -55,7 +55,7 @@ jobs:
cache-to: type=local,dest=/tmp/.buildx-cache
- name: Run CVE checks
uses: aquasecurity/trivy-action@0.9.2
uses: aquasecurity/trivy-action@0.10.0
with:
image-ref: "provectuslabs/kafka-ui:${{ steps.build.outputs.version }}"
format: "table"

@ -33,7 +33,7 @@ jobs:
--image-ids imageTag=${{ steps.extract_branch.outputs.tag }} \
--region us-east-1
- name: make comment with private deployment link
uses: peter-evans/create-or-update-comment@v2
uses: peter-evans/create-or-update-comment@v3
with:
issue-number: ${{ github.event.pull_request.number }}
body: |

@ -7,7 +7,7 @@ jobs:
stale:
runs-on: ubuntu-latest
steps:
- uses: actions/stale@v7
- uses: actions/stale@v8
with:
days-before-issue-stale: 7
days-before-issue-close: 3

@ -6,8 +6,9 @@ Following versions of the project are currently being supported with security up
| Version | Supported |
| ------- | ------------------ |
| 0.5.x | :white_check_mark: |
| 0.4.x | :x: |
| 0.6.x | :white_check_mark: |
| 0.5.x | :x: |
| 0.4.x | :x: |
| 0.3.x | :x: |
| 0.2.x | :x: |
| 0.1.x | :x: |

@ -2,6 +2,6 @@ apiVersion: v2
name: kafka-ui
description: A Helm chart for kafka-UI
type: application
version: 0.6.1
appVersion: v0.6.1
version: 0.6.2
appVersion: v0.6.2
icon: https://github.com/provectus/kafka-ui/raw/master/documentation/images/kafka-ui-logo.png

@ -9,4 +9,6 @@ message MySpecificTopicValue {
message MyValue {
int32 version = 1;
string payload = 2;
map<int32, string> intToStringMap = 3;
map<string, MyValue> strToObjMap = 4;
}

@ -27,6 +27,8 @@ public class ClustersProperties {
String internalTopicPrefix;
Integer adminClientTimeout;
PollingProperties polling = new PollingProperties();
@Data
@ -56,6 +58,8 @@ public class ClustersProperties {
Integer pollTimeoutMs;
Integer partitionPollTimeout;
Integer noDataEmptyPolls;
Integer maxPageSize;
Integer defaultPageSize;
}
@Data

@ -5,7 +5,6 @@ import java.util.Map;
import lombok.AllArgsConstructor;
import org.openapitools.jackson.nullable.JsonNullableModule;
import org.springframework.beans.factory.ObjectProvider;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.boot.autoconfigure.web.ServerProperties;
import org.springframework.boot.autoconfigure.web.reactive.WebFluxProperties;
import org.springframework.context.ApplicationContext;
@ -15,8 +14,6 @@ import org.springframework.http.server.reactive.ContextPathCompositeHandler;
import org.springframework.http.server.reactive.HttpHandler;
import org.springframework.jmx.export.MBeanExporter;
import org.springframework.util.StringUtils;
import org.springframework.util.unit.DataSize;
import org.springframework.web.reactive.function.client.WebClient;
import org.springframework.web.server.adapter.WebHttpHandlerBuilder;
@Configuration
@ -52,14 +49,7 @@ public class Config {
}
@Bean
public WebClient webClient(
@Value("${webclient.max-in-memory-buffer-size:20MB}") DataSize maxBuffSize) {
return WebClient.builder()
.codecs(c -> c.defaultCodecs().maxInMemorySize((int) maxBuffSize.toBytes()))
.build();
}
@Bean
// will be used by webflux json mapping
public JsonNullableModule jsonNullableModule() {
return new JsonNullableModule();
}

@ -0,0 +1,33 @@
package com.provectus.kafka.ui.config;
import com.provectus.kafka.ui.exception.ValidationException;
import java.beans.Transient;
import javax.annotation.PostConstruct;
import lombok.Data;
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.context.annotation.Configuration;
import org.springframework.util.unit.DataSize;
@Configuration
@ConfigurationProperties("webclient")
@Data
public class WebclientProperties {
String maxInMemoryBufferSize;
@PostConstruct
public void validate() {
validateAndSetDefaultBufferSize();
}
private void validateAndSetDefaultBufferSize() {
if (maxInMemoryBufferSize != null) {
try {
DataSize.parse(maxInMemoryBufferSize);
} catch (Exception e) {
throw new ValidationException("Invalid format for webclient.maxInMemoryBufferSize");
}
}
}
}
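
For reference, Spring's DataSize accepts a plain number (interpreted as bytes) or a number with a B/KB/MB/GB/TB suffix, so a minimal sketch of inputs that pass the validation above (the values are illustrative):

    DataSize.parse("20");    // 20 bytes
    DataSize.parse("12KB");  // 12 kilobytes
    DataSize.parse("5MB");   // 5 megabytes
    // DataSize.parse("5 megs") throws, which validate() surfaces as a ValidationException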

@ -43,9 +43,6 @@ import reactor.core.scheduler.Schedulers;
@Slf4j
public class MessagesController extends AbstractController implements MessagesApi {
private static final int MAX_LOAD_RECORD_LIMIT = 100;
private static final int DEFAULT_LOAD_RECORD_LIMIT = 20;
private final MessagesService messagesService;
private final DeserializationService deserializationService;
private final AccessControlService accessControlService;
@ -91,8 +88,6 @@ public class MessagesController extends AbstractController implements MessagesAp
seekType = seekType != null ? seekType : SeekTypeDTO.BEGINNING;
seekDirection = seekDirection != null ? seekDirection : SeekDirectionDTO.FORWARD;
filterQueryType = filterQueryType != null ? filterQueryType : MessageFilterTypeDTO.STRING_CONTAINS;
int recordsLimit =
Optional.ofNullable(limit).map(s -> Math.min(s, MAX_LOAD_RECORD_LIMIT)).orElse(DEFAULT_LOAD_RECORD_LIMIT);
var positions = new ConsumerPosition(
seekType,
@ -103,7 +98,7 @@ public class MessagesController extends AbstractController implements MessagesAp
ResponseEntity.ok(
messagesService.loadMessages(
getCluster(clusterName), topicName, positions, q, filterQueryType,
recordsLimit, seekDirection, keySerde, valueSerde)
limit, seekDirection, keySerde, valueSerde)
)
);

@ -11,8 +11,6 @@ import java.util.ArrayList;
import java.util.HashMap;
import java.util.Map;
import java.util.Optional;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import org.apache.kafka.common.Node;
import org.apache.kafka.common.TopicPartition;
@ -82,15 +80,8 @@ public class ConsumerGroupMapper {
InternalConsumerGroup c, T consumerGroup) {
consumerGroup.setGroupId(c.getGroupId());
consumerGroup.setMembers(c.getMembers().size());
int numTopics = Stream.concat(
c.getOffsets().keySet().stream().map(TopicPartition::topic),
c.getMembers().stream()
.flatMap(m -> m.getAssignment().stream().map(TopicPartition::topic))
).collect(Collectors.toSet()).size();
consumerGroup.setMessagesBehind(c.getMessagesBehind());
consumerGroup.setTopics(numTopics);
consumerGroup.setTopics(c.getTopicNum());
consumerGroup.setSimple(c.isSimple());
Optional.ofNullable(c.getState())

@ -1,6 +1,7 @@
package com.provectus.kafka.ui.model;
import java.math.BigDecimal;
import javax.annotation.Nullable;
import lombok.Data;
import org.apache.kafka.common.Node;
@ -10,15 +11,27 @@ public class InternalBroker {
private final Integer id;
private final String host;
private final Integer port;
private final BigDecimal bytesInPerSec;
private final BigDecimal bytesOutPerSec;
private final @Nullable BigDecimal bytesInPerSec;
private final @Nullable BigDecimal bytesOutPerSec;
private final @Nullable Integer partitionsLeader;
private final @Nullable Integer partitions;
private final @Nullable Integer inSyncPartitions;
private final @Nullable BigDecimal leadersSkew;
private final @Nullable BigDecimal partitionsSkew;
public InternalBroker(Node node, Statistics statistics) {
public InternalBroker(Node node,
PartitionDistributionStats partitionDistribution,
Statistics statistics) {
this.id = node.id();
this.host = node.host();
this.port = node.port();
this.bytesInPerSec = statistics.getMetrics().getBrokerBytesInPerSec().get(node.id());
this.bytesOutPerSec = statistics.getMetrics().getBrokerBytesOutPerSec().get(node.id());
this.partitionsLeader = partitionDistribution.getPartitionLeaders().get(node);
this.partitions = partitionDistribution.getPartitionsCount().get(node);
this.inSyncPartitions = partitionDistribution.getInSyncPartitions().get(node);
this.leadersSkew = partitionDistribution.leadersSkew(node);
this.partitionsSkew = partitionDistribution.partitionsSkew(node);
}
}

@ -5,6 +5,7 @@ import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import lombok.Builder;
import lombok.Data;
import org.apache.kafka.clients.admin.ConsumerGroupDescription;
@ -21,6 +22,7 @@ public class InternalConsumerGroup {
private final Map<TopicPartition, Long> offsets;
private final Map<TopicPartition, Long> endOffsets;
private final Long messagesBehind;
private final Integer topicNum;
private final String partitionAssignor;
private final ConsumerGroupState state;
private final Node coordinator;
@ -44,22 +46,12 @@ public class InternalConsumerGroup {
builder.simple(description.isSimpleConsumerGroup());
builder.state(description.state());
builder.partitionAssignor(description.partitionAssignor());
builder.members(
description.members().stream()
.map(m ->
InternalConsumerGroup.InternalMember.builder()
.assignment(m.assignment().topicPartitions())
.clientId(m.clientId())
.groupInstanceId(m.groupInstanceId().orElse(""))
.consumerId(m.consumerId())
.clientId(m.clientId())
.host(m.host())
.build()
).collect(Collectors.toList())
);
Collection<InternalMember> internalMembers = initInternalMembers(description);
builder.members(internalMembers);
builder.offsets(groupOffsets);
builder.endOffsets(topicEndOffsets);
builder.messagesBehind(calculateMessagesBehind(groupOffsets, topicEndOffsets));
builder.topicNum(calculateTopicNum(groupOffsets, internalMembers));
Optional.ofNullable(description.coordinator()).ifPresent(builder::coordinator);
return builder.build();
}
@ -80,4 +72,31 @@ public class InternalConsumerGroup {
return messagesBehind;
}
private static Integer calculateTopicNum(Map<TopicPartition, Long> offsets, Collection<InternalMember> members) {
long topicNum = Stream.concat(
offsets.keySet().stream().map(TopicPartition::topic),
members.stream()
.flatMap(m -> m.getAssignment().stream().map(TopicPartition::topic))
).distinct().count();
return Integer.valueOf((int) topicNum);
}
private static Collection<InternalMember> initInternalMembers(ConsumerGroupDescription description) {
return description.members().stream()
.map(m ->
InternalConsumerGroup.InternalMember.builder()
.assignment(m.assignment().topicPartitions())
.clientId(m.clientId())
.groupInstanceId(m.groupInstanceId().orElse(""))
.consumerId(m.consumerId())
.clientId(m.clientId())
.host(m.host())
.build()
).collect(Collectors.toList());
}
}
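
A small worked example of calculateTopicNum: topics from committed offsets and from live member assignments are merged, and partitions of the same topic collapse via distinct(). The topic names here are illustrative:

    // offsets:     t1-0, t1-1  -> topics {t1}
    // assignments: t2-0, t1-0  -> topics {t2, t1}
    // distinct topics {t1, t2} -> topicNum = 2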

@ -0,0 +1,93 @@
package com.provectus.kafka.ui.model;
import java.math.BigDecimal;
import java.math.MathContext;
import java.util.HashMap;
import java.util.Map;
import javax.annotation.Nullable;
import lombok.AccessLevel;
import lombok.Getter;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.admin.TopicDescription;
import org.apache.kafka.common.Node;
import org.apache.kafka.common.TopicPartitionInfo;
@RequiredArgsConstructor(access = AccessLevel.PRIVATE)
@Getter
@Slf4j
public class PartitionDistributionStats {
// avg skew will show misleading results for a low number of partitions
private static final int MIN_PARTITIONS_FOR_SKEW_CALCULATION = 50;
private static final MathContext ROUNDING_MATH_CTX = new MathContext(3);
private final Map<Node, Integer> partitionLeaders;
private final Map<Node, Integer> partitionsCount;
private final Map<Node, Integer> inSyncPartitions;
private final double avgLeadersCntPerBroker;
private final double avgPartitionsPerBroker;
private final boolean skewCanBeCalculated;
public static PartitionDistributionStats create(Statistics stats) {
return create(stats, MIN_PARTITIONS_FOR_SKEW_CALCULATION);
}
static PartitionDistributionStats create(Statistics stats, int minPartitionsForSkewCalculation) {
var partitionLeaders = new HashMap<Node, Integer>();
var partitionsReplicated = new HashMap<Node, Integer>();
var isr = new HashMap<Node, Integer>();
int partitionsCnt = 0;
for (TopicDescription td : stats.getTopicDescriptions().values()) {
for (TopicPartitionInfo tp : td.partitions()) {
partitionsCnt++;
tp.replicas().forEach(r -> incr(partitionsReplicated, r));
tp.isr().forEach(r -> incr(isr, r));
if (tp.leader() != null) {
incr(partitionLeaders, tp.leader());
}
}
}
int nodesWithPartitions = partitionsReplicated.size();
int partitionReplications = partitionsReplicated.values().stream().mapToInt(i -> i).sum();
var avgPartitionsPerBroker = nodesWithPartitions == 0 ? 0 : ((double) partitionReplications) / nodesWithPartitions;
int nodesWithLeaders = partitionLeaders.size();
int leadersCnt = partitionLeaders.values().stream().mapToInt(i -> i).sum();
var avgLeadersCntPerBroker = nodesWithLeaders == 0 ? 0 : ((double) leadersCnt) / nodesWithLeaders;
return new PartitionDistributionStats(
partitionLeaders,
partitionsReplicated,
isr,
avgLeadersCntPerBroker,
avgPartitionsPerBroker,
partitionsCnt >= minPartitionsForSkewCalculation
);
}
private static void incr(Map<Node, Integer> map, Node n) {
map.compute(n, (k, c) -> c == null ? 1 : ++c);
}
@Nullable
public BigDecimal partitionsSkew(Node node) {
return calculateAvgSkew(partitionsCount.get(node), avgPartitionsPerBroker);
}
@Nullable
public BigDecimal leadersSkew(Node node) {
return calculateAvgSkew(partitionLeaders.get(node), avgLeadersCntPerBroker);
}
// Returns the difference (in percent) from the average value, or null if it can't be calculated
@Nullable
private BigDecimal calculateAvgSkew(@Nullable Integer value, double avgValue) {
if (avgValue == 0 || !skewCanBeCalculated) {
return null;
}
value = value == null ? 0 : value;
return new BigDecimal((value - avgValue) / avgValue * 100.0).round(ROUNDING_MATH_CTX);
}
}
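
To make calculateAvgSkew concrete, a worked example using the same numbers as the unit test later in this diff: partition counts n1=3, n2=4, n3=1 give an average of (3+4+1)/3 ≈ 2.67 across brokers holding partitions, so

    // skew(node) = (count - avg) / avg * 100, rounded to 3 significant digits
    // n2: (4 - 2.67) / 2.67 * 100 ≈ +50%
    // n3: (1 - 2.67) / 2.67 * 100 ≈ -62.5%
    // n4 holds no partitions, so count = 0 -> skew = -100%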

@ -1,33 +1,36 @@
package com.provectus.kafka.ui.service;
import com.provectus.kafka.ui.config.ClustersProperties;
import com.provectus.kafka.ui.model.KafkaCluster;
import com.provectus.kafka.ui.util.SslPropertiesUtil;
import java.io.Closeable;
import java.time.Instant;
import java.util.Map;
import java.util.Optional;
import java.util.Properties;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicLong;
import lombok.RequiredArgsConstructor;
import lombok.Setter;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Service;
import reactor.core.publisher.Mono;
@Service
@RequiredArgsConstructor
@Slf4j
public class AdminClientServiceImpl implements AdminClientService, Closeable {
private static final int DEFAULT_CLIENT_TIMEOUT_MS = 30_000;
private static final AtomicLong CLIENT_ID_SEQ = new AtomicLong();
private final Map<String, ReactiveAdminClient> adminClientCache = new ConcurrentHashMap<>();
@Setter // used in tests
@Value("${kafka.admin-client-timeout:30000}")
private int clientTimeout;
private final int clientTimeout;
public AdminClientServiceImpl(ClustersProperties clustersProperties) {
this.clientTimeout = Optional.ofNullable(clustersProperties.getAdminClientTimeout())
.orElse(DEFAULT_CLIENT_TIMEOUT_MS);
}
@Override
public Mono<ReactiveAdminClient> get(KafkaCluster cluster) {
@ -42,7 +45,7 @@ public class AdminClientServiceImpl implements AdminClientService, Closeable {
SslPropertiesUtil.addKafkaSslProperties(cluster.getOriginalProperties().getSsl(), properties);
properties.putAll(cluster.getProperties());
properties.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, cluster.getBootstrapServers());
properties.put(AdminClientConfig.REQUEST_TIMEOUT_MS_CONFIG, clientTimeout);
properties.putIfAbsent(AdminClientConfig.REQUEST_TIMEOUT_MS_CONFIG, clientTimeout);
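
// Note (illustrative): putIfAbsent instead of put means a per-cluster
// "request.timeout.ms" supplied via cluster.getProperties() (applied by the
// putAll above) now takes precedence over this global default:
//   props.put(REQUEST_TIMEOUT_MS_CONFIG, 5000);          // from cluster config
//   props.putIfAbsent(REQUEST_TIMEOUT_MS_CONFIG, 30000); // ignored, key already present
//   -> the per-cluster 5000 wins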
properties.putIfAbsent(
AdminClientConfig.CLIENT_ID_CONFIG,
"kafka-ui-admin-" + Instant.now().getEpochSecond() + "-" + CLIENT_ID_SEQ.incrementAndGet()

@ -10,6 +10,7 @@ import com.provectus.kafka.ui.model.BrokersLogdirsDTO;
import com.provectus.kafka.ui.model.InternalBroker;
import com.provectus.kafka.ui.model.InternalBrokerConfig;
import com.provectus.kafka.ui.model.KafkaCluster;
import com.provectus.kafka.ui.model.PartitionDistributionStats;
import com.provectus.kafka.ui.service.metrics.RawMetric;
import java.util.Collections;
import java.util.HashMap;
@ -64,11 +65,13 @@ public class BrokerService {
}
public Flux<InternalBroker> getBrokers(KafkaCluster cluster) {
var stats = statisticsCache.get(cluster);
var partitionsDistribution = PartitionDistributionStats.create(stats);
return adminClientService
.get(cluster)
.flatMap(ReactiveAdminClient::describeCluster)
.map(description -> description.getNodes().stream()
.map(node -> new InternalBroker(node, statisticsCache.get(cluster)))
.map(node -> new InternalBroker(node, partitionsDistribution, stats))
.collect(Collectors.toList()))
.flatMapMany(Flux::fromIterable);
}

@ -101,6 +101,9 @@ public class ConsumerGroupService {
public record ConsumerGroupsPage(List<InternalConsumerGroup> consumerGroups, int totalPages) {
}
private record GroupWithDescr(InternalConsumerGroup icg, ConsumerGroupDescription cgd) {
}
public Mono<ConsumerGroupsPage> getConsumerGroupsPage(
KafkaCluster cluster,
int pageNum,
@ -159,22 +162,19 @@ public class ConsumerGroupService {
sortAndPaginate(descriptions.values(), comparator, pageNum, perPage, sortOrderDto).toList());
}
case MESSAGES_BEHIND -> {
record GroupWithDescr(InternalConsumerGroup icg, ConsumerGroupDescription cgd) { }
Comparator<GroupWithDescr> comparator = Comparator.comparingLong(gwd ->
gwd.icg.getMessagesBehind() == null ? 0L : gwd.icg.getMessagesBehind());
var groupNames = groups.stream().map(ConsumerGroupListing::groupId).toList();
yield loadDescriptionsByInternalConsumerGroups(ac, groups, comparator, pageNum, perPage, sortOrderDto);
}
case TOPIC_NUM -> {
Comparator<GroupWithDescr> comparator = Comparator.comparingInt(gwd -> gwd.icg.getTopicNum());
yield loadDescriptionsByInternalConsumerGroups(ac, groups, comparator, pageNum, perPage, sortOrderDto);
yield ac.describeConsumerGroups(groupNames)
.flatMap(descriptionsMap -> {
List<ConsumerGroupDescription> descriptions = descriptionsMap.values().stream().toList();
return getConsumerGroups(ac, descriptions)
.map(icg -> Streams.zip(icg.stream(), descriptions.stream(), GroupWithDescr::new).toList())
.map(gwd -> sortAndPaginate(gwd, comparator, pageNum, perPage, sortOrderDto)
.map(GroupWithDescr::cgd).toList());
}
);
}
};
}
@ -209,6 +209,27 @@ public class ConsumerGroupService {
.map(cgs -> new ArrayList<>(cgs.values()));
}
private Mono<List<ConsumerGroupDescription>> loadDescriptionsByInternalConsumerGroups(ReactiveAdminClient ac,
List<ConsumerGroupListing> groups,
Comparator<GroupWithDescr> comparator,
int pageNum,
int perPage,
SortOrderDTO sortOrderDto) {
var groupNames = groups.stream().map(ConsumerGroupListing::groupId).toList();
return ac.describeConsumerGroups(groupNames)
.flatMap(descriptionsMap -> {
List<ConsumerGroupDescription> descriptions = descriptionsMap.values().stream().toList();
return getConsumerGroups(ac, descriptions)
.map(icg -> Streams.zip(icg.stream(), descriptions.stream(), GroupWithDescr::new).toList())
.map(gwd -> sortAndPaginate(gwd, comparator, pageNum, perPage, sortOrderDto)
.map(GroupWithDescr::cgd).toList());
}
);
}
public Mono<InternalConsumerGroup> getConsumerGroupDetail(KafkaCluster cluster,
String consumerGroupId) {
return adminClientService.get(cluster)

@ -2,6 +2,7 @@ package com.provectus.kafka.ui.service;
import com.provectus.kafka.ui.client.RetryingKafkaConnectClient;
import com.provectus.kafka.ui.config.ClustersProperties;
import com.provectus.kafka.ui.config.WebclientProperties;
import com.provectus.kafka.ui.connect.api.KafkaConnectClientApi;
import com.provectus.kafka.ui.emitter.PollingSettings;
import com.provectus.kafka.ui.model.ApplicationPropertyValidationDTO;
@ -22,9 +23,7 @@ import java.util.Optional;
import java.util.Properties;
import java.util.stream.Stream;
import javax.annotation.Nullable;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Service;
import org.springframework.util.unit.DataSize;
import org.springframework.web.reactive.function.client.WebClient;
@ -34,12 +33,18 @@ import reactor.util.function.Tuple2;
import reactor.util.function.Tuples;
@Service
@RequiredArgsConstructor
@Slf4j
public class KafkaClusterFactory {
@Value("${webclient.max-in-memory-buffer-size:20MB}")
private DataSize maxBuffSize;
private static final DataSize DEFAULT_WEBCLIENT_BUFFER = DataSize.parse("20MB");
private final DataSize webClientMaxBuffSize;
public KafkaClusterFactory(WebclientProperties webclientProperties) {
this.webClientMaxBuffSize = Optional.ofNullable(webclientProperties.getMaxInMemoryBufferSize())
.map(DataSize::parse)
.orElse(DEFAULT_WEBCLIENT_BUFFER);
}
public KafkaCluster create(ClustersProperties properties,
ClustersProperties.Cluster clusterProperties) {
@ -140,7 +145,7 @@ public class KafkaClusterFactory {
url -> new RetryingKafkaConnectClient(
connectCluster.toBuilder().address(url).build(),
cluster.getSsl(),
maxBuffSize
webClientMaxBuffSize
),
ReactiveFailover.CONNECTION_REFUSED_EXCEPTION_FILTER,
"No alive connect instances available",
@ -158,7 +163,7 @@ public class KafkaClusterFactory {
WebClient webClient = new WebClientConfigurator()
.configureSsl(clusterProperties.getSsl(), clusterProperties.getSchemaRegistrySsl())
.configureBasicAuth(auth.getUsername(), auth.getPassword())
.configureBufferSize(maxBuffSize)
.configureBufferSize(webClientMaxBuffSize)
.build();
return ReactiveFailover.create(
parseUrlList(clusterProperties.getSchemaRegistry()),
@ -181,7 +186,7 @@ public class KafkaClusterFactory {
clusterProperties.getKsqldbServerAuth(),
clusterProperties.getSsl(),
clusterProperties.getKsqldbServerSsl(),
maxBuffSize
webClientMaxBuffSize
),
ReactiveFailover.CONNECTION_REFUSED_EXCEPTION_FILTER,
"No live ksqldb instances available",

@ -1,6 +1,7 @@
package com.provectus.kafka.ui.service;
import com.google.common.util.concurrent.RateLimiter;
import com.provectus.kafka.ui.config.ClustersProperties;
import com.provectus.kafka.ui.emitter.BackwardRecordEmitter;
import com.provectus.kafka.ui.emitter.ForwardRecordEmitter;
import com.provectus.kafka.ui.emitter.MessageFilters;
@ -20,13 +21,13 @@ import com.provectus.kafka.ui.serdes.ProducerRecordCreator;
import com.provectus.kafka.ui.util.SslPropertiesUtil;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Properties;
import java.util.concurrent.CompletableFuture;
import java.util.function.Predicate;
import java.util.function.UnaryOperator;
import java.util.stream.Collectors;
import javax.annotation.Nullable;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang3.StringUtils;
import org.apache.kafka.clients.admin.OffsetSpec;
@ -44,16 +45,35 @@ import reactor.core.publisher.Mono;
import reactor.core.scheduler.Schedulers;
@Service
@RequiredArgsConstructor
@Slf4j
public class MessagesService {
private static final int DEFAULT_MAX_PAGE_SIZE = 500;
private static final int DEFAULT_PAGE_SIZE = 100;
// limiting UI messages rate to 20/sec in tailing mode
public static final int TAILING_UI_MESSAGE_THROTTLE_RATE = 20;
private static final int TAILING_UI_MESSAGE_THROTTLE_RATE = 20;
private final AdminClientService adminClientService;
private final DeserializationService deserializationService;
private final ConsumerGroupService consumerGroupService;
private final int maxPageSize;
private final int defaultPageSize;
public MessagesService(AdminClientService adminClientService,
DeserializationService deserializationService,
ConsumerGroupService consumerGroupService,
ClustersProperties properties) {
this.adminClientService = adminClientService;
this.deserializationService = deserializationService;
this.consumerGroupService = consumerGroupService;
var pollingProps = Optional.ofNullable(properties.getPolling())
.orElseGet(ClustersProperties.PollingProperties::new);
this.maxPageSize = Optional.ofNullable(pollingProps.getMaxPageSize())
.orElse(DEFAULT_MAX_PAGE_SIZE);
this.defaultPageSize = Optional.ofNullable(pollingProps.getDefaultPageSize())
.orElse(DEFAULT_PAGE_SIZE);
}
private Mono<TopicDescription> withExistingTopic(KafkaCluster cluster, String topicName) {
return adminClientService.get(cluster)
@ -139,7 +159,7 @@ public class MessagesService {
ConsumerPosition consumerPosition,
@Nullable String query,
MessageFilterTypeDTO filterQueryType,
int limit,
@Nullable Integer pageSize,
SeekDirectionDTO seekDirection,
@Nullable String keySerde,
@Nullable String valueSerde) {
@ -147,7 +167,13 @@ public class MessagesService {
.flux()
.publishOn(Schedulers.boundedElastic())
.flatMap(td -> loadMessagesImpl(cluster, topic, consumerPosition, query,
filterQueryType, limit, seekDirection, keySerde, valueSerde));
filterQueryType, fixPageSize(pageSize), seekDirection, keySerde, valueSerde));
}
private int fixPageSize(@Nullable Integer pageSize) {
return Optional.ofNullable(pageSize)
.filter(ps -> ps > 0 && ps <= maxPageSize)
.orElse(defaultPageSize);
}
private Flux<TopicMessageEventDTO> loadMessagesImpl(KafkaCluster cluster,
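
A quick sketch of how fixPageSize resolves the effective page size under the defaults above (maxPageSize 500, defaultPageSize 100); the requested values are illustrative:

    // requested -> effective
    // null      -> 100 (defaultPageSize)
    // 0 or -5   -> 100 (rejected: must be > 0)
    // 50        -> 50
    // 5000      -> 100 (rejected, not clamped: exceeds maxPageSize)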

@ -2,6 +2,7 @@ package com.provectus.kafka.ui.util;
import com.provectus.kafka.ui.config.ClustersProperties;
import com.provectus.kafka.ui.config.WebclientProperties;
import com.provectus.kafka.ui.config.auth.OAuthProperties;
import com.provectus.kafka.ui.config.auth.RoleBasedAccessControlProperties;
import com.provectus.kafka.ui.exception.FileUploadException;
@ -97,6 +98,7 @@ public class DynamicConfigOperations {
.type(ctx.getEnvironment().getProperty("auth.type"))
.oauth2(getNullableBean(OAuthProperties.class))
.build())
.webclient(getNullableBean(WebclientProperties.class))
.build();
}
@ -204,6 +206,7 @@ public class DynamicConfigOperations {
private ClustersProperties kafka;
private RoleBasedAccessControlProperties rbac;
private Auth auth;
private WebclientProperties webclient;
@Data
@Builder
@ -222,6 +225,9 @@ public class DynamicConfigOperations {
Optional.ofNullable(auth)
.flatMap(a -> Optional.ofNullable(a.oauth2))
.ifPresent(OAuthProperties::validate);
Optional.ofNullable(webclient)
.ifPresent(WebclientProperties::validate);
}
}

@ -4,9 +4,9 @@ import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
// Specifies a field that can contain any kind of value: primitive, complex, or null
public class AnyFieldSchema implements FieldSchema {
class AnyFieldSchema implements FieldSchema {
public static AnyFieldSchema get() {
static AnyFieldSchema get() {
return new AnyFieldSchema();
}

@ -4,10 +4,10 @@ import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.node.ObjectNode;
public class ArrayFieldSchema implements FieldSchema {
class ArrayFieldSchema implements FieldSchema {
private final FieldSchema itemsSchema;
public ArrayFieldSchema(FieldSchema itemsSchema) {
ArrayFieldSchema(FieldSchema itemsSchema) {
this.itemsSchema = itemsSchema;
}

@ -7,10 +7,10 @@ import java.util.List;
import java.util.Map;
public class EnumJsonType extends JsonType {
class EnumJsonType extends JsonType {
private final List<String> values;
public EnumJsonType(List<String> values) {
EnumJsonType(List<String> values) {
super(Type.ENUM);
this.values = values;
}

@ -3,6 +3,6 @@ package com.provectus.kafka.ui.util.jsonschema;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
public interface FieldSchema {
interface FieldSchema {
JsonNode toJsonNode(ObjectMapper mapper);
}

@ -4,7 +4,7 @@ import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import java.util.Map;
public abstract class JsonType {
abstract class JsonType {
protected final Type type;
@ -12,13 +12,13 @@ public abstract class JsonType {
this.type = type;
}
public Type getType() {
Type getType() {
return type;
}
public abstract Map<String, JsonNode> toJsonNode(ObjectMapper mapper);
abstract Map<String, JsonNode> toJsonNode(ObjectMapper mapper);
public enum Type {
enum Type {
NULL,
BOOLEAN,
OBJECT,

@ -2,21 +2,27 @@ package com.provectus.kafka.ui.util.jsonschema;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.node.BooleanNode;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.fasterxml.jackson.databind.node.TextNode;
import javax.annotation.Nullable;
public class MapFieldSchema implements FieldSchema {
private final FieldSchema itemSchema;
class MapFieldSchema implements FieldSchema {
private final @Nullable FieldSchema itemSchema;
public MapFieldSchema(FieldSchema itemSchema) {
MapFieldSchema(@Nullable FieldSchema itemSchema) {
this.itemSchema = itemSchema;
}
MapFieldSchema() {
this(null);
}
@Override
public JsonNode toJsonNode(ObjectMapper mapper) {
final ObjectNode objectNode = mapper.createObjectNode();
objectNode.set("type", new TextNode(JsonType.Type.OBJECT.getName()));
objectNode.set("additionalProperties", itemSchema.toJsonNode(mapper));
objectNode.set("additionalProperties", itemSchema != null ? itemSchema.toJsonNode(mapper) : BooleanNode.TRUE);
return objectNode;
}
}
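
For reference, the JSON Schema these two constructors render via toJsonNode (assuming plain Jackson serialization of the resulting ObjectNode):

    // new MapFieldSchema(itemSchema) -> {"type":"object","additionalProperties":<item schema JSON>}
    // new MapFieldSchema()           -> {"type":"object","additionalProperties":true}

The no-item-schema form is what protobuf map fields produce; it matches the "additionalProperties": true entries in the converter test expectation later in this diff.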

@ -9,24 +9,24 @@ import java.util.stream.Collectors;
import reactor.util.function.Tuple2;
import reactor.util.function.Tuples;
public class ObjectFieldSchema implements FieldSchema {
class ObjectFieldSchema implements FieldSchema {
public static final ObjectFieldSchema EMPTY = new ObjectFieldSchema(Map.of(), List.of());
static final ObjectFieldSchema EMPTY = new ObjectFieldSchema(Map.of(), List.of());
private final Map<String, FieldSchema> properties;
private final List<String> required;
public ObjectFieldSchema(Map<String, FieldSchema> properties,
ObjectFieldSchema(Map<String, FieldSchema> properties,
List<String> required) {
this.properties = properties;
this.required = required;
}
public Map<String, FieldSchema> getProperties() {
Map<String, FieldSchema> getProperties() {
return properties;
}
public List<String> getRequired() {
List<String> getRequired() {
return required;
}

@ -5,11 +5,10 @@ import com.fasterxml.jackson.databind.ObjectMapper;
import java.util.List;
import java.util.stream.Collectors;
public class OneOfFieldSchema implements FieldSchema {
class OneOfFieldSchema implements FieldSchema {
private final List<FieldSchema> schemaList;
public OneOfFieldSchema(
List<FieldSchema> schemaList) {
OneOfFieldSchema(List<FieldSchema> schemaList) {
this.schemaList = schemaList;
}

@ -94,6 +94,9 @@ public class ProtobufSchemaConverter implements JsonSchemaConverter<Descriptors.
if (wellKnownTypeSchema.isPresent()) {
return wellKnownTypeSchema.get();
}
if (field.isMapField()) {
return new MapFieldSchema();
}
final JsonType jsonType = convertType(field);
FieldSchema fieldSchema;
if (jsonType.getType().equals(JsonType.Type.OBJECT)) {
@ -149,67 +152,47 @@ public class ProtobufSchemaConverter implements JsonSchemaConverter<Descriptors.
}
private JsonType convertType(Descriptors.FieldDescriptor field) {
switch (field.getType()) {
case INT32:
case FIXED32:
case SFIXED32:
case SINT32:
return new SimpleJsonType(
JsonType.Type.INTEGER,
Map.of(
"maximum", IntNode.valueOf(Integer.MAX_VALUE),
"minimum", IntNode.valueOf(Integer.MIN_VALUE)
)
);
case UINT32:
return new SimpleJsonType(
JsonType.Type.INTEGER,
Map.of(
"maximum", LongNode.valueOf(UnsignedInteger.MAX_VALUE.longValue()),
"minimum", IntNode.valueOf(0)
)
);
return switch (field.getType()) {
case INT32, FIXED32, SFIXED32, SINT32 -> new SimpleJsonType(
JsonType.Type.INTEGER,
Map.of(
"maximum", IntNode.valueOf(Integer.MAX_VALUE),
"minimum", IntNode.valueOf(Integer.MIN_VALUE)
)
);
case UINT32 -> new SimpleJsonType(
JsonType.Type.INTEGER,
Map.of(
"maximum", LongNode.valueOf(UnsignedInteger.MAX_VALUE.longValue()),
"minimum", IntNode.valueOf(0)
)
);
//TODO: all *64 types will be printed with quotes (as strings);
// see JsonFormat::printSingleFieldValue for the implementation. This can cause problems when copy-pasting from the
// messages table to the `Produce` area - need to decide whether that is critical.
case INT64:
case FIXED64:
case SFIXED64:
case SINT64:
return new SimpleJsonType(
JsonType.Type.INTEGER,
Map.of(
"maximum", LongNode.valueOf(Long.MAX_VALUE),
"minimum", LongNode.valueOf(Long.MIN_VALUE)
)
);
case UINT64:
return new SimpleJsonType(
JsonType.Type.INTEGER,
Map.of(
"maximum", new BigIntegerNode(UnsignedLong.MAX_VALUE.bigIntegerValue()),
"minimum", LongNode.valueOf(0)
)
);
case MESSAGE:
case GROUP:
return new SimpleJsonType(JsonType.Type.OBJECT);
case ENUM:
return new EnumJsonType(
field.getEnumType().getValues().stream()
.map(Descriptors.EnumValueDescriptor::getName)
.collect(Collectors.toList())
);
case BYTES:
case STRING:
return new SimpleJsonType(JsonType.Type.STRING);
case FLOAT:
case DOUBLE:
return new SimpleJsonType(JsonType.Type.NUMBER);
case BOOL:
return new SimpleJsonType(JsonType.Type.BOOLEAN);
default:
return new SimpleJsonType(JsonType.Type.STRING);
}
case INT64, FIXED64, SFIXED64, SINT64 -> new SimpleJsonType(
JsonType.Type.INTEGER,
Map.of(
"maximum", LongNode.valueOf(Long.MAX_VALUE),
"minimum", LongNode.valueOf(Long.MIN_VALUE)
)
);
case UINT64 -> new SimpleJsonType(
JsonType.Type.INTEGER,
Map.of(
"maximum", new BigIntegerNode(UnsignedLong.MAX_VALUE.bigIntegerValue()),
"minimum", LongNode.valueOf(0)
)
);
case MESSAGE, GROUP -> new SimpleJsonType(JsonType.Type.OBJECT);
case ENUM -> new EnumJsonType(
field.getEnumType().getValues().stream()
.map(Descriptors.EnumValueDescriptor::getName)
.collect(Collectors.toList())
);
case BYTES, STRING -> new SimpleJsonType(JsonType.Type.STRING);
case FLOAT, DOUBLE -> new SimpleJsonType(JsonType.Type.NUMBER);
case BOOL -> new SimpleJsonType(JsonType.Type.BOOLEAN);
};
}
}

@ -4,10 +4,10 @@ import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.node.TextNode;
public class RefFieldSchema implements FieldSchema {
class RefFieldSchema implements FieldSchema {
private final String ref;
public RefFieldSchema(String ref) {
RefFieldSchema(String ref) {
this.ref = ref;
}
@ -16,7 +16,7 @@ public class RefFieldSchema implements FieldSchema {
return mapper.createObjectNode().set("$ref", new TextNode(ref));
}
public String getRef() {
String getRef() {
return ref;
}
}

@ -3,10 +3,10 @@ package com.provectus.kafka.ui.util.jsonschema;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
public class SimpleFieldSchema implements FieldSchema {
class SimpleFieldSchema implements FieldSchema {
private final JsonType type;
public SimpleFieldSchema(JsonType type) {
SimpleFieldSchema(JsonType type) {
this.type = type;
}

@ -6,15 +6,15 @@ import com.fasterxml.jackson.databind.node.TextNode;
import com.google.common.collect.ImmutableMap;
import java.util.Map;
public class SimpleJsonType extends JsonType {
class SimpleJsonType extends JsonType {
private final Map<String, JsonNode> additionalTypeProperties;
public SimpleJsonType(Type type) {
SimpleJsonType(Type type) {
this(type, Map.of());
}
public SimpleJsonType(Type type, Map<String, JsonNode> additionalTypeProperties) {
SimpleJsonType(Type type, Map<String, JsonNode> additionalTypeProperties) {
super(type);
this.additionalTypeProperties = additionalTypeProperties;
}

@ -0,0 +1,83 @@
package com.provectus.kafka.ui.model;
import static org.assertj.core.api.Assertions.assertThat;
import com.provectus.kafka.ui.service.ReactiveAdminClient;
import java.math.BigDecimal;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.apache.kafka.clients.admin.TopicDescription;
import org.apache.kafka.common.Node;
import org.apache.kafka.common.TopicPartitionInfo;
import org.assertj.core.data.Percentage;
import org.junit.jupiter.api.Test;
class PartitionDistributionStatsTest {
@Test
void skewCalculatedBasedOnPartitionsCounts() {
Node n1 = new Node(1, "n1", 9092);
Node n2 = new Node(2, "n2", 9092);
Node n3 = new Node(3, "n3", 9092);
Node n4 = new Node(4, "n4", 9092);
var stats = PartitionDistributionStats.create(
Statistics.builder()
.clusterDescription(
new ReactiveAdminClient.ClusterDescription(null, "test", Set.of(n1, n2, n3), null))
.topicDescriptions(
Map.of(
"t1", new TopicDescription(
"t1", false,
List.of(
new TopicPartitionInfo(0, n1, List.of(n1, n2), List.of(n1, n2)),
new TopicPartitionInfo(1, n2, List.of(n2, n3), List.of(n2, n3))
)
),
"t2", new TopicDescription(
"t2", false,
List.of(
new TopicPartitionInfo(0, n1, List.of(n1, n2), List.of(n1, n2)),
new TopicPartitionInfo(1, null, List.of(n2, n1), List.of(n1))
)
)
)
)
.build(), 4
);
assertThat(stats.getPartitionLeaders())
.containsExactlyInAnyOrderEntriesOf(Map.of(n1, 2, n2, 1));
assertThat(stats.getPartitionsCount())
.containsExactlyInAnyOrderEntriesOf(Map.of(n1, 3, n2, 4, n3, 1));
assertThat(stats.getInSyncPartitions())
.containsExactlyInAnyOrderEntriesOf(Map.of(n1, 3, n2, 3, n3, 1));
// Node(partitions): n1(3), n2(4), n3(1), n4(0)
// average partitions cnt = (3+4+1) / 3 = 2.666 (counting only nodes with partitions!)
assertThat(stats.getAvgPartitionsPerBroker())
.isCloseTo(2.666, Percentage.withPercentage(1));
assertThat(stats.partitionsSkew(n1))
.isCloseTo(BigDecimal.valueOf(12.5), Percentage.withPercentage(1));
assertThat(stats.partitionsSkew(n2))
.isCloseTo(BigDecimal.valueOf(50), Percentage.withPercentage(1));
assertThat(stats.partitionsSkew(n3))
.isCloseTo(BigDecimal.valueOf(-62.5), Percentage.withPercentage(1));
assertThat(stats.partitionsSkew(n4))
.isCloseTo(BigDecimal.valueOf(-100), Percentage.withPercentage(1));
// Node(leaders): n1(2), n2(1), n3(0), n4(0)
// average leaders cnt = (2+1) / 2 = 1.5 (counting only nodes with leaders!)
assertThat(stats.leadersSkew(n1))
.isCloseTo(BigDecimal.valueOf(33.33), Percentage.withPercentage(1));
assertThat(stats.leadersSkew(n2))
.isCloseTo(BigDecimal.valueOf(-33.33), Percentage.withPercentage(1));
assertThat(stats.leadersSkew(n3))
.isCloseTo(BigDecimal.valueOf(-100), Percentage.withPercentage(1));
assertThat(stats.leadersSkew(n4))
.isCloseTo(BigDecimal.valueOf(-100), Percentage.withPercentage(1));
}
}

@ -59,8 +59,10 @@ class ProtobufSchemaConverterTest {
TestMsg outer_ref = 2;
EmbeddedMsg self_ref = 3;
}
}""";
map<int32, string> intToStringMap = 21;
map<string, EmbeddedMsg> strToObjMap = 22;
}""";
String expectedJsonSchema = """
{
@ -109,7 +111,9 @@ class ProtobufSchemaConverterTest {
"v2": { "type": [ "number", "string", "object", "array", "boolean", "null" ] },
"uint32_w_field": { "type": "integer", "maximum": 4294967295, "minimum": 0 },
"bool_w_field": { "type": "boolean" },
"uint64_w_field": { "type": "integer", "maximum": 18446744073709551615, "minimum": 0 }
"uint64_w_field": { "type": "integer", "maximum": 18446744073709551615, "minimum": 0 },
"strToObjMap": { "type": "object", "additionalProperties": true },
"intToStringMap": { "type": "object", "additionalProperties": true }
}
},
"test.TestMsg.EmbeddedMsg": {

@ -2375,6 +2375,16 @@ components:
type: number
bytesOutPerSec:
type: number
partitionsLeader:
type: integer
partitions:
type: integer
inSyncPartitions:
type: integer
partitionsSkew:
type: number
leadersSkew:
type: number
required:
- id
@ -2441,6 +2451,7 @@ components:
- MEMBERS
- STATE
- MESSAGES_BEHIND
- TOPIC_NUM
ConsumerGroupsPageResponse:
type: object
@ -3467,6 +3478,12 @@ components:
type: array
items:
$ref: '#/components/schemas/Action'
webclient:
type: object
properties:
maxInMemoryBufferSize:
type: string
description: "examples: 20, 12KB, 5MB"
kafka:
type: object
properties:
@ -3479,6 +3496,14 @@ components:
type: integer
noDataEmptyPolls:
type: integer
maxPageSize:
type: integer
defaultPageSize:
type: integer
adminClientTimeout:
type: integer
internalTopicPrefix:
type: string
clusters:
type: array
items:

@ -36,29 +36,31 @@ import org.springframework.web.reactive.function.client.WebClientResponseExcepti
@Slf4j
public class ApiService extends BaseSource {
private final ApiClient apiClient = new ApiClient().setBasePath(BASE_API_URL);
@SneakyThrows
private TopicsApi topicApi() {
return new TopicsApi(new ApiClient().setBasePath(BASE_API_URL));
return new TopicsApi(apiClient);
}
@SneakyThrows
private SchemasApi schemaApi() {
return new SchemasApi(new ApiClient().setBasePath(BASE_API_URL));
return new SchemasApi(apiClient);
}
@SneakyThrows
private KafkaConnectApi connectorApi() {
return new KafkaConnectApi(new ApiClient().setBasePath(BASE_API_URL));
return new KafkaConnectApi(apiClient);
}
@SneakyThrows
private MessagesApi messageApi() {
return new MessagesApi(new ApiClient().setBasePath(BASE_API_URL));
return new MessagesApi(apiClient);
}
@SneakyThrows
private KsqlApi ksqlApi() {
return new KsqlApi(new ApiClient().setBasePath(BASE_API_URL));
return new KsqlApi(apiClient);
}
@SneakyThrows

@ -2,6 +2,7 @@ package com.provectus.kafka.ui.manualsuite.backlog;
import static com.provectus.kafka.ui.qasesuite.BaseQaseTest.BROKERS_SUITE_ID;
import static com.provectus.kafka.ui.qasesuite.BaseQaseTest.KSQL_DB_SUITE_ID;
import static com.provectus.kafka.ui.qasesuite.BaseQaseTest.SCHEMAS_SUITE_ID;
import static com.provectus.kafka.ui.qasesuite.BaseQaseTest.TOPICS_PROFILE_SUITE_ID;
import static com.provectus.kafka.ui.utilities.qase.enums.State.TO_BE_AUTOMATED;
@ -28,30 +29,72 @@ public class SmokeBacklog extends BaseManualTest {
}
@Automation(state = TO_BE_AUTOMATED)
@Suite(id = BROKERS_SUITE_ID)
@QaseId(331)
@Suite(id = KSQL_DB_SUITE_ID)
@QaseId(278)
@Test
public void testCaseC() {
}
@Automation(state = TO_BE_AUTOMATED)
@Suite(id = BROKERS_SUITE_ID)
@QaseId(332)
@QaseId(331)
@Test
public void testCaseD() {
}
@Automation(state = TO_BE_AUTOMATED)
@Suite(id = TOPICS_PROFILE_SUITE_ID)
@QaseId(335)
@Suite(id = BROKERS_SUITE_ID)
@QaseId(332)
@Test
public void testCaseE() {
}
@Automation(state = TO_BE_AUTOMATED)
@Suite(id = TOPICS_PROFILE_SUITE_ID)
@QaseId(336)
@QaseId(335)
@Test
public void testCaseF() {
}
@Automation(state = TO_BE_AUTOMATED)
@Suite(id = TOPICS_PROFILE_SUITE_ID)
@QaseId(336)
@Test
public void testCaseG() {
}
@Automation(state = TO_BE_AUTOMATED)
@Suite(id = TOPICS_PROFILE_SUITE_ID)
@QaseId(343)
@Test
public void testCaseH() {
}
@Automation(state = TO_BE_AUTOMATED)
@Suite(id = KSQL_DB_SUITE_ID)
@QaseId(344)
@Test
public void testCaseI() {
}
@Automation(state = TO_BE_AUTOMATED)
@Suite(id = SCHEMAS_SUITE_ID)
@QaseId(345)
@Test
public void testCaseJ() {
}
@Automation(state = TO_BE_AUTOMATED)
@Suite(id = SCHEMAS_SUITE_ID)
@QaseId(346)
@Test
public void testCaseK() {
}
@Automation(state = TO_BE_AUTOMATED)
@Suite(id = TOPICS_PROFILE_SUITE_ID)
@QaseId(347)
@Test
public void testCaseL() {
}
}

@ -92,4 +92,28 @@ public class TopicsTest extends BaseManualTest {
@Test
public void testCaseN() {
}
@Automation(state = NOT_AUTOMATED)
@QaseId(337)
@Test
public void testCaseO() {
}
@Automation(state = NOT_AUTOMATED)
@QaseId(339)
@Test
public void testCaseP() {
}
@Automation(state = NOT_AUTOMATED)
@QaseId(341)
@Test
public void testCaseQ() {
}
@Automation(state = NOT_AUTOMATED)
@QaseId(342)
@Test
public void testCaseR() {
}
}

@ -14,4 +14,16 @@ public class WizardTest extends BaseManualTest {
@Test
public void testCaseA() {
}
@Automation(state = NOT_AUTOMATED)
@QaseId(338)
@Test
public void testCaseB() {
}
@Automation(state = NOT_AUTOMATED)
@QaseId(340)
@Test
public void testCaseC() {
}
}

@ -51,9 +51,9 @@ const List = () => {
accessorKey: 'members',
},
{
id: ConsumerGroupOrdering.TOPIC_NUM,
header: 'Num Of Topics',
accessorKey: 'topics',
enableSorting: false,
},
{
id: ConsumerGroupOrdering.MESSAGES_BEHIND,

@ -4,11 +4,7 @@ import { CellContext } from '@tanstack/react-table';
import ClusterContext from 'components/contexts/ClusterContext';
import { ClusterNameRoute } from 'lib/paths';
import useAppParams from 'lib/hooks/useAppParams';
import {
Dropdown,
DropdownItem,
DropdownItemHint,
} from 'components/common/Dropdown';
import { Dropdown, DropdownItemHint } from 'components/common/Dropdown';
import {
useDeleteTopic,
useClearTopicMessages,
@ -55,7 +51,8 @@ const ActionsCell: React.FC<CellContext<Topic, unknown>> = ({ row }) => {
with DELETE policy
</DropdownItemHint>
</ActionDropdownItem>
<DropdownItem
<ActionDropdownItem
disabled={!isTopicDeletionAllowed}
onClick={recreateTopic.mutateAsync}
confirm={
<>
@ -63,9 +60,14 @@ const ActionsCell: React.FC<CellContext<Topic, unknown>> = ({ row }) => {
</>
}
danger
permission={{
resource: ResourceType.TOPIC,
action: [Action.VIEW, Action.CREATE, Action.DELETE],
value: name,
}}
>
Recreate Topic
</DropdownItem>
</ActionDropdownItem>
<ActionDropdownItem
disabled={!isTopicDeletionAllowed}
onClick={() => deleteTopic.mutateAsync(name)}

@ -27,7 +27,7 @@ export interface AddEditFilterContainerProps {
inputDisplayNameDefaultValue?: string;
inputCodeDefaultValue?: string;
isAdd?: boolean;
submitCallback?: (values: AddMessageFilters) => Promise<void>;
submitCallback?: (values: AddMessageFilters) => void;
}
const AddEditFilterContainer: React.FC<AddEditFilterContainerProps> = ({

@ -29,8 +29,10 @@ const Message: React.FC<Props> = ({
timestampType,
offset,
key,
keySize,
partition,
content,
valueSize,
headers,
valueSerde,
keySerde,
@ -138,6 +140,8 @@ const Message: React.FC<Props> = ({
headers={headers}
timestamp={timestamp}
timestampType={timestampType}
keySize={keySize}
contentSize={valueSize}
/>
)}
</>

@ -15,6 +15,8 @@ export interface MessageContentProps {
headers?: { [key: string]: string | undefined };
timestamp?: Date;
timestampType?: TopicMessageTimestampTypeEnum;
keySize?: number;
contentSize?: number;
}
const MessageContent: React.FC<MessageContentProps> = ({
@ -23,6 +25,8 @@ const MessageContent: React.FC<MessageContentProps> = ({
headers,
timestamp,
timestampType,
keySize,
contentSize,
}) => {
const [activeTab, setActiveTab] = React.useState<Tab>('content');
const [searchParams] = useSearchParams();
@ -54,8 +58,7 @@ const MessageContent: React.FC<MessageContentProps> = ({
e.preventDefault();
setActiveTab('headers');
};
const keySize = new TextEncoder().encode(messageKey).length;
const contentSize = new TextEncoder().encode(messageContent).length;
const contentType =
messageContent && messageContent.trim().startsWith('{')
? SchemaType.JSON

@ -26,7 +26,7 @@
<assertj.version>3.19.0</assertj.version>
<avro.version>1.11.1</avro.version>
<byte-buddy.version>1.12.19</byte-buddy.version>
<confluent.version>7.3.0</confluent.version>
<confluent.version>7.3.3</confluent.version>
<datasketches-java.version>3.1.0</datasketches-java.version>
<groovy.version>3.0.13</groovy.version>
<jackson.version>2.14.0</jackson.version>
@ -43,7 +43,7 @@
<!-- Test dependency versions -->
<junit.version>5.9.1</junit.version>
<mockito.version>5.1.1</mockito.version>
<mockito.version>5.3.0</mockito.version>
<okhttp3.mockwebserver.version>4.10.0</okhttp3.mockwebserver.version>
<testcontainers.version>1.17.5</testcontainers.version>
@ -52,7 +52,7 @@
<pnpm.version>v7.4.0</pnpm.version>
<!-- Plugin versions -->
<fabric8-maven-plugin.version>0.42.0</fabric8-maven-plugin.version>
<fabric8-maven-plugin.version>0.42.1</fabric8-maven-plugin.version>
<frontend-maven-plugin.version>1.12.1</frontend-maven-plugin.version>
<maven-clean-plugin.version>3.2.0</maven-clean-plugin.version>
<maven-compiler-plugin.version>3.10.1</maven-compiler-plugin.version>