Compare commits


23 commits

Author SHA1 Message Date
iliax
d2e6e6a509 merged with master 2023-08-21 17:40:52 +04:00
iliax
3c569aaa02 Merge branch 'master' of github.com:provectus/kafka-ui into ISSUE-3504_messagesApiV2
 Conflicts:
	kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/MessagesController.java
	kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/AbstractEmitter.java
	kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/BackwardRecordEmitter.java
	kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/ConsumingStats.java
	kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/ForwardRecordEmitter.java
	kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/MessagesProcessing.java
	kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/OffsetsInfo.java
	kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/SeekOperations.java
	kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/TailingEmitter.java
	kafka-ui-api/src/main/java/com/provectus/kafka/ui/serdes/ConsumerRecordDeserializer.java
	kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/MessagesService.java
	kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/RecordEmitterTest.java
2023-08-21 13:43:23 +04:00
Roman Zabaluev
32af672aee
Merge branch 'master' into ISSUE-3504_messagesApiV2 2023-08-03 19:10:32 +07:00
iliax
c845786ba9 master merge 2023-08-02 12:47:42 +04:00
iliax
df663967a9 Merge branch 'master' of github.com:provectus/kafka-ui into ISSUE-3504_messagesApiV2
 Conflicts:
	kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/AbstractEmitter.java
	kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/ConsumingStats.java
	kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/ForwardRecordEmitter.java
	kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/TailingEmitter.java
	kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/RecordEmitterTest.java
2023-08-02 12:14:47 +04:00
iliax
e04928ca0d master merge 2023-08-01 15:51:37 +04:00
iliax
3e02e3f6ea Merge branch 'master' of github.com:provectus/kafka-ui into ISSUE-3504_messagesApiV2
 Conflicts:
	kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/MessagesController.java
	kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/MessageFilters.java
	kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/MessagesService.java
	kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/MessagesServiceTest.java
2023-08-01 15:23:57 +04:00
iliax
b5a52fc432 PR comments fx 2023-07-11 12:03:32 +04:00
iliax
911bbc8684 minor improvements 2023-04-24 23:54:14 +04:00
iliax
f05e8bbae3 new tests 2023-04-24 23:40:49 +04:00
iliax
153745e9e8 new tests 2023-04-24 23:21:32 +04:00
iliax
8f217221e4 api updates 2023-04-24 20:00:45 +04:00
iliax
b9e8b3376b Merge branch 'master' of github.com:provectus/kafka-ui into ISSUE-3504_messagesApiV2
 Conflicts:
	kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/MessagesController.java
	kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/TailingEmitter.java
	kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/MessagesService.java
	kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/RecordEmitterTest.java
2023-04-24 11:45:32 +04:00
Ilya Kuramshin
498a099829
Merge branch 'master' into ISSUE-3504_messagesApiV2 2023-03-23 14:45:16 +04:00
iliax
d6244b9b91 wip 2023-03-23 14:43:19 +04:00
iliax
f5d282e529 wip 2023-03-23 00:01:07 +04:00
iliax
23157b7234 wip 2023-03-22 23:52:50 +04:00
iliax
7f2f1611bd wip 2023-03-22 23:29:48 +04:00
iliax
35c63b8e85 wip 2023-03-22 23:21:37 +04:00
iliax
306c1fb1b7 wip 2023-03-22 23:18:05 +04:00
iliax
2f6781758b Merge branch 'master' of github.com:provectus/kafka-ui into ISSUE-3504_messagesApiV2 2023-03-21 21:58:35 +04:00
iliax
4be46ec520 wip 2023-03-21 19:13:01 +04:00
iliax
a279c678d3 wip 2023-03-21 11:50:57 +04:00
105 changed files with 3356 additions and 3161 deletions

View file

@ -31,7 +31,7 @@ jobs:
echo "Packer will be triggered in this dir $WORK_DIR"
- name: Configure AWS credentials for Kafka-UI account
uses: aws-actions/configure-aws-credentials@v3
uses: aws-actions/configure-aws-credentials@v2
with:
aws-access-key-id: ${{ secrets.AWS_AMI_PUBLISH_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_AMI_PUBLISH_KEY_SECRET }}

View file

@ -45,7 +45,7 @@ jobs:
restore-keys: |
${{ runner.os }}-buildx-
- name: Configure AWS credentials for Kafka-UI account
uses: aws-actions/configure-aws-credentials@v3
uses: aws-actions/configure-aws-credentials@v2
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}

View file

@ -42,7 +42,7 @@ jobs:
restore-keys: |
${{ runner.os }}-buildx-
- name: Configure AWS credentials for Kafka-UI account
uses: aws-actions/configure-aws-credentials@v3
uses: aws-actions/configure-aws-credentials@v2
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}

View file

@ -55,7 +55,7 @@ jobs:
cache-to: type=local,dest=/tmp/.buildx-cache
- name: Run CVE checks
uses: aquasecurity/trivy-action@0.12.0
uses: aquasecurity/trivy-action@0.11.2
with:
image-ref: "provectuslabs/kafka-ui:${{ steps.build.outputs.version }}"
format: "table"

View file

@ -15,7 +15,7 @@ jobs:
tag='${{ github.event.pull_request.number }}'
echo "tag=${tag}" >> $GITHUB_OUTPUT
- name: Configure AWS credentials for Kafka-UI account
uses: aws-actions/configure-aws-credentials@v3
uses: aws-actions/configure-aws-credentials@v2
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}

View file

@ -24,7 +24,7 @@ jobs:
with:
ref: ${{ github.sha }}
- name: Configure AWS credentials
uses: aws-actions/configure-aws-credentials@v3
uses: aws-actions/configure-aws-credentials@v2
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}

View file

@ -18,7 +18,7 @@ jobs:
with:
ref: ${{ github.event.pull_request.head.sha }}
- name: Configure AWS credentials
uses: aws-actions/configure-aws-credentials@v3
uses: aws-actions/configure-aws-credentials@v2
with:
aws-access-key-id: ${{ secrets.S3_AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.S3_AWS_SECRET_ACCESS_KEY }}

View file

@ -11,7 +11,7 @@ jobs:
with:
ref: ${{ github.sha }}
- name: Configure AWS credentials
uses: aws-actions/configure-aws-credentials@v3
uses: aws-actions/configure-aws-credentials@v2
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}

View file

@ -25,11 +25,11 @@ jobs:
ref: ${{ github.event.pull_request.head.sha }}
- uses: pnpm/action-setup@v2.4.0
with:
version: 8.6.12
version: 7.4.0
- name: Install node
uses: actions/setup-node@v3.8.1
with:
node-version: "18.17.1"
node-version: "16.15.0"
cache: "pnpm"
cache-dependency-path: "./kafka-ui-react-app/pnpm-lock.yaml"
- name: Install Node dependencies

View file

@ -47,7 +47,7 @@ jobs:
restore-keys: |
${{ runner.os }}-buildx-
- name: Configure AWS credentials for Kafka-UI account
uses: aws-actions/configure-aws-credentials@v3
uses: aws-actions/configure-aws-credentials@v2
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}

View file

@ -26,7 +26,7 @@ jobs:
echo "Terraform will be triggered in this dir $TF_DIR"
- name: Configure AWS credentials for Kafka-UI account
uses: aws-actions/configure-aws-credentials@v3
uses: aws-actions/configure-aws-credentials@v2
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}

View file

@ -91,7 +91,7 @@ docker run -it -p 8080:8080 -e DYNAMIC_CONFIG_ENABLED=true provectuslabs/kafka-u
Then access the web UI at [http://localhost:8080](http://localhost:8080)
The command is sufficient to try things out. When you're done trying things out, you can proceed with a [persistent installation](https://docs.kafka-ui.provectus.io/quick-start/persistent-start)
The command is sufficient to try things out. When you're done trying things out, you can proceed with a [persistent installation](https://docs.kafka-ui.provectus.io/configuration/quick-start#persistent-start)
## Persistent installation

View file

@ -81,12 +81,6 @@
<groupId>io.confluent</groupId>
<artifactId>kafka-json-schema-serializer</artifactId>
<version>${confluent.version}</version>
<exclusions>
<exclusion>
<groupId>commons-collections</groupId>
<artifactId>commons-collections</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>io.confluent</groupId>
@ -141,11 +135,6 @@
<artifactId>commons-pool2</artifactId>
<version>${apache.commons.version}</version>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-collections4</artifactId>
<version>4.4</version>
</dependency>
<dependency>
<groupId>org.testcontainers</groupId>
<artifactId>testcontainers</artifactId>
@ -249,6 +238,8 @@
<groupId>org.springframework.security</groupId>
<artifactId>spring-security-ldap</artifactId>
</dependency>
<dependency>
<groupId>org.codehaus.groovy</groupId>
<artifactId>groovy-jsr223</artifactId>
@ -403,7 +394,7 @@
<plugin>
<groupId>pl.project13.maven</groupId>
<artifactId>git-commit-id-plugin</artifactId>
<version>4.9.10</version>
<version>4.0.0</version>
<executions>
<execution>
<id>get-the-git-infos</id>

View file

@ -7,6 +7,8 @@ import org.springframework.http.HttpMethod;
import org.springframework.http.HttpStatus;
import org.springframework.http.server.reactive.ServerHttpRequest;
import org.springframework.http.server.reactive.ServerHttpResponse;
import org.springframework.web.reactive.config.CorsRegistry;
import org.springframework.web.reactive.config.WebFluxConfigurer;
import org.springframework.web.server.ServerWebExchange;
import org.springframework.web.server.WebFilter;
import org.springframework.web.server.WebFilterChain;

View file

@ -1,6 +1,7 @@
package com.provectus.kafka.ui.config;
import com.provectus.kafka.ui.exception.ValidationException;
import java.beans.Transient;
import javax.annotation.PostConstruct;
import lombok.Data;
import org.springframework.boot.context.properties.ConfigurationProperties;

View file

@ -1,6 +1,7 @@
package com.provectus.kafka.ui.config.auth;
import java.util.Collection;
import lombok.Value;
public record AuthenticatedUser(String principal, Collection<String> groups) {

View file

@ -2,6 +2,7 @@ package com.provectus.kafka.ui.config.auth;
import java.util.Collection;
import java.util.Map;
import lombok.Value;
import org.springframework.security.core.GrantedAuthority;
import org.springframework.security.oauth2.core.user.OAuth2User;

View file

@ -2,6 +2,7 @@ package com.provectus.kafka.ui.config.auth;
import java.util.Collection;
import java.util.Map;
import lombok.Value;
import org.springframework.security.core.GrantedAuthority;
import org.springframework.security.oauth2.core.oidc.OidcIdToken;
import org.springframework.security.oauth2.core.oidc.OidcUserInfo;

View file

@ -13,6 +13,7 @@ import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Objects;
import java.util.stream.Collectors;
import javax.annotation.Nullable;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
@ -37,7 +38,7 @@ public class AccessController implements AuthorizationApi {
.filter(role -> user.groups().contains(role.getName()))
.map(role -> mapPermissions(role.getPermissions(), role.getClusters()))
.flatMap(Collection::stream)
.toList()
.collect(Collectors.toList())
)
.switchIfEmpty(Mono.just(Collections.emptyList()));
@ -69,10 +70,10 @@ public class AccessController implements AuthorizationApi {
.map(String::toUpperCase)
.map(this::mapAction)
.filter(Objects::nonNull)
.toList());
.collect(Collectors.toList()));
return dto;
})
.toList();
.collect(Collectors.toList());
}
@Nullable

View file

@ -82,13 +82,12 @@ public class ApplicationConfigController extends AbstractController implements A
.build();
return validateAccess(context)
.then(restartRequestDto)
.doOnNext(restartDto -> {
var newConfig = MAPPER.fromDto(restartDto.getConfig().getProperties());
dynamicConfigOperations.persist(newConfig);
.<ResponseEntity<Void>>map(dto -> {
dynamicConfigOperations.persist(MAPPER.fromDto(dto.getConfig().getProperties()));
restarter.requestRestart();
return ResponseEntity.ok().build();
})
.doOnEach(sig -> audit(context, sig))
.doOnSuccess(dto -> restarter.requestRestart())
.map(dto -> ResponseEntity.ok().build());
.doOnEach(sig -> audit(context, sig));
}
@Override
@ -117,8 +116,8 @@ public class ApplicationConfigController extends AbstractController implements A
return validateAccess(context)
.then(configDto)
.flatMap(config -> {
PropertiesStructure newConfig = MAPPER.fromDto(config.getProperties());
ClustersProperties clustersProperties = newConfig.getKafka();
PropertiesStructure propertiesStructure = MAPPER.fromDto(config.getProperties());
ClustersProperties clustersProperties = propertiesStructure.getKafka();
return validateClustersConfig(clustersProperties)
.map(validations -> new ApplicationConfigValidationDTO().clusters(validations));
})

View file

@ -26,8 +26,6 @@ import reactor.core.publisher.Mono;
@RequiredArgsConstructor
@Slf4j
public class BrokersController extends AbstractController implements BrokersApi {
private static final String BROKER_ID = "brokerId";
private final BrokerService brokerService;
private final ClusterMapper clusterMapper;
@ -91,7 +89,7 @@ public class BrokersController extends AbstractController implements BrokersApi
.cluster(clusterName)
.clusterConfigActions(ClusterConfigAction.VIEW)
.operationName("getBrokerConfig")
.operationParams(Map.of(BROKER_ID, id))
.operationParams(Map.of("brokerId", id))
.build();
return validateAccess(context).thenReturn(
@ -110,7 +108,7 @@ public class BrokersController extends AbstractController implements BrokersApi
.cluster(clusterName)
.clusterConfigActions(ClusterConfigAction.VIEW, ClusterConfigAction.EDIT)
.operationName("updateBrokerTopicPartitionLogDir")
.operationParams(Map.of(BROKER_ID, id))
.operationParams(Map.of("brokerId", id))
.build();
return validateAccess(context).then(
@ -130,7 +128,7 @@ public class BrokersController extends AbstractController implements BrokersApi
.cluster(clusterName)
.clusterConfigActions(ClusterConfigAction.VIEW, ClusterConfigAction.EDIT)
.operationName("updateBrokerConfigByName")
.operationParams(Map.of(BROKER_ID, id))
.operationParams(Map.of("brokerId", id))
.build();
return validateAccess(context).then(

View file

@ -22,6 +22,7 @@ import com.provectus.kafka.ui.service.OffsetsResetService;
import java.util.Map;
import java.util.Optional;
import java.util.function.Supplier;
import java.util.stream.Collectors;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Value;
@ -199,7 +200,7 @@ public class ConsumerGroupsController extends AbstractController implements Cons
.consumerGroups(consumerGroupConsumerGroupsPage.consumerGroups()
.stream()
.map(ConsumerGroupMapper::toDto)
.toList());
.collect(Collectors.toList()));
}
}

View file

@ -36,7 +36,6 @@ import reactor.core.publisher.Mono;
public class KafkaConnectController extends AbstractController implements KafkaConnectApi {
private static final Set<ConnectorActionDTO> RESTART_ACTIONS
= Set.of(RESTART, RESTART_FAILED_TASKS, RESTART_ALL_TASKS);
private static final String CONNECTOR_NAME = "connectorName";
private final KafkaConnectService kafkaConnectService;
@ -113,7 +112,7 @@ public class KafkaConnectController extends AbstractController implements KafkaC
.connect(connectName)
.connectActions(ConnectAction.VIEW, ConnectAction.EDIT)
.operationName("deleteConnector")
.operationParams(Map.of(CONNECTOR_NAME, connectName))
.operationParams(Map.of("connectorName", connectName))
.build();
return validateAccess(context).then(
@ -181,7 +180,7 @@ public class KafkaConnectController extends AbstractController implements KafkaC
.connect(connectName)
.connectActions(ConnectAction.VIEW, ConnectAction.EDIT)
.operationName("setConnectorConfig")
.operationParams(Map.of(CONNECTOR_NAME, connectorName))
.operationParams(Map.of("connectorName", connectorName))
.build();
return validateAccess(context).then(
@ -208,7 +207,7 @@ public class KafkaConnectController extends AbstractController implements KafkaC
.connect(connectName)
.connectActions(connectActions)
.operationName("updateConnectorState")
.operationParams(Map.of(CONNECTOR_NAME, connectorName))
.operationParams(Map.of("connectorName", connectorName))
.build();
return validateAccess(context).then(
@ -228,7 +227,7 @@ public class KafkaConnectController extends AbstractController implements KafkaC
.connect(connectName)
.connectActions(ConnectAction.VIEW)
.operationName("getConnectorTasks")
.operationParams(Map.of(CONNECTOR_NAME, connectorName))
.operationParams(Map.of("connectorName", connectorName))
.build();
return validateAccess(context).thenReturn(
@ -248,7 +247,7 @@ public class KafkaConnectController extends AbstractController implements KafkaC
.connect(connectName)
.connectActions(ConnectAction.VIEW, ConnectAction.RESTART)
.operationName("restartConnectorTask")
.operationParams(Map.of(CONNECTOR_NAME, connectorName))
.operationParams(Map.of("connectorName", connectorName))
.build();
return validateAccess(context).then(

View file

@ -5,13 +5,14 @@ import static com.provectus.kafka.ui.model.rbac.permission.TopicAction.MESSAGES_
import static com.provectus.kafka.ui.model.rbac.permission.TopicAction.MESSAGES_READ;
import static com.provectus.kafka.ui.serde.api.Serde.Target.KEY;
import static com.provectus.kafka.ui.serde.api.Serde.Target.VALUE;
import static java.util.stream.Collectors.toMap;
import com.provectus.kafka.ui.api.MessagesApi;
import com.provectus.kafka.ui.exception.ValidationException;
import com.provectus.kafka.ui.model.ConsumerPosition;
import com.provectus.kafka.ui.model.CreateTopicMessageDTO;
import com.provectus.kafka.ui.model.MessageFilterIdDTO;
import com.provectus.kafka.ui.model.MessageFilterRegistrationDTO;
import com.provectus.kafka.ui.model.MessageFilterTypeDTO;
import com.provectus.kafka.ui.model.PollingModeDTO;
import com.provectus.kafka.ui.model.SeekDirectionDTO;
import com.provectus.kafka.ui.model.SeekTypeDTO;
import com.provectus.kafka.ui.model.SerdeUsageDTO;
@ -24,17 +25,12 @@ import com.provectus.kafka.ui.model.rbac.permission.AuditAction;
import com.provectus.kafka.ui.model.rbac.permission.TopicAction;
import com.provectus.kafka.ui.service.DeserializationService;
import com.provectus.kafka.ui.service.MessagesService;
import com.provectus.kafka.ui.util.DynamicConfigOperations;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import javax.annotation.Nullable;
import javax.validation.Valid;
import javax.validation.ValidationException;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.tuple.Pair;
import org.apache.kafka.common.TopicPartition;
import org.springframework.http.ResponseEntity;
import org.springframework.web.bind.annotation.RestController;
import org.springframework.web.server.ServerWebExchange;
@ -49,7 +45,6 @@ public class MessagesController extends AbstractController implements MessagesAp
private final MessagesService messagesService;
private final DeserializationService deserializationService;
private final DynamicConfigOperations dynamicConfigOperations;
@Override
public Mono<ResponseEntity<Void>> deleteTopicMessages(
@ -79,6 +74,7 @@ public class MessagesController extends AbstractController implements MessagesAp
.map(ResponseEntity::ok);
}
@Deprecated
@Override
public Mono<ResponseEntity<Flux<TopicMessageEventDTO>>> getTopicMessages(String clusterName,
String topicName,
@ -91,41 +87,53 @@ public class MessagesController extends AbstractController implements MessagesAp
String keySerde,
String valueSerde,
ServerWebExchange exchange) {
throw new ValidationException("Not supported");
}
@Override
public Mono<ResponseEntity<Flux<TopicMessageEventDTO>>> getTopicMessagesV2(String clusterName, String topicName,
PollingModeDTO mode,
List<Integer> partitions,
Integer limit,
String stringFilter,
String smartFilterId,
Long offset,
Long timestamp,
String keySerde,
String valueSerde,
String cursor,
ServerWebExchange exchange) {
var contextBuilder = AccessContext.builder()
.cluster(clusterName)
.topic(topicName)
.topicActions(MESSAGES_READ)
.operationName("getTopicMessages");
if (StringUtils.isNoneEmpty(q) && MessageFilterTypeDTO.GROOVY_SCRIPT == filterQueryType) {
dynamicConfigOperations.checkIfFilteringGroovyEnabled();
}
if (auditService.isAuditTopic(getCluster(clusterName), topicName)) {
contextBuilder.auditActions(AuditAction.VIEW);
}
seekType = seekType != null ? seekType : SeekTypeDTO.BEGINNING;
seekDirection = seekDirection != null ? seekDirection : SeekDirectionDTO.FORWARD;
filterQueryType = filterQueryType != null ? filterQueryType : MessageFilterTypeDTO.STRING_CONTAINS;
var accessContext = contextBuilder.build();
var positions = new ConsumerPosition(
seekType,
topicName,
parseSeekTo(topicName, seekType, seekTo)
);
Mono<ResponseEntity<Flux<TopicMessageEventDTO>>> job = Mono.just(
ResponseEntity.ok(
messagesService.loadMessages(
getCluster(clusterName), topicName, positions, q, filterQueryType,
limit, seekDirection, keySerde, valueSerde)
)
);
var context = contextBuilder.build();
return validateAccess(context)
.then(job)
.doOnEach(sig -> audit(context, sig));
Flux<TopicMessageEventDTO> messagesFlux;
if (cursor != null) {
messagesFlux = messagesService.loadMessages(getCluster(clusterName), topicName, cursor);
} else {
messagesFlux = messagesService.loadMessages(
getCluster(clusterName),
topicName,
ConsumerPosition.create(mode, topicName, partitions, timestamp, offset),
stringFilter,
smartFilterId,
limit,
keySerde,
valueSerde
);
}
return accessControlService.validateAccess(accessContext)
.then(Mono.just(ResponseEntity.ok(messagesFlux)))
.doOnEach(sig -> auditService.audit(accessContext, sig));
}
@Override
@ -147,34 +155,6 @@ public class MessagesController extends AbstractController implements MessagesAp
).doOnEach(sig -> audit(context, sig));
}
/**
* The format is [partition]::[offset] for specifying offsets
* or [partition]::[timestamp in millis] for specifying timestamps.
*/
@Nullable
private Map<TopicPartition, Long> parseSeekTo(String topic, SeekTypeDTO seekType, List<String> seekTo) {
if (seekTo == null || seekTo.isEmpty()) {
if (seekType == SeekTypeDTO.LATEST || seekType == SeekTypeDTO.BEGINNING) {
return null;
}
throw new ValidationException("seekTo should be set if seekType is " + seekType);
}
return seekTo.stream()
.map(p -> {
String[] split = p.split("::");
if (split.length != 2) {
throw new IllegalArgumentException(
"Wrong seekTo argument format. See API docs for details");
}
return Pair.of(
new TopicPartition(topic, Integer.parseInt(split[0])),
Long.parseLong(split[1])
);
})
.collect(toMap(Pair::getKey, Pair::getValue));
}
@Override
public Mono<ResponseEntity<TopicSerdeSuggestionDTO>> getSerdes(String clusterName,
String topicName,
@ -202,7 +182,20 @@ public class MessagesController extends AbstractController implements MessagesAp
);
}
@Override
public Mono<ResponseEntity<MessageFilterIdDTO>> registerFilter(String clusterName,
String topicName,
Mono<MessageFilterRegistrationDTO> registration,
ServerWebExchange exchange) {
final Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
.cluster(clusterName)
.topic(topicName)
.topicActions(MESSAGES_READ)
.build());
return validateAccess.then(registration)
.map(reg -> messagesService.registerMessageFilter(reg.getFilterCode()))
.map(id -> ResponseEntity.ok(new MessageFilterIdDTO().id(id)));
}
}
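Taken together, the two new endpoints in this controller sketch a two-step polling protocol: a client first registers its filter code via registerFilter and receives an id, then passes that id as smartFilterId (and, on later pages, the cursor returned with the DONE event) to getTopicMessagesV2. As a rough illustration of the registration half, here is a minimal, self-contained Java sketch; FilterRegistrySketch and its contains-based predicate are hypothetical stand-ins, not the actual MessagesService implementation (which compiles the code into a Groovy predicate).

import java.util.Map;
import java.util.UUID;
import java.util.concurrent.ConcurrentHashMap;
import java.util.function.Predicate;

class FilterRegistrySketch {
  // registered filter predicates, keyed by the id handed back to the client as smartFilterId
  private final Map<String, Predicate<String>> filters = new ConcurrentHashMap<>();

  String register(String filterCode) {
    String id = UUID.randomUUID().toString();
    // the real service compiles filterCode (e.g. a Groovy script); a substring check stands in here
    filters.put(id, msg -> msg.contains(filterCode));
    return id;
  }

  Predicate<String> lookup(String smartFilterId) {
    // unknown ids fall back to a pass-through predicate in this sketch
    return filters.getOrDefault(smartFilterId, msg -> true);
  }
}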

View file

@ -15,6 +15,7 @@ import com.provectus.kafka.ui.model.rbac.permission.SchemaAction;
import com.provectus.kafka.ui.service.SchemaRegistryService;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import javax.validation.Valid;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
@ -234,7 +235,7 @@ public class SchemasController extends AbstractController implements SchemasApi
List<String> subjectsToRender = filteredSubjects.stream()
.skip(subjectToSkip)
.limit(pageSize)
.toList();
.collect(Collectors.toList());
return schemaRegistryService.getAllLatestVersionSchemas(getCluster(clusterName), subjectsToRender)
.map(subjs -> subjs.stream().map(kafkaSrMapper::toDto).toList())
.map(subjs -> new SchemaSubjectsResponseDTO().pageCount(totalPages).schemas(subjs));

View file

@ -22,7 +22,6 @@ import com.provectus.kafka.ui.model.TopicConfigDTO;
import com.provectus.kafka.ui.model.TopicCreationDTO;
import com.provectus.kafka.ui.model.TopicDTO;
import com.provectus.kafka.ui.model.TopicDetailsDTO;
import com.provectus.kafka.ui.model.TopicProducerStateDTO;
import com.provectus.kafka.ui.model.TopicUpdateDTO;
import com.provectus.kafka.ui.model.TopicsResponseDTO;
import com.provectus.kafka.ui.model.rbac.AccessContext;
@ -144,7 +143,7 @@ public class TopicsController extends AbstractController implements TopicsApi {
.map(lst -> lst.stream()
.map(InternalTopicConfig::from)
.map(clusterMapper::toTopicConfig)
.toList())
.collect(toList()))
.map(Flux::fromIterable)
.map(ResponseEntity::ok)
).doOnEach(sig -> audit(context, sig));
@ -208,7 +207,7 @@ public class TopicsController extends AbstractController implements TopicsApi {
return topicsService.loadTopics(getCluster(clusterName), topicsPage)
.map(topicsToRender ->
new TopicsResponseDTO()
.topics(topicsToRender.stream().map(clusterMapper::toTopic).toList())
.topics(topicsToRender.stream().map(clusterMapper::toTopic).collect(toList()))
.pageCount(totalPages));
})
.map(ResponseEntity::ok)
@ -328,34 +327,6 @@ public class TopicsController extends AbstractController implements TopicsApi {
.doOnEach(sig -> audit(context, sig));
}
@Override
public Mono<ResponseEntity<Flux<TopicProducerStateDTO>>> getActiveProducerStates(String clusterName,
String topicName,
ServerWebExchange exchange) {
var context = AccessContext.builder()
.cluster(clusterName)
.topic(topicName)
.topicActions(VIEW)
.operationName("getActiveProducerStates")
.build();
Comparator<TopicProducerStateDTO> ordering =
Comparator.comparingInt(TopicProducerStateDTO::getPartition)
.thenComparing(Comparator.comparing(TopicProducerStateDTO::getProducerId).reversed());
Flux<TopicProducerStateDTO> states = topicsService.getActiveProducersState(getCluster(clusterName), topicName)
.flatMapMany(statesMap ->
Flux.fromStream(
statesMap.entrySet().stream()
.flatMap(e -> e.getValue().stream().map(p -> clusterMapper.map(e.getKey().partition(), p)))
.sorted(ordering)));
return validateAccess(context)
.thenReturn(states)
.map(ResponseEntity::ok)
.doOnEach(sig -> audit(context, sig));
}
private Comparator<InternalTopic> getComparatorForTopic(
TopicColumnsToSortDTO orderBy) {
var defaultComparator = Comparator.comparing(InternalTopic::getName);

View file

@ -1,6 +1,7 @@
package com.provectus.kafka.ui.emitter;
import com.provectus.kafka.ui.model.TopicMessageEventDTO;
import jakarta.annotation.Nullable;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.common.utils.Bytes;
import reactor.core.publisher.FluxSink;
@ -21,12 +22,14 @@ abstract class AbstractEmitter implements java.util.function.Consumer<FluxSink<T
return records;
}
protected boolean sendLimitReached() {
protected boolean isSendLimitReached() {
return messagesProcessing.limitReached();
}
protected void send(FluxSink<TopicMessageEventDTO> sink, Iterable<ConsumerRecord<Bytes, Bytes>> records) {
messagesProcessing.send(sink, records);
protected void send(FluxSink<TopicMessageEventDTO> sink,
Iterable<ConsumerRecord<Bytes, Bytes>> records,
@Nullable Cursor.Tracking cursor) {
messagesProcessing.send(sink, records, cursor);
}
protected void sendPhase(FluxSink<TopicMessageEventDTO> sink, String name) {
@ -37,8 +40,9 @@ abstract class AbstractEmitter implements java.util.function.Consumer<FluxSink<T
messagesProcessing.sentConsumingInfo(sink, records);
}
protected void sendFinishStatsAndCompleteSink(FluxSink<TopicMessageEventDTO> sink) {
messagesProcessing.sendFinishEvent(sink);
// cursor is null if target partitions were fully polled (no need to do paging)
protected void sendFinishStatsAndCompleteSink(FluxSink<TopicMessageEventDTO> sink, @Nullable Cursor.Tracking cursor) {
messagesProcessing.sendFinishEvents(sink, cursor);
sink.complete();
}
}

View file

@ -18,18 +18,15 @@ public class BackwardEmitter extends RangePollingEmitter {
int messagesPerPage,
ConsumerRecordDeserializer deserializer,
Predicate<TopicMessageDTO> filter,
PollingSettings pollingSettings) {
PollingSettings pollingSettings,
Cursor.Tracking cursor) {
super(
consumerSupplier,
consumerPosition,
messagesPerPage,
new MessagesProcessing(
deserializer,
filter,
false,
messagesPerPage
),
pollingSettings
new MessagesProcessing(deserializer, filter, false, messagesPerPage),
pollingSettings,
cursor
);
}

View file

@ -2,6 +2,8 @@ package com.provectus.kafka.ui.emitter;
import com.provectus.kafka.ui.model.TopicMessageConsumingDTO;
import com.provectus.kafka.ui.model.TopicMessageEventDTO;
import com.provectus.kafka.ui.model.TopicMessageNextPageCursorDTO;
import javax.annotation.Nullable;
import reactor.core.publisher.FluxSink;
class ConsumingStats {
@ -26,10 +28,15 @@ class ConsumingStats {
filterApplyErrors++;
}
void sendFinishEvent(FluxSink<TopicMessageEventDTO> sink) {
void sendFinishEvent(FluxSink<TopicMessageEventDTO> sink, @Nullable Cursor.Tracking cursor) {
sink.next(
new TopicMessageEventDTO()
.type(TopicMessageEventDTO.TypeEnum.DONE)
.cursor(
cursor != null
? new TopicMessageNextPageCursorDTO().id(cursor.registerCursor())
: null
)
.consuming(createConsumingStats())
);
}

View file

@ -0,0 +1,90 @@
package com.provectus.kafka.ui.emitter;
import com.google.common.collect.HashBasedTable;
import com.google.common.collect.Table;
import com.provectus.kafka.ui.model.ConsumerPosition;
import com.provectus.kafka.ui.model.PollingModeDTO;
import com.provectus.kafka.ui.model.TopicMessageDTO;
import com.provectus.kafka.ui.serdes.ConsumerRecordDeserializer;
import java.util.HashMap;
import java.util.Map;
import java.util.function.Function;
import java.util.function.Predicate;
import org.apache.kafka.common.TopicPartition;
public record Cursor(ConsumerRecordDeserializer deserializer,
ConsumerPosition consumerPosition,
Predicate<TopicMessageDTO> filter,
int limit) {
public static class Tracking {
private final ConsumerRecordDeserializer deserializer;
private final ConsumerPosition originalPosition;
private final Predicate<TopicMessageDTO> filter;
private final int limit;
private final Function<Cursor, String> registerAction;
//topic -> partition -> offset
private final Table<String, Integer, Long> trackingOffsets = HashBasedTable.create();
public Tracking(ConsumerRecordDeserializer deserializer,
ConsumerPosition originalPosition,
Predicate<TopicMessageDTO> filter,
int limit,
Function<Cursor, String> registerAction) {
this.deserializer = deserializer;
this.originalPosition = originalPosition;
this.filter = filter;
this.limit = limit;
this.registerAction = registerAction;
}
void trackOffset(String topic, int partition, long offset) {
trackingOffsets.put(topic, partition, offset);
}
void initOffsets(Map<TopicPartition, Long> initialSeekOffsets) {
initialSeekOffsets.forEach((tp, off) -> trackOffset(tp.topic(), tp.partition(), off));
}
private Map<TopicPartition, Long> getOffsetsMap(int offsetToAdd) {
Map<TopicPartition, Long> result = new HashMap<>();
trackingOffsets.rowMap()
.forEach((topic, partsMap) ->
partsMap.forEach((p, off) -> result.put(new TopicPartition(topic, p), off + offsetToAdd)));
return result;
}
String registerCursor() {
return registerAction.apply(
new Cursor(
deserializer,
new ConsumerPosition(
switch (originalPosition.pollingMode()) {
case TO_OFFSET, TO_TIMESTAMP, LATEST -> PollingModeDTO.TO_OFFSET;
case FROM_OFFSET, FROM_TIMESTAMP, EARLIEST -> PollingModeDTO.FROM_OFFSET;
case TAILING -> throw new IllegalStateException();
},
originalPosition.topic(),
originalPosition.partitions(),
null,
new ConsumerPosition.Offsets(
null,
getOffsetsMap(
switch (originalPosition.pollingMode()) {
case TO_OFFSET, TO_TIMESTAMP, LATEST -> 0;
// when doing forward polling we need to start from latest msg's offset + 1
case FROM_OFFSET, FROM_TIMESTAMP, EARLIEST -> 1;
case TAILING -> throw new IllegalStateException();
}
)
)
),
filter,
limit
)
);
}
}
}
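The new Cursor.Tracking class is the heart of the paging change: it records the last offset seen per partition and, when a page finishes, registers a new Cursor whose ConsumerPosition starts where the page left off (the same offset for backward modes, offset + 1 for forward modes). Below is a tiny self-contained sketch of that offset arithmetic, using plain maps instead of Guava's Table and hypothetical names throughout.

import java.util.HashMap;
import java.util.Map;

class NextPageOffsetsSketch {
  public static void main(String[] args) {
    // last offset emitted per partition during the current page ("topic-partition" keys for brevity)
    Map<String, Long> lastSeen = new HashMap<>();
    lastSeen.put("orders-0", 41L);
    lastSeen.put("orders-1", 99L);

    boolean forward = true;          // FROM_OFFSET / FROM_TIMESTAMP / EARLIEST modes
    long shift = forward ? 1 : 0;    // forward paging resumes after the last message, backward at it

    Map<String, Long> nextPageStart = new HashMap<>();
    lastSeen.forEach((tp, off) -> nextPageStart.put(tp, off + shift));

    System.out.println(nextPageStart); // e.g. {orders-1=100, orders-0=42} (map order not guaranteed)
  }
}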

View file

@ -18,18 +18,15 @@ public class ForwardEmitter extends RangePollingEmitter {
int messagesPerPage,
ConsumerRecordDeserializer deserializer,
Predicate<TopicMessageDTO> filter,
PollingSettings pollingSettings) {
PollingSettings pollingSettings,
Cursor.Tracking cursor) {
super(
consumerSupplier,
consumerPosition,
messagesPerPage,
new MessagesProcessing(
deserializer,
filter,
true,
messagesPerPage
),
pollingSettings
new MessagesProcessing(deserializer, filter, true, messagesPerPage),
pollingSettings,
cursor
);
}

View file

@ -1,7 +1,6 @@
package com.provectus.kafka.ui.emitter;
import com.provectus.kafka.ui.exception.ValidationException;
import com.provectus.kafka.ui.model.MessageFilterTypeDTO;
import com.provectus.kafka.ui.model.TopicMessageDTO;
import groovy.json.JsonSlurper;
import java.util.function.Predicate;
@ -22,23 +21,16 @@ public class MessageFilters {
private MessageFilters() {
}
public static Predicate<TopicMessageDTO> createMsgFilter(String query, MessageFilterTypeDTO type) {
switch (type) {
case STRING_CONTAINS:
return containsStringFilter(query);
case GROOVY_SCRIPT:
return groovyScriptFilter(query);
default:
throw new IllegalStateException("Unknown query type: " + type);
}
public static Predicate<TopicMessageDTO> noop() {
return e -> true;
}
static Predicate<TopicMessageDTO> containsStringFilter(String string) {
public static Predicate<TopicMessageDTO> containsStringFilter(String string) {
return msg -> StringUtils.contains(msg.getKey(), string)
|| StringUtils.contains(msg.getContent(), string);
}
static Predicate<TopicMessageDTO> groovyScriptFilter(String script) {
public static Predicate<TopicMessageDTO> groovyScriptFilter(String script) {
var engine = getGroovyEngine();
var compiledScript = compileScript(engine, script);
var jsonSlurper = new JsonSlurper();

View file

@ -39,7 +39,9 @@ class MessagesProcessing {
return limit != null && sentMessages >= limit;
}
void send(FluxSink<TopicMessageEventDTO> sink, Iterable<ConsumerRecord<Bytes, Bytes>> polled) {
void send(FluxSink<TopicMessageEventDTO> sink,
Iterable<ConsumerRecord<Bytes, Bytes>> polled,
@Nullable Cursor.Tracking cursor) {
sortForSending(polled, ascendingSortBeforeSend)
.forEach(rec -> {
if (!limitReached() && !sink.isCancelled()) {
@ -53,6 +55,9 @@ class MessagesProcessing {
);
sentMessages++;
}
if (cursor != null) {
cursor.trackOffset(rec.topic(), rec.partition(), rec.offset());
}
} catch (Exception e) {
consumingStats.incFilterApplyError();
log.trace("Error applying filter for message {}", topicMessage);
@ -67,9 +72,9 @@ class MessagesProcessing {
}
}
void sendFinishEvent(FluxSink<TopicMessageEventDTO> sink) {
void sendFinishEvents(FluxSink<TopicMessageEventDTO> sink, @Nullable Cursor.Tracking cursor) {
if (!sink.isCancelled()) {
consumingStats.sendFinishEvent(sink);
consumingStats.sendFinishEvent(sink, cursor);
}
}

View file

@ -1,10 +1,12 @@
package com.provectus.kafka.ui.emitter;
import com.google.common.base.Preconditions;
import com.google.common.collect.Sets;
import java.util.Collection;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;
import lombok.Getter;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang3.mutable.MutableLong;
@ -27,7 +29,7 @@ class OffsetsInfo {
this(consumer,
consumer.partitionsFor(topic).stream()
.map(pi -> new TopicPartition(topic, pi.partition()))
.toList()
.collect(Collectors.toList())
);
}
@ -61,4 +63,8 @@ class OffsetsInfo {
return cnt.getValue();
}
public Set<TopicPartition> allTargetPartitions() {
return Sets.union(nonEmptyPartitions, emptyPartitions);
}
}

View file

@ -3,6 +3,7 @@ package com.provectus.kafka.ui.emitter;
import java.time.Duration;
import java.util.Iterator;
import java.util.List;
import java.util.Set;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.common.TopicPartition;
@ -32,6 +33,10 @@ public record PolledRecords(int count,
return records.iterator();
}
public Set<TopicPartition> partitions() {
return records.partitions();
}
private static int calculatePolledRecSize(Iterable<ConsumerRecord<Bytes, Bytes>> recs) {
int polledBytes = 0;
for (ConsumerRecord<Bytes, Bytes> rec : recs) {

View file

@ -17,6 +17,7 @@ import reactor.core.publisher.FluxSink;
abstract class RangePollingEmitter extends AbstractEmitter {
private final Supplier<EnhancedConsumer> consumerSupplier;
private final Cursor.Tracking cursor;
protected final ConsumerPosition consumerPosition;
protected final int messagesPerPage;
@ -24,11 +25,13 @@ abstract class RangePollingEmitter extends AbstractEmitter {
ConsumerPosition consumerPosition,
int messagesPerPage,
MessagesProcessing messagesProcessing,
PollingSettings pollingSettings) {
PollingSettings pollingSettings,
Cursor.Tracking cursor) {
super(messagesProcessing, pollingSettings);
this.consumerPosition = consumerPosition;
this.messagesPerPage = messagesPerPage;
this.consumerSupplier = consumerSupplier;
this.cursor = cursor;
}
protected record FromToOffset(/*inclusive*/ long from, /*exclusive*/ long to) {
@ -46,18 +49,20 @@ abstract class RangePollingEmitter extends AbstractEmitter {
try (EnhancedConsumer consumer = consumerSupplier.get()) {
sendPhase(sink, "Consumer created");
var seekOperations = SeekOperations.create(consumer, consumerPosition);
cursor.initOffsets(seekOperations.getOffsetsForSeek());
TreeMap<TopicPartition, FromToOffset> pollRange = nextPollingRange(new TreeMap<>(), seekOperations);
log.debug("Starting from offsets {}", pollRange);
while (!sink.isCancelled() && !pollRange.isEmpty() && !sendLimitReached()) {
while (!sink.isCancelled() && !pollRange.isEmpty() && !isSendLimitReached()) {
var polled = poll(consumer, sink, pollRange);
send(sink, polled);
send(sink, polled, cursor);
pollRange = nextPollingRange(pollRange, seekOperations);
}
if (sink.isCancelled()) {
log.debug("Polling finished due to sink cancellation");
}
sendFinishStatsAndCompleteSink(sink);
sendFinishStatsAndCompleteSink(sink, pollRange.isEmpty() ? null : cursor);
log.debug("Polling finished");
} catch (InterruptException kafkaInterruptException) {
log.debug("Polling finished due to thread interruption");

View file

@ -1,13 +1,13 @@
package com.provectus.kafka.ui.emitter;
import static com.provectus.kafka.ui.model.PollingModeDTO.TO_TIMESTAMP;
import static java.util.Objects.requireNonNull;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.provectus.kafka.ui.model.ConsumerPosition;
import com.provectus.kafka.ui.model.SeekTypeDTO;
import com.provectus.kafka.ui.model.PollingModeDTO;
import java.util.HashMap;
import java.util.Map;
import java.util.stream.Collectors;
import javax.annotation.Nullable;
import lombok.AccessLevel;
import lombok.RequiredArgsConstructor;
import org.apache.commons.lang3.mutable.MutableLong;
@ -22,17 +22,11 @@ public class SeekOperations {
private final Map<TopicPartition, Long> offsetsForSeek; //only contains non-empty partitions!
public static SeekOperations create(Consumer<?, ?> consumer, ConsumerPosition consumerPosition) {
OffsetsInfo offsetsInfo;
if (consumerPosition.getSeekTo() == null) {
offsetsInfo = new OffsetsInfo(consumer, consumerPosition.getTopic());
} else {
offsetsInfo = new OffsetsInfo(consumer, consumerPosition.getSeekTo().keySet());
}
return new SeekOperations(
consumer,
offsetsInfo,
getOffsetsForSeek(consumer, offsetsInfo, consumerPosition.getSeekType(), consumerPosition.getSeekTo())
);
OffsetsInfo offsetsInfo = consumerPosition.partitions().isEmpty()
? new OffsetsInfo(consumer, consumerPosition.topic())
: new OffsetsInfo(consumer, consumerPosition.partitions());
var offsetsToSeek = getOffsetsForSeek(consumer, offsetsInfo, consumerPosition);
return new SeekOperations(consumer, offsetsInfo, offsetsToSeek);
}
public void assignAndSeekNonEmptyPartitions() {
@ -75,27 +69,26 @@ public class SeekOperations {
@VisibleForTesting
static Map<TopicPartition, Long> getOffsetsForSeek(Consumer<?, ?> consumer,
OffsetsInfo offsetsInfo,
SeekTypeDTO seekType,
@Nullable Map<TopicPartition, Long> seekTo) {
switch (seekType) {
case LATEST:
return consumer.endOffsets(offsetsInfo.getNonEmptyPartitions());
case BEGINNING:
return consumer.beginningOffsets(offsetsInfo.getNonEmptyPartitions());
case OFFSET:
Preconditions.checkNotNull(seekTo);
return fixOffsets(offsetsInfo, seekTo);
case TIMESTAMP:
Preconditions.checkNotNull(seekTo);
return offsetsForTimestamp(consumer, offsetsInfo, seekTo);
default:
throw new IllegalStateException();
}
ConsumerPosition position) {
return switch (position.pollingMode()) {
case TAILING -> consumer.endOffsets(offsetsInfo.allTargetPartitions());
case LATEST -> consumer.endOffsets(offsetsInfo.getNonEmptyPartitions());
case EARLIEST -> consumer.beginningOffsets(offsetsInfo.getNonEmptyPartitions());
case FROM_OFFSET, TO_OFFSET -> fixOffsets(offsetsInfo, requireNonNull(position.offsets()));
case FROM_TIMESTAMP, TO_TIMESTAMP ->
offsetsForTimestamp(consumer, position.pollingMode(), offsetsInfo, requireNonNull(position.timestamp()));
};
}
private static Map<TopicPartition, Long> fixOffsets(OffsetsInfo offsetsInfo, Map<TopicPartition, Long> offsets) {
offsets = new HashMap<>(offsets);
offsets.keySet().retainAll(offsetsInfo.getNonEmptyPartitions());
private static Map<TopicPartition, Long> fixOffsets(OffsetsInfo offsetsInfo,
ConsumerPosition.Offsets positionOffset) {
var offsets = new HashMap<TopicPartition, Long>();
if (positionOffset.offset() != null) {
offsetsInfo.getNonEmptyPartitions().forEach(tp -> offsets.put(tp, positionOffset.offset()));
} else {
offsets.putAll(requireNonNull(positionOffset.tpOffsets()));
offsets.keySet().retainAll(offsetsInfo.getNonEmptyPartitions());
}
Map<TopicPartition, Long> result = new HashMap<>();
offsets.forEach((tp, targetOffset) -> {
@ -112,13 +105,25 @@ public class SeekOperations {
return result;
}
private static Map<TopicPartition, Long> offsetsForTimestamp(Consumer<?, ?> consumer, OffsetsInfo offsetsInfo,
Map<TopicPartition, Long> timestamps) {
timestamps = new HashMap<>(timestamps);
timestamps.keySet().retainAll(offsetsInfo.getNonEmptyPartitions());
private static Map<TopicPartition, Long> offsetsForTimestamp(Consumer<?, ?> consumer,
PollingModeDTO pollingMode,
OffsetsInfo offsetsInfo,
Long timestamp) {
Map<TopicPartition, Long> timestamps = new HashMap<>();
offsetsInfo.getNonEmptyPartitions().forEach(tp -> timestamps.put(tp, timestamp));
return consumer.offsetsForTimes(timestamps).entrySet().stream()
.filter(e -> e.getValue() != null)
.collect(Collectors.toMap(Map.Entry::getKey, e -> e.getValue().offset()));
Map<TopicPartition, Long> result = new HashMap<>();
consumer.offsetsForTimes(timestamps).forEach((tp, offsetAndTimestamp) -> {
if (offsetAndTimestamp == null) {
if (pollingMode == TO_TIMESTAMP && offsetsInfo.getNonEmptyPartitions().contains(tp)) {
// if no offset was returned, this means that *all* record timestamps are lower
// than the target timestamp. In case of TO_TIMESTAMP mode we need to read from the end of the tp
result.put(tp, offsetsInfo.getEndOffsets().get(tp));
}
} else {
result.put(tp, offsetAndTimestamp.offset());
}
});
return result;
}
}
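The rewritten offsetsForTimestamp resolves a single target timestamp into per-partition offsets with Consumer.offsetsForTimes and, in TO_TIMESTAMP mode, falls back to the partition's end offset when every record is older than the timestamp. A hedged, self-contained version of the same lookup against the plain Kafka consumer API (method and parameter names here are illustrative, not the project's):

import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.common.TopicPartition;

class TimestampSeekSketch {
  static Map<TopicPartition, Long> offsetsForTimestamp(Consumer<?, ?> consumer,
                                                       Set<TopicPartition> partitions,
                                                       long timestamp,
                                                       boolean fallBackToEndOffset) {
    Map<TopicPartition, Long> query = new HashMap<>();
    partitions.forEach(tp -> query.put(tp, timestamp));

    Map<TopicPartition, Long> endOffsets = consumer.endOffsets(partitions);
    Map<TopicPartition, Long> result = new HashMap<>();
    consumer.offsetsForTimes(query).forEach((tp, offsetAndTimestamp) -> {
      if (offsetAndTimestamp != null) {
        result.put(tp, offsetAndTimestamp.offset()); // first record at or after the timestamp
      } else if (fallBackToEndOffset) {
        result.put(tp, endOffsets.get(tp));          // all records are older than the timestamp
      }
    });
    return result;
  }
}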

View file

@ -35,7 +35,7 @@ public class TailingEmitter extends AbstractEmitter {
while (!sink.isCancelled()) {
sendPhase(sink, "Polling");
var polled = poll(sink, consumer);
send(sink, polled);
send(sink, polled, null);
}
sink.complete();
log.debug("Tailing finished");
@ -55,5 +55,4 @@ public class TailingEmitter extends AbstractEmitter {
consumer.assign(seekOffsets.keySet());
seekOffsets.forEach(consumer::seek);
}
}

View file

@ -106,7 +106,7 @@ public class GlobalErrorWebExceptionHandler extends AbstractErrorWebExceptionHan
err.setFieldName(e.getKey());
err.setRestrictions(List.copyOf(e.getValue()));
return err;
}).toList();
}).collect(Collectors.toList());
var message = fieldsErrors.isEmpty()
? exception.getMessage()

View file

@ -30,12 +30,11 @@ import com.provectus.kafka.ui.model.ReplicaDTO;
import com.provectus.kafka.ui.model.TopicConfigDTO;
import com.provectus.kafka.ui.model.TopicDTO;
import com.provectus.kafka.ui.model.TopicDetailsDTO;
import com.provectus.kafka.ui.model.TopicProducerStateDTO;
import com.provectus.kafka.ui.service.metrics.RawMetric;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import org.apache.kafka.clients.admin.ConfigEntry;
import org.apache.kafka.clients.admin.ProducerState;
import org.apache.kafka.common.acl.AccessControlEntry;
import org.apache.kafka.common.acl.AclBinding;
import org.apache.kafka.common.acl.AclOperation;
@ -55,7 +54,7 @@ public interface ClusterMapper {
default ClusterMetricsDTO toClusterMetrics(Metrics metrics) {
return new ClusterMetricsDTO()
.items(metrics.getSummarizedMetrics().map(this::convert).toList());
.items(metrics.getSummarizedMetrics().map(this::convert).collect(Collectors.toList()));
}
private MetricDTO convert(RawMetric rawMetric) {
@ -67,7 +66,7 @@ public interface ClusterMapper {
default BrokerMetricsDTO toBrokerMetrics(List<RawMetric> metrics) {
return new BrokerMetricsDTO()
.metrics(metrics.stream().map(this::convert).toList());
.metrics(metrics.stream().map(this::convert).collect(Collectors.toList()));
}
@Mapping(target = "isSensitive", source = "sensitive")
@ -108,7 +107,7 @@ public interface ClusterMapper {
List<ClusterDTO.FeaturesEnum> toFeaturesEnum(List<ClusterFeature> features);
default List<PartitionDTO> map(Map<Integer, InternalPartition> map) {
return map.values().stream().map(this::toPartition).toList();
return map.values().stream().map(this::toPartition).collect(Collectors.toList());
}
default BrokerDiskUsageDTO map(Integer id, InternalBrokerDiskUsage internalBrokerDiskUsage) {
@ -119,17 +118,6 @@ public interface ClusterMapper {
return brokerDiskUsage;
}
default TopicProducerStateDTO map(int partition, ProducerState state) {
return new TopicProducerStateDTO()
.partition(partition)
.producerId(state.producerId())
.producerEpoch(state.producerEpoch())
.lastSequence(state.lastSequence())
.lastTimestampMs(state.lastTimestamp())
.coordinatorEpoch(state.coordinatorEpoch().stream().boxed().findAny().orElse(null))
.currentTransactionStartOffset(state.currentTransactionStartOffset().stream().boxed().findAny().orElse(null));
}
static KafkaAclDTO.OperationEnum mapAclOperation(AclOperation operation) {
return switch (operation) {
case ALL -> KafkaAclDTO.OperationEnum.ALL;

View file

@ -21,7 +21,7 @@ public class DescribeLogDirsMapper {
return logDirsInfo.entrySet().stream().map(
mapEntry -> mapEntry.getValue().entrySet().stream()
.map(e -> toBrokerLogDirs(mapEntry.getKey(), e.getKey(), e.getValue()))
.toList()
.collect(Collectors.toList())
).flatMap(Collection::stream).collect(Collectors.toList());
}
@ -35,7 +35,7 @@ public class DescribeLogDirsMapper {
var topics = logDirInfo.replicaInfos.entrySet().stream()
.collect(Collectors.groupingBy(e -> e.getKey().topic())).entrySet().stream()
.map(e -> toTopicLogDirs(broker, e.getKey(), e.getValue()))
.toList();
.collect(Collectors.toList());
result.setTopics(topics);
return result;
}
@ -48,7 +48,7 @@ public class DescribeLogDirsMapper {
topic.setPartitions(
partitions.stream().map(
e -> topicPartitionLogDir(
broker, e.getKey().partition(), e.getValue())).toList()
broker, e.getKey().partition(), e.getValue())).collect(Collectors.toList())
);
return topic;
}

View file

@ -1,14 +1,72 @@
package com.provectus.kafka.ui.model;
import com.google.common.base.Preconditions;
import com.provectus.kafka.ui.exception.ValidationException;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.stream.Collectors;
import javax.annotation.Nullable;
import lombok.Value;
import org.apache.kafka.common.TopicPartition;
@Value
public class ConsumerPosition {
SeekTypeDTO seekType;
String topic;
@Nullable
Map<TopicPartition, Long> seekTo; // null if positioning should apply to all tps
public record ConsumerPosition(PollingModeDTO pollingMode,
String topic,
List<TopicPartition> partitions, //all partitions if list is empty
@Nullable Long timestamp,
@Nullable Offsets offsets) {
public record Offsets(@Nullable Long offset, //should be applied to all partitions
@Nullable Map<TopicPartition, Long> tpOffsets) {
public Offsets {
// exactly one of the properties should be set
Preconditions.checkArgument((offset == null && tpOffsets != null) || (offset != null && tpOffsets == null));
}
}
public static ConsumerPosition create(PollingModeDTO pollingMode,
String topic,
@Nullable List<Integer> partitions,
@Nullable Long timestamp,
@Nullable Long offset) {
@Nullable var offsets = parseAndValidateOffsets(pollingMode, offset);
var topicPartitions = Optional.ofNullable(partitions).orElse(List.of())
.stream()
.map(p -> new TopicPartition(topic, p))
.collect(Collectors.toList());
// if offsets are specified, infer the partitions list from them
topicPartitions = (offsets != null && offsets.tpOffsets() != null)
? List.copyOf(offsets.tpOffsets().keySet())
: topicPartitions;
return new ConsumerPosition(
pollingMode,
topic,
topicPartitions,
validateTimestamp(pollingMode, timestamp),
offsets
);
}
private static Long validateTimestamp(PollingModeDTO pollingMode, @Nullable Long ts) {
if (pollingMode == PollingModeDTO.FROM_TIMESTAMP || pollingMode == PollingModeDTO.TO_TIMESTAMP) {
if (ts == null) {
throw new ValidationException("timestamp not provided for " + pollingMode);
}
}
return ts;
}
private static Offsets parseAndValidateOffsets(PollingModeDTO pollingMode,
@Nullable Long offset) {
if (pollingMode == PollingModeDTO.FROM_OFFSET || pollingMode == PollingModeDTO.TO_OFFSET) {
if (offset == null) {
throw new ValidationException("offsets not provided for " + pollingMode);
}
return new Offsets(offset, null);
}
return null;
}
}
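ConsumerPosition.create centralises the parameter rules for the new polling modes: offset-based modes require an offset, timestamp-based modes require a timestamp, and when per-partition offsets are supplied the partition list is inferred from their keys. A compact sketch of those rules, with a local enum standing in for PollingModeDTO (all names hypothetical):

enum Mode { EARLIEST, LATEST, TAILING, FROM_OFFSET, TO_OFFSET, FROM_TIMESTAMP, TO_TIMESTAMP }

class PositionArgsCheck {
  // offset-based modes need an offset, timestamp-based modes need a timestamp
  static void validate(Mode mode, Long offset, Long timestamp) {
    if ((mode == Mode.FROM_OFFSET || mode == Mode.TO_OFFSET) && offset == null) {
      throw new IllegalArgumentException("offsets not provided for " + mode);
    }
    if ((mode == Mode.FROM_TIMESTAMP || mode == Mode.TO_TIMESTAMP) && timestamp == null) {
      throw new IllegalArgumentException("timestamp not provided for " + mode);
    }
  }

  public static void main(String[] args) {
    validate(Mode.FROM_OFFSET, 42L, null);                 // ok
    validate(Mode.TO_TIMESTAMP, null, 1_700_000_000_000L); // ok
    validate(Mode.FROM_TIMESTAMP, null, null);             // throws IllegalArgumentException
  }
}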

View file

@ -44,7 +44,7 @@ public class InternalLogDirStats {
topicMap.getValue().replicaInfos.entrySet().stream()
.map(e -> Tuples.of(b.getKey(), e.getKey(), e.getValue().size))
)
).toList();
).collect(toList());
partitionsStats = topicPartitions.stream().collect(
groupingBy(

View file

@ -52,8 +52,6 @@ public class AccessContext {
}
public static final class AccessContextBuilder {
private static final String ACTIONS_NOT_PRESENT = "actions not present";
private Collection<ApplicationConfigAction> applicationConfigActions = Collections.emptySet();
private String cluster;
private Collection<ClusterConfigAction> clusterConfigActions = Collections.emptySet();
@ -77,7 +75,7 @@ public class AccessContext {
}
public AccessContextBuilder applicationConfigActions(ApplicationConfigAction... actions) {
Assert.isTrue(actions.length > 0, ACTIONS_NOT_PRESENT);
Assert.isTrue(actions.length > 0, "actions not present");
this.applicationConfigActions = List.of(actions);
return this;
}
@ -88,7 +86,7 @@ public class AccessContext {
}
public AccessContextBuilder clusterConfigActions(ClusterConfigAction... actions) {
Assert.isTrue(actions.length > 0, ACTIONS_NOT_PRESENT);
Assert.isTrue(actions.length > 0, "actions not present");
this.clusterConfigActions = List.of(actions);
return this;
}
@ -99,7 +97,7 @@ public class AccessContext {
}
public AccessContextBuilder topicActions(TopicAction... actions) {
Assert.isTrue(actions.length > 0, ACTIONS_NOT_PRESENT);
Assert.isTrue(actions.length > 0, "actions not present");
this.topicActions = List.of(actions);
return this;
}
@ -110,7 +108,7 @@ public class AccessContext {
}
public AccessContextBuilder consumerGroupActions(ConsumerGroupAction... actions) {
Assert.isTrue(actions.length > 0, ACTIONS_NOT_PRESENT);
Assert.isTrue(actions.length > 0, "actions not present");
this.consumerGroupActions = List.of(actions);
return this;
}
@ -121,7 +119,7 @@ public class AccessContext {
}
public AccessContextBuilder connectActions(ConnectAction... actions) {
Assert.isTrue(actions.length > 0, ACTIONS_NOT_PRESENT);
Assert.isTrue(actions.length > 0, "actions not present");
this.connectActions = List.of(actions);
return this;
}
@ -137,25 +135,25 @@ public class AccessContext {
}
public AccessContextBuilder schemaActions(SchemaAction... actions) {
Assert.isTrue(actions.length > 0, ACTIONS_NOT_PRESENT);
Assert.isTrue(actions.length > 0, "actions not present");
this.schemaActions = List.of(actions);
return this;
}
public AccessContextBuilder ksqlActions(KsqlAction... actions) {
Assert.isTrue(actions.length > 0, ACTIONS_NOT_PRESENT);
Assert.isTrue(actions.length > 0, "actions not present");
this.ksqlActions = List.of(actions);
return this;
}
public AccessContextBuilder aclActions(AclAction... actions) {
Assert.isTrue(actions.length > 0, ACTIONS_NOT_PRESENT);
Assert.isTrue(actions.length > 0, "actions not present");
this.aclActions = List.of(actions);
return this;
}
public AccessContextBuilder auditActions(AuditAction... actions) {
Assert.isTrue(actions.length > 0, ACTIONS_NOT_PRESENT);
Assert.isTrue(actions.length > 0, "actions not present");
this.auditActions = List.of(actions);
return this;
}

View file

@ -23,7 +23,7 @@ import javax.annotation.Nullable;
import lombok.EqualsAndHashCode;
import lombok.Getter;
import lombok.ToString;
import org.apache.commons.collections4.CollectionUtils;
import org.apache.commons.collections.CollectionUtils;
import org.springframework.util.Assert;
@Getter

View file

@ -6,6 +6,7 @@ import com.provectus.kafka.ui.serdes.BuiltInSerde;
import java.util.Base64;
import java.util.Map;
import java.util.Optional;
import org.apache.kafka.common.header.Headers;
public class Base64Serde implements BuiltInSerde {

View file

@ -28,23 +28,6 @@ public class ConsumerOffsetsSerde implements BuiltInSerde {
private static final JsonMapper JSON_MAPPER = createMapper();
private static final String ASSIGNMENT = "assignment";
private static final String CLIENT_HOST = "client_host";
private static final String CLIENT_ID = "client_id";
private static final String COMMIT_TIMESTAMP = "commit_timestamp";
private static final String CURRENT_STATE_TIMESTAMP = "current_state_timestamp";
private static final String GENERATION = "generation";
private static final String LEADER = "leader";
private static final String MEMBERS = "members";
private static final String MEMBER_ID = "member_id";
private static final String METADATA = "metadata";
private static final String OFFSET = "offset";
private static final String PROTOCOL = "protocol";
private static final String PROTOCOL_TYPE = "protocol_type";
private static final String REBALANCE_TIMEOUT = "rebalance_timeout";
private static final String SESSION_TIMEOUT = "session_timeout";
private static final String SUBSCRIPTION = "subscription";
public static final String TOPIC = "__consumer_offsets";
public static String name() {
@ -133,128 +116,128 @@ public class ConsumerOffsetsSerde implements BuiltInSerde {
private Deserializer valueDeserializer() {
final Schema commitOffsetSchemaV0 =
new Schema(
new Field(OFFSET, Type.INT64, ""),
new Field(METADATA, Type.STRING, ""),
new Field(COMMIT_TIMESTAMP, Type.INT64, "")
new Field("offset", Type.INT64, ""),
new Field("metadata", Type.STRING, ""),
new Field("commit_timestamp", Type.INT64, "")
);
final Schema commitOffsetSchemaV1 =
new Schema(
new Field(OFFSET, Type.INT64, ""),
new Field(METADATA, Type.STRING, ""),
new Field(COMMIT_TIMESTAMP, Type.INT64, ""),
new Field("offset", Type.INT64, ""),
new Field("metadata", Type.STRING, ""),
new Field("commit_timestamp", Type.INT64, ""),
new Field("expire_timestamp", Type.INT64, "")
);
final Schema commitOffsetSchemaV2 =
new Schema(
new Field(OFFSET, Type.INT64, ""),
new Field(METADATA, Type.STRING, ""),
new Field(COMMIT_TIMESTAMP, Type.INT64, "")
new Field("offset", Type.INT64, ""),
new Field("metadata", Type.STRING, ""),
new Field("commit_timestamp", Type.INT64, "")
);
final Schema commitOffsetSchemaV3 =
new Schema(
new Field(OFFSET, Type.INT64, ""),
new Field("offset", Type.INT64, ""),
new Field("leader_epoch", Type.INT32, ""),
new Field(METADATA, Type.STRING, ""),
new Field(COMMIT_TIMESTAMP, Type.INT64, "")
new Field("metadata", Type.STRING, ""),
new Field("commit_timestamp", Type.INT64, "")
);
final Schema commitOffsetSchemaV4 = new Schema(
new Field(OFFSET, Type.INT64, ""),
new Field("offset", Type.INT64, ""),
new Field("leader_epoch", Type.INT32, ""),
new Field(METADATA, Type.COMPACT_STRING, ""),
new Field(COMMIT_TIMESTAMP, Type.INT64, ""),
new Field("metadata", Type.COMPACT_STRING, ""),
new Field("commit_timestamp", Type.INT64, ""),
Field.TaggedFieldsSection.of()
);
final Schema metadataSchema0 =
new Schema(
new Field(PROTOCOL_TYPE, Type.STRING, ""),
new Field(GENERATION, Type.INT32, ""),
new Field(PROTOCOL, Type.NULLABLE_STRING, ""),
new Field(LEADER, Type.NULLABLE_STRING, ""),
new Field(MEMBERS, new ArrayOf(new Schema(
new Field(MEMBER_ID, Type.STRING, ""),
new Field(CLIENT_ID, Type.STRING, ""),
new Field(CLIENT_HOST, Type.STRING, ""),
new Field(SESSION_TIMEOUT, Type.INT32, ""),
new Field(SUBSCRIPTION, Type.BYTES, ""),
new Field(ASSIGNMENT, Type.BYTES, "")
new Field("protocol_type", Type.STRING, ""),
new Field("generation", Type.INT32, ""),
new Field("protocol", Type.NULLABLE_STRING, ""),
new Field("leader", Type.NULLABLE_STRING, ""),
new Field("members", new ArrayOf(new Schema(
new Field("member_id", Type.STRING, ""),
new Field("client_id", Type.STRING, ""),
new Field("client_host", Type.STRING, ""),
new Field("session_timeout", Type.INT32, ""),
new Field("subscription", Type.BYTES, ""),
new Field("assignment", Type.BYTES, "")
)), "")
);
final Schema metadataSchema1 =
new Schema(
new Field(PROTOCOL_TYPE, Type.STRING, ""),
new Field(GENERATION, Type.INT32, ""),
new Field(PROTOCOL, Type.NULLABLE_STRING, ""),
new Field(LEADER, Type.NULLABLE_STRING, ""),
new Field(MEMBERS, new ArrayOf(new Schema(
new Field(MEMBER_ID, Type.STRING, ""),
new Field(CLIENT_ID, Type.STRING, ""),
new Field(CLIENT_HOST, Type.STRING, ""),
new Field(REBALANCE_TIMEOUT, Type.INT32, ""),
new Field(SESSION_TIMEOUT, Type.INT32, ""),
new Field(SUBSCRIPTION, Type.BYTES, ""),
new Field(ASSIGNMENT, Type.BYTES, "")
new Field("protocol_type", Type.STRING, ""),
new Field("generation", Type.INT32, ""),
new Field("protocol", Type.NULLABLE_STRING, ""),
new Field("leader", Type.NULLABLE_STRING, ""),
new Field("members", new ArrayOf(new Schema(
new Field("member_id", Type.STRING, ""),
new Field("client_id", Type.STRING, ""),
new Field("client_host", Type.STRING, ""),
new Field("rebalance_timeout", Type.INT32, ""),
new Field("session_timeout", Type.INT32, ""),
new Field("subscription", Type.BYTES, ""),
new Field("assignment", Type.BYTES, "")
)), "")
);
final Schema metadataSchema2 =
new Schema(
new Field(PROTOCOL_TYPE, Type.STRING, ""),
new Field(GENERATION, Type.INT32, ""),
new Field(PROTOCOL, Type.NULLABLE_STRING, ""),
new Field(LEADER, Type.NULLABLE_STRING, ""),
new Field(CURRENT_STATE_TIMESTAMP, Type.INT64, ""),
new Field(MEMBERS, new ArrayOf(new Schema(
new Field(MEMBER_ID, Type.STRING, ""),
new Field(CLIENT_ID, Type.STRING, ""),
new Field(CLIENT_HOST, Type.STRING, ""),
new Field(REBALANCE_TIMEOUT, Type.INT32, ""),
new Field(SESSION_TIMEOUT, Type.INT32, ""),
new Field(SUBSCRIPTION, Type.BYTES, ""),
new Field(ASSIGNMENT, Type.BYTES, "")
new Field("protocol_type", Type.STRING, ""),
new Field("generation", Type.INT32, ""),
new Field("protocol", Type.NULLABLE_STRING, ""),
new Field("leader", Type.NULLABLE_STRING, ""),
new Field("current_state_timestamp", Type.INT64, ""),
new Field("members", new ArrayOf(new Schema(
new Field("member_id", Type.STRING, ""),
new Field("client_id", Type.STRING, ""),
new Field("client_host", Type.STRING, ""),
new Field("rebalance_timeout", Type.INT32, ""),
new Field("session_timeout", Type.INT32, ""),
new Field("subscription", Type.BYTES, ""),
new Field("assignment", Type.BYTES, "")
)), "")
);
final Schema metadataSchema3 =
new Schema(
new Field(PROTOCOL_TYPE, Type.STRING, ""),
new Field(GENERATION, Type.INT32, ""),
new Field(PROTOCOL, Type.NULLABLE_STRING, ""),
new Field(LEADER, Type.NULLABLE_STRING, ""),
new Field(CURRENT_STATE_TIMESTAMP, Type.INT64, ""),
new Field(MEMBERS, new ArrayOf(new Schema(
new Field(MEMBER_ID, Type.STRING, ""),
new Field("protocol_type", Type.STRING, ""),
new Field("generation", Type.INT32, ""),
new Field("protocol", Type.NULLABLE_STRING, ""),
new Field("leader", Type.NULLABLE_STRING, ""),
new Field("current_state_timestamp", Type.INT64, ""),
new Field("members", new ArrayOf(new Schema(
new Field("member_id", Type.STRING, ""),
new Field("group_instance_id", Type.NULLABLE_STRING, ""),
new Field(CLIENT_ID, Type.STRING, ""),
new Field(CLIENT_HOST, Type.STRING, ""),
new Field(REBALANCE_TIMEOUT, Type.INT32, ""),
new Field(SESSION_TIMEOUT, Type.INT32, ""),
new Field(SUBSCRIPTION, Type.BYTES, ""),
new Field(ASSIGNMENT, Type.BYTES, "")
new Field("client_id", Type.STRING, ""),
new Field("client_host", Type.STRING, ""),
new Field("rebalance_timeout", Type.INT32, ""),
new Field("session_timeout", Type.INT32, ""),
new Field("subscription", Type.BYTES, ""),
new Field("assignment", Type.BYTES, "")
)), "")
);
final Schema metadataSchema4 =
new Schema(
new Field(PROTOCOL_TYPE, Type.COMPACT_STRING, ""),
new Field(GENERATION, Type.INT32, ""),
new Field(PROTOCOL, Type.COMPACT_NULLABLE_STRING, ""),
new Field(LEADER, Type.COMPACT_NULLABLE_STRING, ""),
new Field(CURRENT_STATE_TIMESTAMP, Type.INT64, ""),
new Field(MEMBERS, new CompactArrayOf(new Schema(
new Field(MEMBER_ID, Type.COMPACT_STRING, ""),
new Field("protocol_type", Type.COMPACT_STRING, ""),
new Field("generation", Type.INT32, ""),
new Field("protocol", Type.COMPACT_NULLABLE_STRING, ""),
new Field("leader", Type.COMPACT_NULLABLE_STRING, ""),
new Field("current_state_timestamp", Type.INT64, ""),
new Field("members", new CompactArrayOf(new Schema(
new Field("member_id", Type.COMPACT_STRING, ""),
new Field("group_instance_id", Type.COMPACT_NULLABLE_STRING, ""),
new Field(CLIENT_ID, Type.COMPACT_STRING, ""),
new Field(CLIENT_HOST, Type.COMPACT_STRING, ""),
new Field(REBALANCE_TIMEOUT, Type.INT32, ""),
new Field(SESSION_TIMEOUT, Type.INT32, ""),
new Field(SUBSCRIPTION, Type.COMPACT_BYTES, ""),
new Field(ASSIGNMENT, Type.COMPACT_BYTES, ""),
new Field("client_id", Type.COMPACT_STRING, ""),
new Field("client_host", Type.COMPACT_STRING, ""),
new Field("rebalance_timeout", Type.INT32, ""),
new Field("session_timeout", Type.INT32, ""),
new Field("subscription", Type.COMPACT_BYTES, ""),
new Field("assignment", Type.COMPACT_BYTES, ""),
Field.TaggedFieldsSection.of()
)), ""),
Field.TaggedFieldsSection.of()
@ -266,7 +249,7 @@ public class ConsumerOffsetsSerde implements BuiltInSerde {
short version = bb.getShort();
// ideally, we should distinguish if value is commit or metadata
// by checking record's key, but our current serde structure doesn't allow that.
// so, we are trying to parse into metadata first and after into commit msg
// so, we try to parse into metadata first and then into commit msg
try {
result = toJson(
switch (version) {

View file
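Note on the fallback above: since the serde's current structure cannot consult the record key, it attempts the group-metadata schemas first and falls back to the offset-commit schemas. A minimal, self-contained sketch of that pattern (the class and parameter names below are illustrative, not from the codebase):

import java.util.function.Supplier;

// Illustrative sketch only: try to parse a __consumer_offsets value as group metadata
// first, and fall back to parsing it as an offset-commit message if that fails.
final class FallbackParseSketch {

  static <T> T parseWithFallback(Supplier<T> parseAsGroupMetadata, Supplier<T> parseAsOffsetCommit) {
    try {
      return parseAsGroupMetadata.get();
    } catch (RuntimeException metadataParseFailed) {
      return parseAsOffsetCommit.get();
    }
  }
}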

@ -2,6 +2,7 @@ package com.provectus.kafka.ui.serdes.builtin;
import com.google.common.primitives.Ints;
import com.provectus.kafka.ui.serde.api.DeserializeResult;
import com.provectus.kafka.ui.serde.api.PropertyResolver;
import com.provectus.kafka.ui.serde.api.SchemaDescription;
import com.provectus.kafka.ui.serdes.BuiltInSerde;
import java.util.Map;

View file

@ -0,0 +1,46 @@
package com.provectus.kafka.ui.serdes.builtin.sr;
import com.provectus.kafka.ui.util.jsonschema.JsonAvroConversion;
import io.confluent.kafka.schemaregistry.ParsedSchema;
import io.confluent.kafka.schemaregistry.avro.AvroSchema;
import io.confluent.kafka.schemaregistry.client.SchemaMetadata;
import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient;
import io.confluent.kafka.serializers.AbstractKafkaSchemaSerDeConfig;
import io.confluent.kafka.serializers.KafkaAvroSerializer;
import io.confluent.kafka.serializers.KafkaAvroSerializerConfig;
import java.util.Map;
import org.apache.kafka.common.serialization.Serializer;
class AvroSchemaRegistrySerializer extends SchemaRegistrySerializer<Object> {
AvroSchemaRegistrySerializer(String topic, boolean isKey,
SchemaRegistryClient client,
SchemaMetadata schema) {
super(topic, isKey, client, schema);
}
@Override
protected Serializer<Object> createSerializer(SchemaRegistryClient client) {
var serializer = new KafkaAvroSerializer(client);
serializer.configure(
Map.of(
"schema.registry.url", "wontbeused",
AbstractKafkaSchemaSerDeConfig.AUTO_REGISTER_SCHEMAS, false,
KafkaAvroSerializerConfig.AVRO_USE_LOGICAL_TYPE_CONVERTERS_CONFIG, true,
AbstractKafkaSchemaSerDeConfig.USE_LATEST_VERSION, true
),
isKey
);
return serializer;
}
@Override
protected Object serialize(String value, ParsedSchema schema) {
try {
return JsonAvroConversion.convertJsonToAvro(value, ((AvroSchema) schema).rawSchema());
} catch (Throwable e) {
throw new RuntimeException("Failed to serialize record for topic " + topic, e);
}
}
}

View file

@ -0,0 +1,79 @@
package com.provectus.kafka.ui.serdes.builtin.sr;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.provectus.kafka.ui.exception.ValidationException;
import com.provectus.kafka.ui.util.annotation.KafkaClientInternalsDependant;
import io.confluent.kafka.schemaregistry.ParsedSchema;
import io.confluent.kafka.schemaregistry.client.SchemaMetadata;
import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient;
import io.confluent.kafka.schemaregistry.json.JsonSchema;
import io.confluent.kafka.serializers.AbstractKafkaSchemaSerDeConfig;
import io.confluent.kafka.serializers.json.KafkaJsonSchemaSerializer;
import java.util.Map;
import org.apache.kafka.common.serialization.Serializer;
class JsonSchemaSchemaRegistrySerializer extends SchemaRegistrySerializer<JsonNode> {
private static final ObjectMapper MAPPER = new ObjectMapper();
JsonSchemaSchemaRegistrySerializer(String topic,
boolean isKey,
SchemaRegistryClient client,
SchemaMetadata schema) {
super(topic, isKey, client, schema);
}
@Override
protected Serializer<JsonNode> createSerializer(SchemaRegistryClient client) {
var serializer = new KafkaJsonSchemaSerializerWithoutSchemaInfer(client);
serializer.configure(
Map.of(
"schema.registry.url", "wontbeused",
AbstractKafkaSchemaSerDeConfig.AUTO_REGISTER_SCHEMAS, false,
AbstractKafkaSchemaSerDeConfig.USE_LATEST_VERSION, true
),
isKey
);
return serializer;
}
@Override
protected JsonNode serialize(String value, ParsedSchema schema) {
try {
JsonNode json = MAPPER.readTree(value);
((JsonSchema) schema).validate(json);
return json;
} catch (JsonProcessingException e) {
throw new ValidationException(String.format("'%s' is not valid json", value));
} catch (org.everit.json.schema.ValidationException e) {
throw new ValidationException(
String.format("'%s' does not fit schema: %s", value, e.getAllMessages()));
}
}
@KafkaClientInternalsDependant
private class KafkaJsonSchemaSerializerWithoutSchemaInfer
extends KafkaJsonSchemaSerializer<JsonNode> {
KafkaJsonSchemaSerializerWithoutSchemaInfer(SchemaRegistryClient client) {
super(client);
}
/**
* Need to override the original method because it tries to infer the schema from the input
* by checking a 'schema' json field or a @Schema annotation on the input class, which is not
* possible in our case. So, we just skip all inference logic and pass the schema directly.
*/
@Override
public byte[] serialize(String topic, JsonNode rec) {
return super.serializeImpl(
super.getSubjectName(topic, isKey, rec, schema),
rec,
(JsonSchema) schema
);
}
}
}

View file

@ -0,0 +1,50 @@
package com.provectus.kafka.ui.serdes.builtin.sr;
import com.google.protobuf.DynamicMessage;
import com.google.protobuf.Message;
import com.google.protobuf.util.JsonFormat;
import io.confluent.kafka.schemaregistry.ParsedSchema;
import io.confluent.kafka.schemaregistry.client.SchemaMetadata;
import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient;
import io.confluent.kafka.schemaregistry.protobuf.ProtobufSchema;
import io.confluent.kafka.serializers.AbstractKafkaSchemaSerDeConfig;
import io.confluent.kafka.serializers.protobuf.KafkaProtobufSerializer;
import java.util.Map;
import lombok.SneakyThrows;
import org.apache.kafka.common.serialization.Serializer;
class ProtobufSchemaRegistrySerializer extends SchemaRegistrySerializer<Message> {
@SneakyThrows
public ProtobufSchemaRegistrySerializer(String topic, boolean isKey,
SchemaRegistryClient client, SchemaMetadata schema) {
super(topic, isKey, client, schema);
}
@Override
protected Serializer<Message> createSerializer(SchemaRegistryClient client) {
var serializer = new KafkaProtobufSerializer<>(client);
serializer.configure(
Map.of(
"schema.registry.url", "wontbeused",
AbstractKafkaSchemaSerDeConfig.AUTO_REGISTER_SCHEMAS, false,
AbstractKafkaSchemaSerDeConfig.USE_LATEST_VERSION, true
),
isKey
);
return serializer;
}
@Override
protected Message serialize(String value, ParsedSchema schema) {
ProtobufSchema protobufSchema = (ProtobufSchema) schema;
DynamicMessage.Builder builder = protobufSchema.newMessageBuilder();
try {
JsonFormat.parser().merge(value, builder);
return builder.build();
} catch (Throwable e) {
throw new RuntimeException("Failed to serialize record for topic " + topic, e);
}
}
}

View file
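The serializer above fills a DynamicMessage.Builder from JSON via JsonFormat. A standalone sketch of that conversion, using the well-known Struct type in place of a registry-resolved schema (the class name below is illustrative):

import com.google.protobuf.InvalidProtocolBufferException;
import com.google.protobuf.Struct;
import com.google.protobuf.util.JsonFormat;

// Illustrative sketch only: parse a JSON string into a protobuf builder, as the
// serializer above does with the builder created from the resolved ProtobufSchema.
final class JsonToProtoSketch {

  static Struct parse(String json) throws InvalidProtocolBufferException {
    Struct.Builder builder = Struct.newBuilder();
    JsonFormat.parser().merge(json, builder); // rejects malformed or non-conforming JSON
    return builder.build();
  }
}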

@ -1,8 +1,5 @@
package com.provectus.kafka.ui.serdes.builtin.sr;
import static com.provectus.kafka.ui.serdes.builtin.sr.Serialize.serializeAvro;
import static com.provectus.kafka.ui.serdes.builtin.sr.Serialize.serializeJson;
import static com.provectus.kafka.ui.serdes.builtin.sr.Serialize.serializeProto;
import static io.confluent.kafka.serializers.AbstractKafkaSchemaSerDeConfig.BASIC_AUTH_CREDENTIALS_SOURCE;
import static io.confluent.kafka.serializers.AbstractKafkaSchemaSerDeConfig.USER_INFO_CONFIG;
@ -10,6 +7,7 @@ import com.google.common.annotations.VisibleForTesting;
import com.provectus.kafka.ui.exception.ValidationException;
import com.provectus.kafka.ui.serde.api.DeserializeResult;
import com.provectus.kafka.ui.serde.api.PropertyResolver;
import com.provectus.kafka.ui.serde.api.RecordHeaders;
import com.provectus.kafka.ui.serde.api.SchemaDescription;
import com.provectus.kafka.ui.serdes.BuiltInSerde;
import com.provectus.kafka.ui.util.jsonschema.AvroJsonSchemaConverter;
@ -34,21 +32,17 @@ import java.util.Map;
import java.util.Optional;
import java.util.concurrent.Callable;
import javax.annotation.Nullable;
import lombok.RequiredArgsConstructor;
import lombok.SneakyThrows;
import org.apache.kafka.common.config.SslConfigs;
public class SchemaRegistrySerde implements BuiltInSerde {
private static final byte SR_PAYLOAD_MAGIC_BYTE = 0x0;
private static final int SR_PAYLOAD_PREFIX_LENGTH = 5;
public static String name() {
return "SchemaRegistry";
}
private static final String SCHEMA_REGISTRY = "schemaRegistry";
private SchemaRegistryClient schemaRegistryClient;
private List<String> schemaRegistryUrls;
private String valueSchemaNameTemplate;
@ -60,7 +54,7 @@ public class SchemaRegistrySerde implements BuiltInSerde {
@Override
public boolean canBeAutoConfigured(PropertyResolver kafkaClusterProperties,
PropertyResolver globalProperties) {
return kafkaClusterProperties.getListProperty(SCHEMA_REGISTRY, String.class)
return kafkaClusterProperties.getListProperty("schemaRegistry", String.class)
.filter(lst -> !lst.isEmpty())
.isPresent();
}
@ -68,7 +62,7 @@ public class SchemaRegistrySerde implements BuiltInSerde {
@Override
public void autoConfigure(PropertyResolver kafkaClusterProperties,
PropertyResolver globalProperties) {
var urls = kafkaClusterProperties.getListProperty(SCHEMA_REGISTRY, String.class)
var urls = kafkaClusterProperties.getListProperty("schemaRegistry", String.class)
.filter(lst -> !lst.isEmpty())
.orElseThrow(() -> new ValidationException("No urls provided for schema registry"));
configure(
@ -94,7 +88,7 @@ public class SchemaRegistrySerde implements BuiltInSerde {
PropertyResolver kafkaClusterProperties,
PropertyResolver globalProperties) {
var urls = serdeProperties.getListProperty("url", String.class)
.or(() -> kafkaClusterProperties.getListProperty(SCHEMA_REGISTRY, String.class))
.or(() -> kafkaClusterProperties.getListProperty("schemaRegistry", String.class))
.filter(lst -> !lst.isEmpty())
.orElseThrow(() -> new ValidationException("No urls provided for schema registry"));
configure(
@ -225,8 +219,8 @@ public class SchemaRegistrySerde implements BuiltInSerde {
.convert(basePath, ((AvroSchema) parsedSchema).rawSchema())
.toJson();
case JSON ->
//need to use confluent JsonSchema since it includes resolved references
((JsonSchema) parsedSchema).rawSchema().toString();
//need to use confluent JsonSchema since it includes resolved references
((JsonSchema) parsedSchema).rawSchema().toString();
};
}
@ -258,27 +252,35 @@ public class SchemaRegistrySerde implements BuiltInSerde {
@Override
public Serializer serializer(String topic, Target type) {
String subject = schemaSubject(topic, type);
SchemaMetadata meta = getSchemaBySubject(subject)
.orElseThrow(() -> new ValidationException(
String.format("No schema for subject '%s' found", subject)));
ParsedSchema schema = getSchemaById(meta.getId())
.orElseThrow(() -> new IllegalStateException(
String.format("Schema found for id %s, subject '%s'", meta.getId(), subject)));
SchemaType schemaType = SchemaType.fromString(meta.getSchemaType())
.orElseThrow(() -> new IllegalStateException("Unknown schema type: " + meta.getSchemaType()));
var schema = getSchemaBySubject(subject)
.orElseThrow(() -> new ValidationException(String.format("No schema for subject '%s' found", subject)));
boolean isKey = type == Target.KEY;
SchemaType schemaType = SchemaType.fromString(schema.getSchemaType())
.orElseThrow(() -> new IllegalStateException("Unknown schema type: " + schema.getSchemaType()));
return switch (schemaType) {
case PROTOBUF -> input ->
serializeProto(schemaRegistryClient, topic, type, (ProtobufSchema) schema, meta.getId(), input);
case AVRO -> input ->
serializeAvro((AvroSchema) schema, meta.getId(), input);
case JSON -> input ->
serializeJson((JsonSchema) schema, meta.getId(), input);
case PROTOBUF -> new ProtobufSchemaRegistrySerializer(topic, isKey, schemaRegistryClient, schema);
case AVRO -> new AvroSchemaRegistrySerializer(topic, isKey, schemaRegistryClient, schema);
case JSON -> new JsonSchemaSchemaRegistrySerializer(topic, isKey, schemaRegistryClient, schema);
};
}
@Override
public Deserializer deserializer(String topic, Target type) {
return (headers, data) -> {
return new SrDeserializer(topic);
}
///--------------------------------------------------------------
private static final byte SR_RECORD_MAGIC_BYTE = (byte) 0;
private static final int SR_RECORD_PREFIX_LENGTH = 5;
@RequiredArgsConstructor
private class SrDeserializer implements Deserializer {
private final String topic;
@Override
public DeserializeResult deserialize(RecordHeaders headers, byte[] data) {
var schemaId = extractSchemaIdFromMsg(data);
SchemaType format = getMessageFormatBySchemaId(schemaId);
MessageFormatter formatter = schemaRegistryFormatters.get(format);
@ -290,7 +292,7 @@ public class SchemaRegistrySerde implements BuiltInSerde {
"type", format.name()
)
);
};
}
}
private SchemaType getMessageFormatBySchemaId(int schemaId) {
@ -302,7 +304,7 @@ public class SchemaRegistrySerde implements BuiltInSerde {
private int extractSchemaIdFromMsg(byte[] data) {
ByteBuffer buffer = ByteBuffer.wrap(data);
if (buffer.remaining() >= SR_PAYLOAD_PREFIX_LENGTH && buffer.get() == SR_PAYLOAD_MAGIC_BYTE) {
if (buffer.remaining() > SR_RECORD_PREFIX_LENGTH && buffer.get() == SR_RECORD_MAGIC_BYTE) {
return buffer.getInt();
}
throw new ValidationException(

View file
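The prefix check above follows the Confluent wire format: one 0x0 magic byte, a 4-byte big-endian schema id, then the serialized payload. A self-contained sketch of reading that prefix (names below are illustrative):

import java.nio.ByteBuffer;

// Illustrative sketch only: extract the schema id from a Schema-Registry-framed payload.
final class SchemaIdSketch {

  private static final byte MAGIC_BYTE = 0x0;
  private static final int PREFIX_LENGTH = 1 + Integer.BYTES; // magic byte + schema id

  static int extractSchemaId(byte[] data) {
    ByteBuffer buffer = ByteBuffer.wrap(data);
    if (buffer.remaining() >= PREFIX_LENGTH && buffer.get() == MAGIC_BYTE) {
      return buffer.getInt();
    }
    throw new IllegalArgumentException("payload is not in Schema Registry wire format");
  }

  public static void main(String[] args) {
    byte[] framed = ByteBuffer.allocate(7).put(MAGIC_BYTE).putInt(42).put((byte) 1).put((byte) 2).array();
    System.out.println(extractSchemaId(framed)); // 42
  }
}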

@ -0,0 +1,34 @@
package com.provectus.kafka.ui.serdes.builtin.sr;
import com.provectus.kafka.ui.serde.api.Serde;
import io.confluent.kafka.schemaregistry.ParsedSchema;
import io.confluent.kafka.schemaregistry.client.SchemaMetadata;
import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient;
import lombok.SneakyThrows;
import org.apache.kafka.common.serialization.Serializer;
abstract class SchemaRegistrySerializer<T> implements Serde.Serializer {
protected final Serializer<T> serializer;
protected final String topic;
protected final boolean isKey;
protected final ParsedSchema schema;
@SneakyThrows
protected SchemaRegistrySerializer(String topic, boolean isKey, SchemaRegistryClient client,
SchemaMetadata schema) {
this.topic = topic;
this.isKey = isKey;
this.serializer = createSerializer(client);
this.schema = client.getSchemaById(schema.getId());
}
protected abstract Serializer<T> createSerializer(SchemaRegistryClient client);
@Override
public byte[] serialize(String input) {
final T read = this.serialize(input, schema);
return this.serializer.serialize(topic, read);
}
protected abstract T serialize(String value, ParsedSchema schema);
}

View file

@ -1,126 +0,0 @@
package com.provectus.kafka.ui.serdes.builtin.sr;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.base.Preconditions;
import com.google.protobuf.DynamicMessage;
import com.google.protobuf.Message;
import com.google.protobuf.util.JsonFormat;
import com.provectus.kafka.ui.exception.ValidationException;
import com.provectus.kafka.ui.serde.api.Serde;
import com.provectus.kafka.ui.util.annotation.KafkaClientInternalsDependant;
import com.provectus.kafka.ui.util.jsonschema.JsonAvroConversion;
import io.confluent.kafka.schemaregistry.avro.AvroSchema;
import io.confluent.kafka.schemaregistry.avro.AvroSchemaUtils;
import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient;
import io.confluent.kafka.schemaregistry.json.JsonSchema;
import io.confluent.kafka.schemaregistry.json.jackson.Jackson;
import io.confluent.kafka.schemaregistry.protobuf.MessageIndexes;
import io.confluent.kafka.schemaregistry.protobuf.ProtobufSchema;
import io.confluent.kafka.serializers.protobuf.AbstractKafkaProtobufSerializer;
import io.confluent.kafka.serializers.subject.DefaultReferenceSubjectNameStrategy;
import java.io.ByteArrayOutputStream;
import java.nio.ByteBuffer;
import java.util.HashMap;
import lombok.SneakyThrows;
import org.apache.avro.Schema;
import org.apache.avro.io.BinaryEncoder;
import org.apache.avro.io.DatumWriter;
import org.apache.avro.io.EncoderFactory;
final class Serialize {
private static final byte MAGIC = 0x0;
private static final ObjectMapper JSON_SERIALIZE_MAPPER = Jackson.newObjectMapper(); //from confluent package
private Serialize() {
}
@KafkaClientInternalsDependant("AbstractKafkaJsonSchemaSerializer::serializeImpl")
@SneakyThrows
static byte[] serializeJson(JsonSchema schema, int schemaId, String value) {
JsonNode json;
try {
json = JSON_SERIALIZE_MAPPER.readTree(value);
} catch (JsonProcessingException e) {
throw new ValidationException(String.format("'%s' is not valid json", value));
}
try {
schema.validate(json);
} catch (org.everit.json.schema.ValidationException e) {
throw new ValidationException(
String.format("'%s' does not fit schema: %s", value, e.getAllMessages()));
}
try (var out = new ByteArrayOutputStream()) {
out.write(MAGIC);
out.write(schemaId(schemaId));
out.write(JSON_SERIALIZE_MAPPER.writeValueAsBytes(json));
return out.toByteArray();
}
}
@KafkaClientInternalsDependant("AbstractKafkaProtobufSerializer::serializeImpl")
@SneakyThrows
static byte[] serializeProto(SchemaRegistryClient srClient,
String topic,
Serde.Target target,
ProtobufSchema schema,
int schemaId,
String input) {
// flags are tuned like in ProtobufSerializer by default
boolean normalizeSchema = false;
boolean autoRegisterSchema = false;
boolean useLatestVersion = true;
boolean latestCompatStrict = true;
boolean skipKnownTypes = true;
schema = AbstractKafkaProtobufSerializer.resolveDependencies(
srClient, normalizeSchema, autoRegisterSchema, useLatestVersion, latestCompatStrict,
new HashMap<>(), skipKnownTypes, new DefaultReferenceSubjectNameStrategy(),
topic, target == Serde.Target.KEY, schema
);
DynamicMessage.Builder builder = schema.newMessageBuilder();
JsonFormat.parser().merge(input, builder);
Message message = builder.build();
MessageIndexes indexes = schema.toMessageIndexes(message.getDescriptorForType().getFullName(), normalizeSchema);
try (var out = new ByteArrayOutputStream()) {
out.write(MAGIC);
out.write(schemaId(schemaId));
out.write(indexes.toByteArray());
message.writeTo(out);
return out.toByteArray();
}
}
@KafkaClientInternalsDependant("AbstractKafkaAvroSerializer::serializeImpl")
@SneakyThrows
static byte[] serializeAvro(AvroSchema schema, int schemaId, String input) {
var avroObject = JsonAvroConversion.convertJsonToAvro(input, schema.rawSchema());
try (var out = new ByteArrayOutputStream()) {
out.write(MAGIC);
out.write(schemaId(schemaId));
Schema rawSchema = schema.rawSchema();
if (rawSchema.getType().equals(Schema.Type.BYTES)) {
Preconditions.checkState(
avroObject instanceof ByteBuffer,
"Unrecognized bytes object of type: " + avroObject.getClass().getName()
);
out.write(((ByteBuffer) avroObject).array());
} else {
boolean useLogicalTypeConverters = true;
BinaryEncoder encoder = EncoderFactory.get().directBinaryEncoder(out, null);
DatumWriter<Object> writer =
(DatumWriter<Object>) AvroSchemaUtils.getDatumWriter(avroObject, rawSchema, useLogicalTypeConverters);
writer.write(avroObject, encoder);
encoder.flush();
}
return out.toByteArray();
}
}
private static byte[] schemaId(int id) {
return ByteBuffer.allocate(Integer.BYTES).putInt(id).array();
}
}

View file

@ -1,8 +1,13 @@
package com.provectus.kafka.ui.service;
import com.google.common.base.Charsets;
import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import com.google.common.hash.Hashing;
import com.google.common.util.concurrent.RateLimiter;
import com.provectus.kafka.ui.config.ClustersProperties;
import com.provectus.kafka.ui.emitter.BackwardEmitter;
import com.provectus.kafka.ui.emitter.Cursor;
import com.provectus.kafka.ui.emitter.ForwardEmitter;
import com.provectus.kafka.ui.emitter.MessageFilters;
import com.provectus.kafka.ui.emitter.TailingEmitter;
@ -11,12 +16,12 @@ import com.provectus.kafka.ui.exception.ValidationException;
import com.provectus.kafka.ui.model.ConsumerPosition;
import com.provectus.kafka.ui.model.CreateTopicMessageDTO;
import com.provectus.kafka.ui.model.KafkaCluster;
import com.provectus.kafka.ui.model.MessageFilterTypeDTO;
import com.provectus.kafka.ui.model.SeekDirectionDTO;
import com.provectus.kafka.ui.model.PollingModeDTO;
import com.provectus.kafka.ui.model.SmartFilterTestExecutionDTO;
import com.provectus.kafka.ui.model.SmartFilterTestExecutionResultDTO;
import com.provectus.kafka.ui.model.TopicMessageDTO;
import com.provectus.kafka.ui.model.TopicMessageEventDTO;
import com.provectus.kafka.ui.serdes.ConsumerRecordDeserializer;
import com.provectus.kafka.ui.serdes.ProducerRecordCreator;
import com.provectus.kafka.ui.util.SslPropertiesUtil;
import java.time.Instant;
@ -27,12 +32,12 @@ import java.util.Map;
import java.util.Optional;
import java.util.Properties;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ThreadLocalRandom;
import java.util.function.Predicate;
import java.util.function.UnaryOperator;
import java.util.stream.Collectors;
import javax.annotation.Nullable;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang3.StringUtils;
import org.apache.kafka.clients.admin.OffsetSpec;
import org.apache.kafka.clients.admin.TopicDescription;
import org.apache.kafka.clients.producer.KafkaProducer;
@ -50,8 +55,11 @@ import reactor.core.scheduler.Schedulers;
@Slf4j
public class MessagesService {
private static final long SALT_FOR_HASHING = ThreadLocalRandom.current().nextLong();
private static final int DEFAULT_MAX_PAGE_SIZE = 500;
private static final int DEFAULT_PAGE_SIZE = 100;
// limiting UI messages rate to 20/sec in tailing mode
private static final int TAILING_UI_MESSAGE_THROTTLE_RATE = 20;
@ -61,6 +69,12 @@ public class MessagesService {
private final int maxPageSize;
private final int defaultPageSize;
private final Cache<String, Predicate<TopicMessageDTO>> registeredFilters = CacheBuilder.newBuilder()
.maximumSize(PollingCursorsStorage.MAX_SIZE)
.build();
private final PollingCursorsStorage cursorsStorage = new PollingCursorsStorage();
public MessagesService(AdminClientService adminClientService,
DeserializationService deserializationService,
ConsumerGroupService consumerGroupService,
@ -86,10 +100,7 @@ public class MessagesService {
public static SmartFilterTestExecutionResultDTO execSmartFilterTest(SmartFilterTestExecutionDTO execData) {
Predicate<TopicMessageDTO> predicate;
try {
predicate = MessageFilters.createMsgFilter(
execData.getFilterCode(),
MessageFilterTypeDTO.GROOVY_SCRIPT
);
predicate = MessageFilters.groovyScriptFilter(execData.getFilterCode());
} catch (Exception e) {
log.info("Smart filter '{}' compilation error", execData.getFilterCode(), e);
return new SmartFilterTestExecutionResultDTO()
@ -197,67 +208,103 @@ public class MessagesService {
return new KafkaProducer<>(properties);
}
public Flux<TopicMessageEventDTO> loadMessages(KafkaCluster cluster, String topic,
public Flux<TopicMessageEventDTO> loadMessages(KafkaCluster cluster,
String topic,
ConsumerPosition consumerPosition,
@Nullable String query,
MessageFilterTypeDTO filterQueryType,
@Nullable Integer pageSize,
SeekDirectionDTO seekDirection,
@Nullable String containsStringFilter,
@Nullable String filterId,
@Nullable Integer limit,
@Nullable String keySerde,
@Nullable String valueSerde) {
return loadMessages(
cluster,
topic,
deserializationService.deserializerFor(cluster, topic, keySerde, valueSerde),
consumerPosition,
getMsgFilter(containsStringFilter, filterId),
fixPageSize(limit)
);
}
public Flux<TopicMessageEventDTO> loadMessages(KafkaCluster cluster, String topic, String cursorId) {
Cursor cursor = cursorsStorage.getCursor(cursorId)
.orElseThrow(() -> new ValidationException("Next page cursor not found. Maybe it was evicted from cache."));
return loadMessages(
cluster,
topic,
cursor.deserializer(),
cursor.consumerPosition(),
cursor.filter(),
cursor.limit()
);
}
private Flux<TopicMessageEventDTO> loadMessages(KafkaCluster cluster,
String topic,
ConsumerRecordDeserializer deserializer,
ConsumerPosition consumerPosition,
Predicate<TopicMessageDTO> filter,
int limit) {
return withExistingTopic(cluster, topic)
.flux()
.publishOn(Schedulers.boundedElastic())
.flatMap(td -> loadMessagesImpl(cluster, topic, consumerPosition, query,
filterQueryType, fixPageSize(pageSize), seekDirection, keySerde, valueSerde));
}
private int fixPageSize(@Nullable Integer pageSize) {
return Optional.ofNullable(pageSize)
.filter(ps -> ps > 0 && ps <= maxPageSize)
.orElse(defaultPageSize);
.flatMap(td -> loadMessagesImpl(cluster, deserializer, consumerPosition, filter, limit));
}
private Flux<TopicMessageEventDTO> loadMessagesImpl(KafkaCluster cluster,
String topic,
ConsumerRecordDeserializer deserializer,
ConsumerPosition consumerPosition,
@Nullable String query,
MessageFilterTypeDTO filterQueryType,
int limit,
SeekDirectionDTO seekDirection,
@Nullable String keySerde,
@Nullable String valueSerde) {
var deserializer = deserializationService.deserializerFor(cluster, topic, keySerde, valueSerde);
var filter = getMsgFilter(query, filterQueryType);
var emitter = switch (seekDirection) {
case FORWARD -> new ForwardEmitter(
Predicate<TopicMessageDTO> filter,
int limit) {
var emitter = switch (consumerPosition.pollingMode()) {
case TO_OFFSET, TO_TIMESTAMP, LATEST -> new BackwardEmitter(
() -> consumerGroupService.createConsumer(cluster),
consumerPosition, limit, deserializer, filter, cluster.getPollingSettings()
consumerPosition,
limit,
deserializer,
filter,
cluster.getPollingSettings(),
cursorsStorage.createNewCursor(deserializer, consumerPosition, filter, limit)
);
case BACKWARD -> new BackwardEmitter(
case FROM_OFFSET, FROM_TIMESTAMP, EARLIEST -> new ForwardEmitter(
() -> consumerGroupService.createConsumer(cluster),
consumerPosition, limit, deserializer, filter, cluster.getPollingSettings()
consumerPosition,
limit,
deserializer,
filter,
cluster.getPollingSettings(),
cursorsStorage.createNewCursor(deserializer, consumerPosition, filter, limit)
);
case TAILING -> new TailingEmitter(
() -> consumerGroupService.createConsumer(cluster),
consumerPosition, deserializer, filter, cluster.getPollingSettings()
consumerPosition,
deserializer,
filter,
cluster.getPollingSettings()
);
};
return Flux.create(emitter)
.map(throttleUiPublish(seekDirection));
.map(throttleUiPublish(consumerPosition.pollingMode()));
}
private Predicate<TopicMessageDTO> getMsgFilter(String query,
MessageFilterTypeDTO filterQueryType) {
if (StringUtils.isEmpty(query)) {
return evt -> true;
private Predicate<TopicMessageDTO> getMsgFilter(@Nullable String containsStrFilter,
@Nullable String smartFilterId) {
Predicate<TopicMessageDTO> messageFilter = MessageFilters.noop();
if (containsStrFilter != null) {
messageFilter = messageFilter.and(MessageFilters.containsStringFilter(containsStrFilter));
}
return MessageFilters.createMsgFilter(query, filterQueryType);
if (smartFilterId != null) {
var registered = registeredFilters.getIfPresent(smartFilterId);
if (registered == null) {
throw new ValidationException("No filter was registered with id " + smartFilterId);
}
messageFilter = messageFilter.and(registered);
}
return messageFilter;
}
private <T> UnaryOperator<T> throttleUiPublish(SeekDirectionDTO seekDirection) {
if (seekDirection == SeekDirectionDTO.TAILING) {
private <T> UnaryOperator<T> throttleUiPublish(PollingModeDTO pollingMode) {
if (pollingMode == PollingModeDTO.TAILING) {
RateLimiter rateLimiter = RateLimiter.create(TAILING_UI_MESSAGE_THROTTLE_RATE);
return m -> {
rateLimiter.acquire(1);
@ -269,4 +316,22 @@ public class MessagesService {
return UnaryOperator.identity();
}
private int fixPageSize(@Nullable Integer pageSize) {
return Optional.ofNullable(pageSize)
.filter(ps -> ps > 0 && ps <= maxPageSize)
.orElse(defaultPageSize);
}
public String registerMessageFilter(String groovyCode) {
String saltedCode = groovyCode + SALT_FOR_HASHING;
String filterId = Hashing.sha256()
.hashString(saltedCode, Charsets.UTF_8)
.toString()
.substring(0, 8);
if (registeredFilters.getIfPresent(filterId) == null) {
registeredFilters.put(filterId, MessageFilters.groovyScriptFilter(groovyCode));
}
return filterId;
}
}

View file
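A note on the filter registration above: the filter id is the first 8 hex characters of a salted SHA-256 of the Groovy code, so re-registering the same code yields the same id and the predicate cache stays bounded. A minimal JDK-only sketch of the same idea (the class name and salt choice are illustrative):

import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.util.HexFormat;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.function.Predicate;

// Illustrative sketch only: derive a stable short id from the filter code and cache the
// compiled predicate under it, mirroring registerMessageFilter/getMsgFilter above.
final class FilterRegistrySketch<T> {

  private final long salt = System.nanoTime(); // stand-in for the service's random per-process salt
  private final Map<String, Predicate<T>> registered = new ConcurrentHashMap<>();

  String register(String filterCode, Predicate<T> compiled) throws NoSuchAlgorithmException {
    byte[] digest = MessageDigest.getInstance("SHA-256")
        .digest((filterCode + salt).getBytes(StandardCharsets.UTF_8));
    String id = HexFormat.of().formatHex(digest).substring(0, 8);
    registered.putIfAbsent(id, compiled);
    return id;
  }

  Predicate<T> lookup(String id) {
    Predicate<T> predicate = registered.get(id);
    if (predicate == null) {
      throw new IllegalArgumentException("No filter was registered with id " + id);
    }
    return predicate;
  }
}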

@ -0,0 +1,45 @@
package com.provectus.kafka.ui.service;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import com.provectus.kafka.ui.emitter.Cursor;
import com.provectus.kafka.ui.model.ConsumerPosition;
import com.provectus.kafka.ui.model.TopicMessageDTO;
import com.provectus.kafka.ui.serdes.ConsumerRecordDeserializer;
import java.util.Map;
import java.util.Optional;
import java.util.function.Predicate;
import org.apache.commons.lang3.RandomStringUtils;
public class PollingCursorsStorage {
public static final int MAX_SIZE = 10_000;
private final Cache<String, Cursor> cursorsCache = CacheBuilder.newBuilder()
.maximumSize(MAX_SIZE)
.build();
public Cursor.Tracking createNewCursor(ConsumerRecordDeserializer deserializer,
ConsumerPosition originalPosition,
Predicate<TopicMessageDTO> filter,
int limit) {
return new Cursor.Tracking(deserializer, originalPosition, filter, limit, this::register);
}
public Optional<Cursor> getCursor(String id) {
return Optional.ofNullable(cursorsCache.getIfPresent(id));
}
public String register(Cursor cursor) {
var id = RandomStringUtils.random(8, true, true);
cursorsCache.put(id, cursor);
return id;
}
@VisibleForTesting
public Map<String, Cursor> asMap() {
return cursorsCache.asMap();
}
}

View file

@ -31,7 +31,6 @@ import java.util.function.BiFunction;
import java.util.function.Function;
import java.util.function.Predicate;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import java.util.stream.Stream;
import javax.annotation.Nullable;
import lombok.AccessLevel;
@ -56,7 +55,6 @@ import org.apache.kafka.clients.admin.NewPartitionReassignment;
import org.apache.kafka.clients.admin.NewPartitions;
import org.apache.kafka.clients.admin.NewTopic;
import org.apache.kafka.clients.admin.OffsetSpec;
import org.apache.kafka.clients.admin.ProducerState;
import org.apache.kafka.clients.admin.RecordsToDelete;
import org.apache.kafka.clients.admin.TopicDescription;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
@ -660,21 +658,6 @@ public class ReactiveAdminClient implements Closeable {
return toMono(client.alterReplicaLogDirs(replicaAssignment).all());
}
// returns tp -> list of active producer's states (if any)
public Mono<Map<TopicPartition, List<ProducerState>>> getActiveProducersState(String topic) {
return describeTopic(topic)
.map(td -> client.describeProducers(
IntStream.range(0, td.partitions().size())
.mapToObj(i -> new TopicPartition(topic, i))
.toList()
).all()
)
.flatMap(ReactiveAdminClient::toMono)
.map(map -> map.entrySet().stream()
.filter(e -> !e.getValue().activeProducers().isEmpty()) // skipping partitions without producers
.collect(toMap(Map.Entry::getKey, e -> e.getValue().activeProducers())));
}
private Mono<Void> incrementalAlterConfig(String topicName,
List<ConfigEntry> currentConfigs,
Map<String, String> newConfigs) {

View file

@ -39,7 +39,6 @@ import org.apache.kafka.clients.admin.ConfigEntry;
import org.apache.kafka.clients.admin.NewPartitionReassignment;
import org.apache.kafka.clients.admin.NewPartitions;
import org.apache.kafka.clients.admin.OffsetSpec;
import org.apache.kafka.clients.admin.ProducerState;
import org.apache.kafka.clients.admin.TopicDescription;
import org.apache.kafka.common.Node;
import org.apache.kafka.common.TopicPartition;
@ -460,11 +459,6 @@ public class TopicsService {
);
}
public Mono<Map<TopicPartition, List<ProducerState>>> getActiveProducersState(KafkaCluster cluster, String topic) {
return adminClientService.get(cluster)
.flatMap(ac -> ac.getActiveProducersState(topic));
}
private Mono<List<String>> filterExisting(KafkaCluster cluster, Collection<String> topics) {
return adminClientService.get(cluster)
.flatMap(ac -> ac.listTopics(true))

View file

@ -1,6 +1,6 @@
package com.provectus.kafka.ui.service.analyze;
import static com.provectus.kafka.ui.model.SeekTypeDTO.BEGINNING;
import static com.provectus.kafka.ui.model.PollingModeDTO.EARLIEST;
import com.provectus.kafka.ui.emitter.EnhancedConsumer;
import com.provectus.kafka.ui.emitter.SeekOperations;
@ -14,6 +14,7 @@ import java.io.Closeable;
import java.time.Duration;
import java.time.Instant;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import lombok.RequiredArgsConstructor;
@ -104,7 +105,8 @@ public class TopicAnalysisService {
consumer.partitionsFor(topicId.topicName)
.forEach(tp -> partitionStats.put(tp.partition(), new TopicAnalysisStats()));
var seekOperations = SeekOperations.create(consumer, new ConsumerPosition(BEGINNING, topicId.topicName, null));
var seekOperations =
SeekOperations.create(consumer, new ConsumerPosition(EARLIEST, topicId.topicName, List.of(), null, null));
long summaryOffsetsRange = seekOperations.summaryOffsetsRange();
seekOperations.assignAndSeekNonEmptyPartitions();

View file

@ -8,7 +8,7 @@ import java.util.Objects;
import java.util.function.Function;
import java.util.stream.Stream;
import javax.annotation.Nullable;
import org.apache.commons.collections4.CollectionUtils;
import org.apache.commons.collections.CollectionUtils;
import org.opendatadiscovery.oddrn.JdbcUrlParser;
import org.opendatadiscovery.oddrn.model.HivePath;
import org.opendatadiscovery.oddrn.model.MysqlPath;

View file

@ -11,9 +11,6 @@ import org.apache.kafka.common.Node;
class WellKnownMetrics {
private static final String BROKER_TOPIC_METRICS = "BrokerTopicMetrics";
private static final String FIFTEEN_MINUTE_RATE = "FifteenMinuteRate";
// per broker
final Map<Integer, BigDecimal> brokerBytesInFifteenMinuteRate = new HashMap<>();
final Map<Integer, BigDecimal> brokerBytesOutFifteenMinuteRate = new HashMap<>();
@ -39,15 +36,15 @@ class WellKnownMetrics {
if (!brokerBytesInFifteenMinuteRate.containsKey(node.id())
&& rawMetric.labels().size() == 1
&& "BytesInPerSec".equalsIgnoreCase(rawMetric.labels().get("name"))
&& containsIgnoreCase(name, BROKER_TOPIC_METRICS)
&& endsWithIgnoreCase(name, FIFTEEN_MINUTE_RATE)) {
&& containsIgnoreCase(name, "BrokerTopicMetrics")
&& endsWithIgnoreCase(name, "FifteenMinuteRate")) {
brokerBytesInFifteenMinuteRate.put(node.id(), rawMetric.value());
}
if (!brokerBytesOutFifteenMinuteRate.containsKey(node.id())
&& rawMetric.labels().size() == 1
&& "BytesOutPerSec".equalsIgnoreCase(rawMetric.labels().get("name"))
&& containsIgnoreCase(name, BROKER_TOPIC_METRICS)
&& endsWithIgnoreCase(name, FIFTEEN_MINUTE_RATE)) {
&& containsIgnoreCase(name, "BrokerTopicMetrics")
&& endsWithIgnoreCase(name, "FifteenMinuteRate")) {
brokerBytesOutFifteenMinuteRate.put(node.id(), rawMetric.value());
}
}
@ -56,8 +53,8 @@ class WellKnownMetrics {
String name = rawMetric.name();
String topic = rawMetric.labels().get("topic");
if (topic != null
&& containsIgnoreCase(name, BROKER_TOPIC_METRICS)
&& endsWithIgnoreCase(name, FIFTEEN_MINUTE_RATE)) {
&& containsIgnoreCase(name, "BrokerTopicMetrics")
&& endsWithIgnoreCase(name, "FifteenMinuteRate")) {
String nameProperty = rawMetric.labels().get("name");
if ("BytesInPerSec".equalsIgnoreCase(nameProperty)) {
bytesInFifteenMinuteRate.compute(topic, (k, v) -> v == null ? rawMetric.value() : v.add(rawMetric.value()));

View file

@ -33,7 +33,7 @@ import java.util.stream.Collectors;
import javax.annotation.Nullable;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.collections4.CollectionUtils;
import org.apache.commons.collections.CollectionUtils;
import org.apache.commons.lang3.StringUtils;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.core.env.Environment;
@ -52,7 +52,6 @@ import reactor.core.publisher.Mono;
public class AccessControlService {
private static final String ACCESS_DENIED = "Access denied";
private static final String ACTIONS_ARE_EMPTY = "actions are empty";
@Nullable
private final InMemoryReactiveClientRegistrationRepository clientRegistrationRepository;
@ -207,7 +206,7 @@ public class AccessControlService {
if (context.getTopic() == null && context.getTopicActions().isEmpty()) {
return true;
}
Assert.isTrue(!context.getTopicActions().isEmpty(), ACTIONS_ARE_EMPTY);
Assert.isTrue(!context.getTopicActions().isEmpty(), "actions are empty");
Set<String> requiredActions = context.getTopicActions()
.stream()
@ -244,7 +243,7 @@ public class AccessControlService {
if (context.getConsumerGroup() == null && context.getConsumerGroupActions().isEmpty()) {
return true;
}
Assert.isTrue(!context.getConsumerGroupActions().isEmpty(), ACTIONS_ARE_EMPTY);
Assert.isTrue(!context.getConsumerGroupActions().isEmpty(), "actions are empty");
Set<String> requiredActions = context.getConsumerGroupActions()
.stream()
@ -277,7 +276,7 @@ public class AccessControlService {
if (context.getSchema() == null && context.getSchemaActions().isEmpty()) {
return true;
}
Assert.isTrue(!context.getSchemaActions().isEmpty(), ACTIONS_ARE_EMPTY);
Assert.isTrue(!context.getSchemaActions().isEmpty(), "actions are empty");
Set<String> requiredActions = context.getSchemaActions()
.stream()
@ -310,7 +309,7 @@ public class AccessControlService {
if (context.getConnect() == null && context.getConnectActions().isEmpty()) {
return true;
}
Assert.isTrue(!context.getConnectActions().isEmpty(), ACTIONS_ARE_EMPTY);
Assert.isTrue(!context.getConnectActions().isEmpty(), "actions are empty");
Set<String> requiredActions = context.getConnectActions()
.stream()

View file

@ -59,8 +59,8 @@ public class CognitoAuthorityExtractor implements ProviderAuthorityExtractor {
.stream()
.filter(s -> s.getProvider().equals(Provider.OAUTH_COGNITO))
.filter(s -> s.getType().equals("group"))
.anyMatch(subject -> groups
.stream()
.anyMatch(subject -> Stream.of(groups)
.map(Object::toString)
.anyMatch(cognitoGroup -> cognitoGroup.equals(subject.getValue()))
))
.map(Role::getName)

View file

@ -18,10 +18,6 @@ import lombok.RequiredArgsConstructor;
@RequiredArgsConstructor(access = PRIVATE)
public class ApplicationMetrics {
// kafka-ui specific metrics prefix. Added to make it easier to distinguish kui metrics from
// other metrics, exposed by spring boot (like http stats, jvm, etc.)
private static final String COMMON_PREFIX = "kui_";
private final String clusterName;
private final MeterRegistry registry;
@ -44,7 +40,7 @@ public class ApplicationMetrics {
}
private Counter polledRecords(String topic) {
return Counter.builder(COMMON_PREFIX + "topic_records_polled")
return Counter.builder("topic_records_polled")
.description("Number of records polled from topic")
.tag("cluster", clusterName)
.tag("topic", topic)
@ -52,7 +48,7 @@ public class ApplicationMetrics {
}
private DistributionSummary polledBytes(String topic) {
return DistributionSummary.builder(COMMON_PREFIX + "topic_polled_bytes")
return DistributionSummary.builder("topic_polled_bytes")
.description("Bytes polled from kafka topic")
.tag("cluster", clusterName)
.tag("topic", topic)
@ -60,7 +56,7 @@ public class ApplicationMetrics {
}
private Timer pollTimer(String topic) {
return Timer.builder(COMMON_PREFIX + "topic_poll_time")
return Timer.builder("topic_poll_time")
.description("Time spend in polling for topic")
.tag("cluster", clusterName)
.tag("topic", topic)
@ -68,7 +64,7 @@ public class ApplicationMetrics {
}
private Counter pollThrottlingActivations() {
return Counter.builder(COMMON_PREFIX + "poll_throttling_activations")
return Counter.builder("poll_throttling_activations")
.description("Number of poll throttling activations")
.tag("cluster", clusterName)
.register(registry);
@ -76,7 +72,7 @@ public class ApplicationMetrics {
public AtomicInteger activeConsumers() {
var count = new AtomicInteger();
Gauge.builder(COMMON_PREFIX + "active_consumers", () -> count)
Gauge.builder("active_consumers", () -> count)
.description("Number of active consumers")
.tag("cluster", clusterName)
.register(registry);

View file

@ -45,7 +45,6 @@ import reactor.core.publisher.Mono;
public class DynamicConfigOperations {
static final String DYNAMIC_CONFIG_ENABLED_ENV_PROPERTY = "dynamic.config.enabled";
static final String FILTERING_GROOVY_ENABLED_PROPERTY = "filtering.groovy.enabled";
static final String DYNAMIC_CONFIG_PATH_ENV_PROPERTY = "dynamic.config.path";
static final String DYNAMIC_CONFIG_PATH_ENV_PROPERTY_DEFAULT = "/etc/kafkaui/dynamic_config.yaml";
@ -65,10 +64,6 @@ public class DynamicConfigOperations {
return "true".equalsIgnoreCase(ctx.getEnvironment().getProperty(DYNAMIC_CONFIG_ENABLED_ENV_PROPERTY));
}
public boolean filteringGroovyEnabled() {
return "true".equalsIgnoreCase(ctx.getEnvironment().getProperty(FILTERING_GROOVY_ENABLED_PROPERTY));
}
private Path dynamicConfigFilePath() {
return Paths.get(
Optional.ofNullable(ctx.getEnvironment().getProperty(DYNAMIC_CONFIG_PATH_ENV_PROPERTY))
@ -152,14 +147,6 @@ public class DynamicConfigOperations {
.onErrorMap(th -> new FileUploadException(targetFilePath, th));
}
public void checkIfFilteringGroovyEnabled() {
if (!filteringGroovyEnabled()) {
throw new ValidationException(
"Groovy filters is not allowed. "
+ "Set filtering.groovy.enabled property to 'true' to enabled it.");
}
}
private void checkIfDynamicConfigEnabled() {
if (!dynamicConfigEnabled()) {
throw new ValidationException(

View file

@ -5,5 +5,4 @@ package com.provectus.kafka.ui.util.annotation;
* should be marked with this annotation to make further update process easier.
*/
public @interface KafkaClientInternalsDependant {
String value() default "";
}

View file

@ -43,8 +43,6 @@ public class JsonAvroConversion {
private static final JsonMapper MAPPER = new JsonMapper();
private static final Schema NULL_SCHEMA = Schema.create(Schema.Type.NULL);
private static final String FORMAT = "format";
private static final String DATE_TIME = "date-time";
// converts json into Object that is expected input for KafkaAvroSerializer
// (with AVRO_USE_LOGICAL_TYPE_CONVERTERS flag enabled!)
@ -349,7 +347,7 @@ public class JsonAvroConversion {
new SimpleFieldSchema(
new SimpleJsonType(
JsonType.Type.STRING,
Map.of(FORMAT, new TextNode("uuid"))))
Map.of("format", new TextNode("uuid"))))
),
DECIMAL("decimal",
@ -387,7 +385,7 @@ public class JsonAvroConversion {
new SimpleFieldSchema(
new SimpleJsonType(
JsonType.Type.STRING,
Map.of(FORMAT, new TextNode("date"))))
Map.of("format", new TextNode("date"))))
),
TIME_MILLIS("time-millis",
@ -408,7 +406,7 @@ public class JsonAvroConversion {
new SimpleFieldSchema(
new SimpleJsonType(
JsonType.Type.STRING,
Map.of(FORMAT, new TextNode("time"))))
Map.of("format", new TextNode("time"))))
),
TIME_MICROS("time-micros",
@ -429,7 +427,7 @@ public class JsonAvroConversion {
new SimpleFieldSchema(
new SimpleJsonType(
JsonType.Type.STRING,
Map.of(FORMAT, new TextNode("time"))))
Map.of("format", new TextNode("time"))))
),
TIMESTAMP_MILLIS("timestamp-millis",
@ -450,7 +448,7 @@ public class JsonAvroConversion {
new SimpleFieldSchema(
new SimpleJsonType(
JsonType.Type.STRING,
Map.of(FORMAT, new TextNode(DATE_TIME))))
Map.of("format", new TextNode("date-time"))))
),
TIMESTAMP_MICROS("timestamp-micros",
@ -475,7 +473,7 @@ public class JsonAvroConversion {
new SimpleFieldSchema(
new SimpleJsonType(
JsonType.Type.STRING,
Map.of(FORMAT, new TextNode(DATE_TIME))))
Map.of("format", new TextNode("date-time"))))
),
LOCAL_TIMESTAMP_MILLIS("local-timestamp-millis",
@ -493,7 +491,7 @@ public class JsonAvroConversion {
new SimpleFieldSchema(
new SimpleJsonType(
JsonType.Type.STRING,
Map.of(FORMAT, new TextNode(DATE_TIME))))
Map.of("format", new TextNode("date-time"))))
),
LOCAL_TIMESTAMP_MICROS("local-timestamp-micros",
@ -510,7 +508,7 @@ public class JsonAvroConversion {
new SimpleFieldSchema(
new SimpleJsonType(
JsonType.Type.STRING,
Map.of(FORMAT, new TextNode(DATE_TIME))))
Map.of("format", new TextNode("date-time"))))
);
private final String name;

View file

@ -37,9 +37,6 @@ import reactor.util.function.Tuples;
public class ProtobufSchemaConverter implements JsonSchemaConverter<Descriptors.Descriptor> {
private static final String MAXIMUM = "maximum";
private static final String MINIMUM = "minimum";
private final Set<String> simpleTypesWrapperNames = Set.of(
BoolValue.getDescriptor().getFullName(),
Int32Value.getDescriptor().getFullName(),
@ -159,15 +156,15 @@ public class ProtobufSchemaConverter implements JsonSchemaConverter<Descriptors.
case INT32, FIXED32, SFIXED32, SINT32 -> new SimpleJsonType(
JsonType.Type.INTEGER,
Map.of(
MAXIMUM, IntNode.valueOf(Integer.MAX_VALUE),
MINIMUM, IntNode.valueOf(Integer.MIN_VALUE)
"maximum", IntNode.valueOf(Integer.MAX_VALUE),
"minimum", IntNode.valueOf(Integer.MIN_VALUE)
)
);
case UINT32 -> new SimpleJsonType(
JsonType.Type.INTEGER,
Map.of(
MAXIMUM, LongNode.valueOf(UnsignedInteger.MAX_VALUE.longValue()),
MINIMUM, IntNode.valueOf(0)
"maximum", LongNode.valueOf(UnsignedInteger.MAX_VALUE.longValue()),
"minimum", IntNode.valueOf(0)
)
);
//TODO: actually all *64 types will be printed with quotes (as strings),
@ -176,15 +173,15 @@ public class ProtobufSchemaConverter implements JsonSchemaConverter<Descriptors.
case INT64, FIXED64, SFIXED64, SINT64 -> new SimpleJsonType(
JsonType.Type.INTEGER,
Map.of(
MAXIMUM, LongNode.valueOf(Long.MAX_VALUE),
MINIMUM, LongNode.valueOf(Long.MIN_VALUE)
"maximum", LongNode.valueOf(Long.MAX_VALUE),
"minimum", LongNode.valueOf(Long.MIN_VALUE)
)
);
case UINT64 -> new SimpleJsonType(
JsonType.Type.INTEGER,
Map.of(
MAXIMUM, new BigIntegerNode(UnsignedLong.MAX_VALUE.bigIntegerValue()),
MINIMUM, LongNode.valueOf(0)
"maximum", new BigIntegerNode(UnsignedLong.MAX_VALUE.bigIntegerValue()),
"minimum", LongNode.valueOf(0)
)
);
case MESSAGE, GROUP -> new SimpleJsonType(JsonType.Type.OBJECT);

View file

@ -56,7 +56,7 @@ public class KafkaConsumerTests extends AbstractIntegrationTest {
}
long count = webTestClient.get()
.uri("/api/clusters/{clusterName}/topics/{topicName}/messages", LOCAL, topicName)
.uri("/api/clusters/{clusterName}/topics/{topicName}/messages/v2?m=EARLIEST", LOCAL, topicName)
.accept(TEXT_EVENT_STREAM)
.exchange()
.expectStatus()

View file

@ -0,0 +1,195 @@
package com.provectus.kafka.ui.emitter;
import static org.assertj.core.api.Assertions.assertThat;
import com.provectus.kafka.ui.AbstractIntegrationTest;
import com.provectus.kafka.ui.model.ConsumerPosition;
import com.provectus.kafka.ui.model.PollingModeDTO;
import com.provectus.kafka.ui.model.TopicMessageEventDTO;
import com.provectus.kafka.ui.producer.KafkaTestProducer;
import com.provectus.kafka.ui.serde.api.Serde;
import com.provectus.kafka.ui.serdes.ConsumerRecordDeserializer;
import com.provectus.kafka.ui.serdes.PropertyResolverImpl;
import com.provectus.kafka.ui.serdes.builtin.StringSerde;
import com.provectus.kafka.ui.service.PollingCursorsStorage;
import com.provectus.kafka.ui.util.ApplicationMetrics;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.UUID;
import java.util.function.Consumer;
import org.apache.kafka.clients.admin.NewTopic;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.TopicPartition;
import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
import reactor.core.publisher.Flux;
import reactor.test.StepVerifier;
class CursorTest extends AbstractIntegrationTest {
static final String TOPIC = CursorTest.class.getSimpleName() + "_" + UUID.randomUUID();
static final int MSGS_IN_PARTITION = 20;
static final int PAGE_SIZE = (MSGS_IN_PARTITION / 2) + 1; //to poll the full data set in 2 iterations
final PollingCursorsStorage cursorsStorage = new PollingCursorsStorage();
@BeforeAll
static void setup() {
createTopic(new NewTopic(TOPIC, 1, (short) 1));
try (var producer = KafkaTestProducer.forKafka(kafka)) {
for (int i = 0; i < MSGS_IN_PARTITION; i++) {
producer.send(new ProducerRecord<>(TOPIC, "msg_" + i));
}
}
}
@AfterAll
static void cleanup() {
deleteTopic(TOPIC);
}
@Test
void backwardEmitter() {
var consumerPosition = new ConsumerPosition(PollingModeDTO.LATEST, TOPIC, List.of(), null, null);
var emitter = createBackwardEmitter(consumerPosition);
emitMessages(emitter, PAGE_SIZE);
var cursor = assertCursor(
PollingModeDTO.TO_OFFSET,
offsets -> assertThat(offsets)
.hasSize(1)
.containsEntry(new TopicPartition(TOPIC, 0), 9L)
);
// polling remaining records using registered cursor
emitter = createBackwardEmitterWithCursor(cursor);
emitMessages(emitter, MSGS_IN_PARTITION - PAGE_SIZE);
//checking no new cursors registered
assertThat(cursorsStorage.asMap()).hasSize(1).containsValue(cursor);
}
@Test
void forwardEmitter() {
var consumerPosition = new ConsumerPosition(PollingModeDTO.EARLIEST, TOPIC, List.of(), null, null);
var emitter = createForwardEmitter(consumerPosition);
emitMessages(emitter, PAGE_SIZE);
var cursor = assertCursor(
PollingModeDTO.FROM_OFFSET,
offsets -> assertThat(offsets)
.hasSize(1)
.containsEntry(new TopicPartition(TOPIC, 0), 11L)
);
//polling remaining records using registered cursor
emitter = createForwardEmitterWithCursor(cursor);
emitMessages(emitter, MSGS_IN_PARTITION - PAGE_SIZE);
//checking no new cursors registered
assertThat(cursorsStorage.asMap()).hasSize(1).containsValue(cursor);
}
private Cursor assertCursor(PollingModeDTO expectedMode,
Consumer<Map<TopicPartition, Long>> offsetsAssert) {
Cursor registeredCursor = cursorsStorage.asMap().values().stream().findFirst().orElse(null);
assertThat(registeredCursor).isNotNull();
assertThat(registeredCursor.limit()).isEqualTo(PAGE_SIZE);
assertThat(registeredCursor.deserializer()).isNotNull();
assertThat(registeredCursor.filter()).isNotNull();
var cursorPosition = registeredCursor.consumerPosition();
assertThat(cursorPosition).isNotNull();
assertThat(cursorPosition.topic()).isEqualTo(TOPIC);
assertThat(cursorPosition.partitions()).isEqualTo(List.of());
assertThat(cursorPosition.pollingMode()).isEqualTo(expectedMode);
offsetsAssert.accept(cursorPosition.offsets().tpOffsets());
return registeredCursor;
}
private void emitMessages(AbstractEmitter emitter, int expectedCnt) {
StepVerifier.create(
Flux.create(emitter)
.filter(e -> e.getType() == TopicMessageEventDTO.TypeEnum.MESSAGE)
.map(e -> e.getMessage().getContent())
)
.expectNextCount(expectedCnt)
.verifyComplete();
}
private BackwardEmitter createBackwardEmitter(ConsumerPosition position) {
return new BackwardEmitter(
this::createConsumer,
position,
PAGE_SIZE,
createRecordsDeserializer(),
m -> true,
PollingSettings.createDefault(),
createCursor(position)
);
}
private BackwardEmitter createBackwardEmitterWithCursor(Cursor cursor) {
return new BackwardEmitter(
this::createConsumer,
cursor.consumerPosition(),
cursor.limit(),
cursor.deserializer(),
cursor.filter(),
PollingSettings.createDefault(),
createCursor(cursor.consumerPosition())
);
}
private ForwardEmitter createForwardEmitterWithCursor(Cursor cursor) {
return new ForwardEmitter(
this::createConsumer,
cursor.consumerPosition(),
cursor.limit(),
cursor.deserializer(),
cursor.filter(),
PollingSettings.createDefault(),
createCursor(cursor.consumerPosition())
);
}
private ForwardEmitter createForwardEmitter(ConsumerPosition position) {
return new ForwardEmitter(
this::createConsumer,
position,
PAGE_SIZE,
createRecordsDeserializer(),
m -> true,
PollingSettings.createDefault(),
createCursor(position)
);
}
private Cursor.Tracking createCursor(ConsumerPosition position) {
return cursorsStorage.createNewCursor(createRecordsDeserializer(), position, m -> true, PAGE_SIZE);
}
private EnhancedConsumer createConsumer() {
Properties props = new Properties();
props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, kafka.getBootstrapServers());
props.put(ConsumerConfig.GROUP_ID_CONFIG, UUID.randomUUID().toString());
props.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, PAGE_SIZE - 1); // to check multiple polls
return new EnhancedConsumer(props, PollingThrottler.noop(), ApplicationMetrics.noop());
}
private static ConsumerRecordDeserializer createRecordsDeserializer() {
Serde s = new StringSerde();
s.configure(PropertyResolverImpl.empty(), PropertyResolverImpl.empty(), PropertyResolverImpl.empty());
return new ConsumerRecordDeserializer(
StringSerde.name(),
s.deserializer(null, Serde.Target.KEY),
StringSerde.name(),
s.deserializer(null, Serde.Target.VALUE),
StringSerde.name(),
s.deserializer(null, Serde.Target.KEY),
s.deserializer(null, Serde.Target.VALUE),
msg -> msg
);
}
}

View file

@ -1,8 +1,13 @@
package com.provectus.kafka.ui.emitter;
import static com.provectus.kafka.ui.model.PollingModeDTO.EARLIEST;
import static com.provectus.kafka.ui.model.PollingModeDTO.LATEST;
import static com.provectus.kafka.ui.model.PollingModeDTO.TAILING;
import static org.assertj.core.api.Assertions.assertThat;
import com.provectus.kafka.ui.model.SeekTypeDTO;
import com.provectus.kafka.ui.model.ConsumerPosition;
import com.provectus.kafka.ui.model.PollingModeDTO;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import java.util.stream.Stream;
@ -14,6 +19,8 @@ import org.apache.kafka.common.utils.Bytes;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Nested;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.CsvSource;
class SeekOperationsTest {
@ -40,13 +47,22 @@ class SeekOperationsTest {
@Nested
class GetOffsetsForSeek {
@Test
void tailing() {
var offsets = SeekOperations.getOffsetsForSeek(
consumer,
new OffsetsInfo(consumer, topic),
new ConsumerPosition(TAILING, topic, List.of(), null, null)
);
assertThat(offsets).containsExactlyInAnyOrderEntriesOf(Map.of(tp0, 0L, tp1, 10L, tp2, 20L, tp3, 30L));
}
@Test
void latest() {
var offsets = SeekOperations.getOffsetsForSeek(
consumer,
new OffsetsInfo(consumer, topic),
SeekTypeDTO.LATEST,
null
new ConsumerPosition(LATEST, topic, List.of(), null, null)
);
assertThat(offsets).containsExactlyInAnyOrderEntriesOf(Map.of(tp2, 20L, tp3, 30L));
}
@ -56,33 +72,38 @@ class SeekOperationsTest {
var offsets = SeekOperations.getOffsetsForSeek(
consumer,
new OffsetsInfo(consumer, topic),
SeekTypeDTO.BEGINNING,
null
new ConsumerPosition(EARLIEST, topic, List.of(), null, null)
);
assertThat(offsets).containsExactlyInAnyOrderEntriesOf(Map.of(tp2, 0L, tp3, 25L));
}
@Test
void offsets() {
@ParameterizedTest
@CsvSource({"TO_OFFSET", "FROM_OFFSET"})
void offsets(PollingModeDTO mode) {
var offsets = SeekOperations.getOffsetsForSeek(
consumer,
new OffsetsInfo(consumer, topic),
SeekTypeDTO.OFFSET,
Map.of(tp1, 10L, tp2, 10L, tp3, 26L)
new ConsumerPosition(
mode, topic, List.of(tp1, tp2, tp3), null,
new ConsumerPosition.Offsets(null, Map.of(tp1, 10L, tp2, 10L, tp3, 26L))
)
);
assertThat(offsets).containsExactlyInAnyOrderEntriesOf(Map.of(tp2, 10L, tp3, 26L));
}
@Test
void offsetsWithBoundsFixing() {
@ParameterizedTest
@CsvSource({"TO_OFFSET", "FROM_OFFSET"})
void offsetsWithBoundsFixing(PollingModeDTO mode) {
var offsets = SeekOperations.getOffsetsForSeek(
consumer,
new OffsetsInfo(consumer, topic),
SeekTypeDTO.OFFSET,
Map.of(tp1, 10L, tp2, 21L, tp3, 24L)
new ConsumerPosition(
mode, topic, List.of(tp1, tp2, tp3), null,
new ConsumerPosition.Offsets(null, Map.of(tp1, 10L, tp2, 21L, tp3, 24L))
)
);
assertThat(offsets).containsExactlyInAnyOrderEntriesOf(Map.of(tp2, 20L, tp3, 25L));
}
}
}
}

View file

@ -4,10 +4,9 @@ import static org.assertj.core.api.Assertions.assertThat;
import com.provectus.kafka.ui.AbstractIntegrationTest;
import com.provectus.kafka.ui.model.ConsumerPosition;
import com.provectus.kafka.ui.model.MessageFilterTypeDTO;
import com.provectus.kafka.ui.model.SeekDirectionDTO;
import com.provectus.kafka.ui.model.SeekTypeDTO;
import com.provectus.kafka.ui.model.PollingModeDTO;
import com.provectus.kafka.ui.model.TopicMessageEventDTO;
import com.provectus.kafka.ui.serdes.builtin.StringSerde;
import com.provectus.kafka.ui.service.ClustersStorage;
import com.provectus.kafka.ui.service.MessagesService;
import java.time.Duration;
@ -111,13 +110,12 @@ class TailingEmitterTest extends AbstractIntegrationTest {
return applicationContext.getBean(MessagesService.class)
.loadMessages(cluster, topicName,
new ConsumerPosition(SeekTypeDTO.LATEST, topic, null),
new ConsumerPosition(PollingModeDTO.TAILING, topic, List.of(), null, null),
query,
MessageFilterTypeDTO.STRING_CONTAINS,
null,
0,
SeekDirectionDTO.TAILING,
"String",
"String");
StringSerde.name(),
StringSerde.name());
}
private List<TopicMessageEventDTO> startTailing(String filterQuery) {

View file

@ -8,19 +8,24 @@ import com.provectus.kafka.ui.exception.TopicNotFoundException;
import com.provectus.kafka.ui.model.ConsumerPosition;
import com.provectus.kafka.ui.model.CreateTopicMessageDTO;
import com.provectus.kafka.ui.model.KafkaCluster;
import com.provectus.kafka.ui.model.SeekDirectionDTO;
import com.provectus.kafka.ui.model.SeekTypeDTO;
import com.provectus.kafka.ui.model.PollingModeDTO;
import com.provectus.kafka.ui.model.SmartFilterTestExecutionDTO;
import com.provectus.kafka.ui.model.TopicMessageDTO;
import com.provectus.kafka.ui.model.TopicMessageEventDTO;
import com.provectus.kafka.ui.producer.KafkaTestProducer;
import com.provectus.kafka.ui.serdes.builtin.StringSerde;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.UUID;
import java.util.concurrent.atomic.AtomicReference;
import org.apache.kafka.clients.admin.NewTopic;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.CsvSource;
import org.springframework.beans.factory.annotation.Autowired;
import reactor.core.publisher.Flux;
import reactor.test.StepVerifier;
@ -35,6 +40,8 @@ class MessagesServiceTest extends AbstractIntegrationTest {
KafkaCluster cluster;
Set<String> createdTopics = new HashSet<>();
@BeforeEach
void init() {
cluster = applicationContext
@ -43,6 +50,11 @@ class MessagesServiceTest extends AbstractIntegrationTest {
.get();
}
@AfterEach
void deleteCreatedTopics() {
createdTopics.forEach(MessagesServiceTest::deleteTopic);
}
@Test
void deleteTopicMessagesReturnsExceptionWhenTopicNotFound() {
StepVerifier.create(messagesService.deleteTopicMessages(cluster, NON_EXISTING_TOPIC, List.of()))
@ -60,7 +72,9 @@ class MessagesServiceTest extends AbstractIntegrationTest {
@Test
void loadMessagesReturnsExceptionWhenTopicNotFound() {
StepVerifier.create(messagesService
.loadMessages(cluster, NON_EXISTING_TOPIC, null, null, null, 1, null, "String", "String"))
.loadMessages(cluster, NON_EXISTING_TOPIC,
new ConsumerPosition(PollingModeDTO.TAILING, NON_EXISTING_TOPIC, List.of(), null, null),
null, null, 1, "String", "String"))
.expectError(TopicNotFoundException.class)
.verify();
}
@ -68,32 +82,84 @@ class MessagesServiceTest extends AbstractIntegrationTest {
@Test
void maskingAppliedOnConfiguredClusters() throws Exception {
String testTopic = MASKED_TOPICS_PREFIX + UUID.randomUUID();
createTopicWithCleanup(new NewTopic(testTopic, 1, (short) 1));
try (var producer = KafkaTestProducer.forKafka(kafka)) {
createTopic(new NewTopic(testTopic, 1, (short) 1));
producer.send(testTopic, "message1");
producer.send(testTopic, "message2").get();
Flux<TopicMessageDTO> msgsFlux = messagesService.loadMessages(
cluster,
testTopic,
new ConsumerPosition(SeekTypeDTO.BEGINNING, testTopic, null),
null,
null,
100,
SeekDirectionDTO.FORWARD,
StringSerde.name(),
StringSerde.name()
).filter(evt -> evt.getType() == TopicMessageEventDTO.TypeEnum.MESSAGE)
.map(TopicMessageEventDTO::getMessage);
// both messages should be masked
StepVerifier.create(msgsFlux)
.expectNextMatches(msg -> msg.getContent().equals("***"))
.expectNextMatches(msg -> msg.getContent().equals("***"))
.verifyComplete();
} finally {
deleteTopic(testTopic);
}
Flux<TopicMessageDTO> msgsFlux = messagesService.loadMessages(
cluster,
testTopic,
new ConsumerPosition(PollingModeDTO.EARLIEST, testTopic, List.of(), null, null),
null,
null,
100,
StringSerde.name(),
StringSerde.name()
).filter(evt -> evt.getType() == TopicMessageEventDTO.TypeEnum.MESSAGE)
.map(TopicMessageEventDTO::getMessage);
// both messages should be masked
StepVerifier.create(msgsFlux)
.expectNextMatches(msg -> msg.getContent().equals("***"))
.expectNextMatches(msg -> msg.getContent().equals("***"))
.verifyComplete();
}
@ParameterizedTest
@CsvSource({"EARLIEST", "LATEST"})
void cursorIsRegisteredAfterPollingIsDoneAndCanBeUsedForNextPagePolling(PollingModeDTO mode) {
String testTopic = MessagesServiceTest.class.getSimpleName() + UUID.randomUUID();
createTopicWithCleanup(new NewTopic(testTopic, 5, (short) 1));
int msgsToGenerate = 100;
int pageSize = (msgsToGenerate / 2) + 1;
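// 51 messages per page, so the 100 generated messages are consumed in two pages (51 + 49)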
try (var producer = KafkaTestProducer.forKafka(kafka)) {
for (int i = 0; i < msgsToGenerate; i++) {
producer.send(testTopic, "message_" + i);
}
}
var cursorIdCatcher = new AtomicReference<String>();
Flux<String> msgsFlux = messagesService.loadMessages(
cluster, testTopic,
new ConsumerPosition(mode, testTopic, List.of(), null, null),
null, null, pageSize, StringSerde.name(), StringSerde.name())
.doOnNext(evt -> {
if (evt.getType() == TopicMessageEventDTO.TypeEnum.DONE) {
assertThat(evt.getCursor()).isNotNull();
cursorIdCatcher.set(evt.getCursor().getId());
}
})
.filter(evt -> evt.getType() == TopicMessageEventDTO.TypeEnum.MESSAGE)
.map(evt -> evt.getMessage().getContent());
StepVerifier.create(msgsFlux)
.expectNextCount(pageSize)
.verifyComplete();
assertThat(cursorIdCatcher.get()).isNotNull();
Flux<String> remainingMsgs = messagesService.loadMessages(cluster, testTopic, cursorIdCatcher.get())
.doOnNext(evt -> {
if (evt.getType() == TopicMessageEventDTO.TypeEnum.DONE) {
assertThat(evt.getCursor()).isNull();
}
})
.filter(evt -> evt.getType() == TopicMessageEventDTO.TypeEnum.MESSAGE)
.map(evt -> evt.getMessage().getContent());
StepVerifier.create(remainingMsgs)
.expectNextCount(msgsToGenerate - pageSize)
.verifyComplete();
}
private void createTopicWithCleanup(NewTopic newTopic) {
createTopic(newTopic);
createdTopics.add(newTopic.name());
}
@Test

View file

@ -1,13 +1,16 @@
package com.provectus.kafka.ui.service;
import static com.provectus.kafka.ui.model.SeekTypeDTO.BEGINNING;
import static com.provectus.kafka.ui.model.SeekTypeDTO.LATEST;
import static com.provectus.kafka.ui.model.SeekTypeDTO.OFFSET;
import static com.provectus.kafka.ui.model.SeekTypeDTO.TIMESTAMP;
import static com.provectus.kafka.ui.model.PollingModeDTO.EARLIEST;
import static com.provectus.kafka.ui.model.PollingModeDTO.FROM_OFFSET;
import static com.provectus.kafka.ui.model.PollingModeDTO.FROM_TIMESTAMP;
import static com.provectus.kafka.ui.model.PollingModeDTO.LATEST;
import static com.provectus.kafka.ui.model.PollingModeDTO.TO_OFFSET;
import static com.provectus.kafka.ui.model.PollingModeDTO.TO_TIMESTAMP;
import static org.assertj.core.api.Assertions.assertThat;
import com.provectus.kafka.ui.AbstractIntegrationTest;
import com.provectus.kafka.ui.emitter.BackwardEmitter;
import com.provectus.kafka.ui.emitter.Cursor;
import com.provectus.kafka.ui.emitter.EnhancedConsumer;
import com.provectus.kafka.ui.emitter.ForwardEmitter;
import com.provectus.kafka.ui.emitter.PollingSettings;
@ -43,6 +46,7 @@ import org.apache.kafka.common.header.internals.RecordHeader;
import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
import org.mockito.Mockito;
import reactor.core.publisher.Flux;
import reactor.core.publisher.FluxSink;
import reactor.test.StepVerifier;
@ -57,16 +61,18 @@ class RecordEmitterTest extends AbstractIntegrationTest {
static final String EMPTY_TOPIC = TOPIC + "_empty";
static final List<Record> SENT_RECORDS = new ArrayList<>();
static final ConsumerRecordDeserializer RECORD_DESERIALIZER = createRecordsDeserializer();
static final Cursor.Tracking CURSOR_MOCK = Mockito.mock(Cursor.Tracking.class);
static final Predicate<TopicMessageDTO> NOOP_FILTER = m -> true;
@BeforeAll
static void generateMsgs() throws Exception {
createTopic(new NewTopic(TOPIC, PARTITIONS, (short) 1));
createTopic(new NewTopic(EMPTY_TOPIC, PARTITIONS, (short) 1));
long startTs = System.currentTimeMillis();
try (var producer = KafkaTestProducer.forKafka(kafka)) {
for (int partition = 0; partition < PARTITIONS; partition++) {
for (int i = 0; i < MSGS_PER_PARTITION; i++) {
long ts = System.currentTimeMillis() + i;
long ts = (startTs += 100);
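// timestamps are strictly increasing (100 ms apart) across all sent records,
// which keeps the timestamp-based seek assertions below deterministic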
var value = "msg_" + partition + "_" + i;
var metadata = producer.send(
new ProducerRecord<>(
@ -115,20 +121,22 @@ class RecordEmitterTest extends AbstractIntegrationTest {
void pollNothingOnEmptyTopic() {
var forwardEmitter = new ForwardEmitter(
this::createConsumer,
new ConsumerPosition(BEGINNING, EMPTY_TOPIC, null),
new ConsumerPosition(EARLIEST, EMPTY_TOPIC, List.of(), null, null),
100,
RECORD_DESERIALIZER,
NOOP_FILTER,
PollingSettings.createDefault()
PollingSettings.createDefault(),
CURSOR_MOCK
);
var backwardEmitter = new BackwardEmitter(
this::createConsumer,
new ConsumerPosition(BEGINNING, EMPTY_TOPIC, null),
new ConsumerPosition(EARLIEST, EMPTY_TOPIC, List.of(), null, null),
100,
RECORD_DESERIALIZER,
NOOP_FILTER,
PollingSettings.createDefault()
PollingSettings.createDefault(),
CURSOR_MOCK
);
StepVerifier.create(Flux.create(forwardEmitter))
@ -148,20 +156,22 @@ class RecordEmitterTest extends AbstractIntegrationTest {
void pollFullTopicFromBeginning() {
var forwardEmitter = new ForwardEmitter(
this::createConsumer,
new ConsumerPosition(BEGINNING, TOPIC, null),
new ConsumerPosition(EARLIEST, TOPIC, List.of(), null, null),
PARTITIONS * MSGS_PER_PARTITION,
RECORD_DESERIALIZER,
NOOP_FILTER,
PollingSettings.createDefault()
PollingSettings.createDefault(),
CURSOR_MOCK
);
var backwardEmitter = new BackwardEmitter(
this::createConsumer,
new ConsumerPosition(LATEST, TOPIC, null),
new ConsumerPosition(LATEST, TOPIC, List.of(), null, null),
PARTITIONS * MSGS_PER_PARTITION,
RECORD_DESERIALIZER,
NOOP_FILTER,
PollingSettings.createDefault()
PollingSettings.createDefault(),
CURSOR_MOCK
);
List<String> expectedValues = SENT_RECORDS.stream().map(Record::getValue).collect(Collectors.toList());
@ -180,20 +190,24 @@ class RecordEmitterTest extends AbstractIntegrationTest {
var forwardEmitter = new ForwardEmitter(
this::createConsumer,
new ConsumerPosition(OFFSET, TOPIC, targetOffsets),
new ConsumerPosition(FROM_OFFSET, TOPIC, List.copyOf(targetOffsets.keySet()), null,
new ConsumerPosition.Offsets(null, targetOffsets)),
PARTITIONS * MSGS_PER_PARTITION,
RECORD_DESERIALIZER,
NOOP_FILTER,
PollingSettings.createDefault()
PollingSettings.createDefault(),
CURSOR_MOCK
);
var backwardEmitter = new BackwardEmitter(
this::createConsumer,
new ConsumerPosition(OFFSET, TOPIC, targetOffsets),
new ConsumerPosition(TO_OFFSET, TOPIC, List.copyOf(targetOffsets.keySet()), null,
new ConsumerPosition.Offsets(null, targetOffsets)),
PARTITIONS * MSGS_PER_PARTITION,
RECORD_DESERIALIZER,
NOOP_FILTER,
PollingSettings.createDefault()
PollingSettings.createDefault(),
CURSOR_MOCK
);
var expectedValues = SENT_RECORDS.stream()
@ -213,50 +227,45 @@ class RecordEmitterTest extends AbstractIntegrationTest {
@Test
void pollWithTimestamps() {
Map<TopicPartition, Long> targetTimestamps = new HashMap<>();
final Map<TopicPartition, List<Record>> perPartition =
SENT_RECORDS.stream().collect(Collectors.groupingBy((r) -> r.tp));
for (int i = 0; i < PARTITIONS; i++) {
final List<Record> records = perPartition.get(new TopicPartition(TOPIC, i));
int randRecordIdx = ThreadLocalRandom.current().nextInt(records.size());
log.info("partition: {} position: {}", i, randRecordIdx);
targetTimestamps.put(
new TopicPartition(TOPIC, i),
records.get(randRecordIdx).getTimestamp()
);
}
var tsStats = SENT_RECORDS.stream().mapToLong(Record::getTimestamp).summaryStatistics();
//choosing ts in the middle
long targetTimestamp = tsStats.getMin() + ((tsStats.getMax() - tsStats.getMin()) / 2);
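// forward emitter is expected to return records with ts >= targetTimestamp, backward emitter - records with ts < targetTimestamp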
var forwardEmitter = new ForwardEmitter(
this::createConsumer,
new ConsumerPosition(TIMESTAMP, TOPIC, targetTimestamps),
new ConsumerPosition(FROM_TIMESTAMP, TOPIC, List.of(), targetTimestamp, null),
PARTITIONS * MSGS_PER_PARTITION,
RECORD_DESERIALIZER,
NOOP_FILTER,
PollingSettings.createDefault()
PollingSettings.createDefault(),
CURSOR_MOCK
);
expectEmitter(
forwardEmitter,
SENT_RECORDS.stream()
.filter(r -> r.getTimestamp() >= targetTimestamp)
.map(Record::getValue)
.collect(Collectors.toList())
);
var backwardEmitter = new BackwardEmitter(
this::createConsumer,
new ConsumerPosition(TIMESTAMP, TOPIC, targetTimestamps),
new ConsumerPosition(TO_TIMESTAMP, TOPIC, List.of(), targetTimestamp, null),
PARTITIONS * MSGS_PER_PARTITION,
RECORD_DESERIALIZER,
NOOP_FILTER,
PollingSettings.createDefault()
PollingSettings.createDefault(),
CURSOR_MOCK
);
var expectedValues = SENT_RECORDS.stream()
.filter(r -> r.getTimestamp() >= targetTimestamps.get(r.getTp()))
.map(Record::getValue)
.collect(Collectors.toList());
expectEmitter(forwardEmitter, expectedValues);
expectedValues = SENT_RECORDS.stream()
.filter(r -> r.getTimestamp() < targetTimestamps.get(r.getTp()))
.map(Record::getValue)
.collect(Collectors.toList());
expectEmitter(backwardEmitter, expectedValues);
expectEmitter(
backwardEmitter,
SENT_RECORDS.stream()
.filter(r -> r.getTimestamp() < targetTimestamp)
.map(Record::getValue)
.collect(Collectors.toList())
);
}
@Test
@ -269,11 +278,13 @@ class RecordEmitterTest extends AbstractIntegrationTest {
var backwardEmitter = new BackwardEmitter(
this::createConsumer,
new ConsumerPosition(OFFSET, TOPIC, targetOffsets),
new ConsumerPosition(TO_OFFSET, TOPIC, List.copyOf(targetOffsets.keySet()), null,
new ConsumerPosition.Offsets(null, targetOffsets)),
numMessages,
RECORD_DESERIALIZER,
NOOP_FILTER,
PollingSettings.createDefault()
PollingSettings.createDefault(),
CURSOR_MOCK
);
var expectedValues = SENT_RECORDS.stream()
@ -296,11 +307,13 @@ class RecordEmitterTest extends AbstractIntegrationTest {
var backwardEmitter = new BackwardEmitter(
this::createConsumer,
new ConsumerPosition(OFFSET, TOPIC, offsets),
new ConsumerPosition(TO_OFFSET, TOPIC, List.copyOf(offsets.keySet()), null,
new ConsumerPosition.Offsets(null, offsets)),
100,
RECORD_DESERIALIZER,
NOOP_FILTER,
PollingSettings.createDefault()
PollingSettings.createDefault(),
CURSOR_MOCK
);
expectEmitter(backwardEmitter,

View file

@ -7,8 +7,7 @@ import com.provectus.kafka.ui.AbstractIntegrationTest;
import com.provectus.kafka.ui.model.ConsumerPosition;
import com.provectus.kafka.ui.model.CreateTopicMessageDTO;
import com.provectus.kafka.ui.model.KafkaCluster;
import com.provectus.kafka.ui.model.SeekDirectionDTO;
import com.provectus.kafka.ui.model.SeekTypeDTO;
import com.provectus.kafka.ui.model.PollingModeDTO;
import com.provectus.kafka.ui.model.TopicMessageDTO;
import com.provectus.kafka.ui.model.TopicMessageEventDTO;
import com.provectus.kafka.ui.serdes.builtin.Int32Serde;
@ -20,6 +19,7 @@ import io.confluent.kafka.schemaregistry.avro.AvroSchema;
import io.confluent.kafka.schemaregistry.json.JsonSchema;
import io.confluent.kafka.schemaregistry.protobuf.ProtobufSchema;
import java.time.Duration;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.UUID;
@ -500,15 +500,10 @@ public class SendAndReadTests extends AbstractIntegrationTest {
TopicMessageDTO polled = messagesService.loadMessages(
targetCluster,
topic,
new ConsumerPosition(
SeekTypeDTO.BEGINNING,
topic,
Map.of(new TopicPartition(topic, 0), 0L)
),
new ConsumerPosition(PollingModeDTO.EARLIEST, topic, List.of(), null, null),
null,
null,
1,
SeekDirectionDTO.FORWARD,
msgToSend.getKeySerde().get(),
msgToSend.getValueSerde().get()
).filter(e -> e.getType().equals(TopicMessageEventDTO.TypeEnum.MESSAGE))

View file

@ -763,12 +763,12 @@ paths:
404:
description: Not found
/api/clusters/{clusterName}/topics/{topicName}/activeproducers:
get:
/api/clusters/{clusterName}/topics/{topicName}/smartfilters:
post:
tags:
- Topics
summary: get producer states for topic
operationId: getActiveProducerStates
- Messages
summary: registerFilter
operationId: registerFilter
parameters:
- name: clusterName
in: path
@ -780,15 +780,101 @@ paths:
required: true
schema:
type: string
requestBody:
content:
application/json:
schema:
$ref: '#/components/schemas/MessageFilterRegistration'
responses:
200:
description: OK
content:
application/json:
schema:
$ref: '#/components/schemas/MessageFilterId'
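
For orientation, a minimal sketch of how a client might call this endpoint and then reuse the returned id as the smartFilterId parameter of the messages/v2 endpoint below. The base URL, cluster name, topic name and filter expression are hypothetical, and Spring's WebClient is just one possible HTTP client:

import java.util.Map;
import org.springframework.web.reactive.function.client.WebClient;

public class SmartFilterRegistrationExample {
  public static void main(String[] args) {
    // hypothetical kafka-ui instance
    WebClient client = WebClient.create("http://localhost:8080");

    // POST a MessageFilterRegistration body; the response follows the MessageFilterId schema
    Map<?, ?> response = client.post()
        .uri("/api/clusters/{clusterName}/topics/{topicName}/smartfilters", "local", "my-topic")
        .bodyValue(Map.of("filterCode", "value != null")) // hypothetical filter expression
        .retrieve()
        .bodyToMono(Map.class)
        .block();

    // this id can then be passed as the smartFilterId query parameter when polling messages
    String smartFilterId = String.valueOf(response.get("id"));
    System.out.println("registered filter id: " + smartFilterId);
  }
}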
/api/clusters/{clusterName}/topics/{topicName}/messages/v2:
get:
tags:
- Messages
summary: getTopicMessagesV2
operationId: getTopicMessagesV2
parameters:
- name: clusterName
in: path
required: true
schema:
type: string
- name: topicName
in: path
required: true
schema:
type: string
- name: mode
in: query
description: Messages polling mode
required: true
schema:
$ref: "#/components/schemas/PollingMode"
- name: partitions
in: query
schema:
type: array
description: List of target partitions (all partitions if not provided)
items:
type: integer
- name: limit
in: query
description: Max number of messages that can be returned
schema:
type: integer
- name: stringFilter
in: query
description: query string for 'string contains' filtration
schema:
type: string
- name: smartFilterId
in: query
description: filter id that was registered beforehand
schema:
type: string
- name: offset
in: query
description: message offset to read from / to
schema:
type: integer
format: int64
- name: timestamp
in: query
description: timestamp (in ms) to read from / to
schema:
type: integer
format: int64
- name: keySerde
in: query
description: "Serde that should be used for deserialization. Will be chosen automatically if not set."
schema:
type: string
- name: valueSerde
in: query
description: "Serde that should be used for deserialization. Will be chosen automatically if not set."
schema:
type: string
- name: cursor
in: query
description: "id of the cursor for pagination"
schema:
type: string
responses:
200:
description: OK
content:
text/event-stream:
schema:
type: array
items:
$ref: '#/components/schemas/TopicProducerState'
$ref: '#/components/schemas/TopicMessageEvent'
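
A minimal sketch of cursor-based paging against this endpoint, assuming a hypothetical kafka-ui instance, cluster and topic, with field names taken from the TopicMessageEvent and TopicMessageNextPageCursor schemas defined further down: the first request polls a page, the DONE event carries the cursor id, and the follow-up request passes that id back via the cursor parameter (the registered cursor carries the position, limit and filters server-side):

import com.fasterxml.jackson.databind.JsonNode;
import org.springframework.core.ParameterizedTypeReference;
import org.springframework.http.MediaType;
import org.springframework.http.codec.ServerSentEvent;
import org.springframework.web.reactive.function.client.WebClient;
import reactor.core.publisher.Flux;

public class MessagesV2PagingExample {

  private static final WebClient CLIENT = WebClient.create("http://localhost:8080"); // hypothetical address

  public static void main(String[] args) {
    String cursorId = pollPage(null);   // first page, polled from EARLIEST
    if (cursorId != null) {
      pollPage(cursorId);               // next page, resumed from the registered cursor
    }
  }

  // returns the cursor id from the DONE event, or null if no next page was offered
  private static String pollPage(String cursor) {
    Flux<ServerSentEvent<JsonNode>> events = CLIENT.get()
        .uri(b -> {
          b.path("/api/clusters/{clusterName}/topics/{topicName}/messages/v2")
              .queryParam("mode", "EARLIEST")
              .queryParam("limit", 50);
          if (cursor != null) {
            b.queryParam("cursor", cursor);
          }
          return b.build("local", "my-topic");
        })
        .accept(MediaType.TEXT_EVENT_STREAM)
        .retrieve()
        .bodyToFlux(new ParameterizedTypeReference<ServerSentEvent<JsonNode>>() {});

    return events
        .filter(e -> e.data() != null && "DONE".equals(e.data().path("type").asText()))
        .map(e -> e.data().path("cursor").path("id"))
        .filter(JsonNode::isTextual)
        .map(JsonNode::asText)
        .blockLast();
  }
}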
/api/clusters/{clusterName}/topics/{topicName}/consumer-groups:
get:
@ -2646,31 +2732,6 @@ components:
- PROTOBUF
- UNKNOWN
TopicProducerState:
type: object
properties:
partition:
type: integer
format: int32
producerId:
type: integer
format: int64
producerEpoch:
type: integer
format: int32
lastSequence:
type: integer
format: int32
lastTimestampMs:
type: integer
format: int64
coordinatorEpoch:
type: integer
format: int32
currentTransactionStartOffset:
type: integer
format: int64
ConsumerGroup:
discriminator:
propertyName: inherit
@ -2783,13 +2844,14 @@ components:
- MESSAGE
- CONSUMING
- DONE
- EMIT_THROTTLING
message:
$ref: "#/components/schemas/TopicMessage"
phase:
$ref: "#/components/schemas/TopicMessagePhase"
consuming:
$ref: "#/components/schemas/TopicMessageConsuming"
cursor:
$ref: "#/components/schemas/TopicMessageNextPageCursor"
TopicMessagePhase:
type: object
@ -2819,6 +2881,11 @@ components:
filterApplyErrors:
type: integer
TopicMessageNextPageCursor:
type: object
properties:
id:
type: string
TopicMessage:
type: object
@ -2891,6 +2958,29 @@ components:
- TIMESTAMP
- LATEST
MessageFilterRegistration:
type: object
properties:
filterCode:
type: string
MessageFilterId:
type: object
properties:
id:
type: string
PollingMode:
type: string
enum:
- FROM_OFFSET
- TO_OFFSET
- FROM_TIMESTAMP
- TO_TIMESTAMP
- LATEST
- EARLIEST
- TAILING
MessageFilterType:
type: string
enum:

View file

@ -19,7 +19,7 @@
<selenium.version>4.8.1</selenium.version>
<selenide.version>6.12.3</selenide.version>
<testng.version>7.7.1</testng.version>
<allure.version>2.23.0</allure.version>
<allure.version>2.22.2</allure.version>
<qase.io.version>3.0.5</qase.io.version>
<aspectj.version>1.9.9.1</aspectj.version>
<assertj.version>3.24.2</assertj.version>

View file

@ -10,27 +10,25 @@ import lombok.experimental.Accessors;
@Accessors(chain = true)
public class Schema {
private static final String USER_DIR = "user.dir";
private String name, valuePath;
private SchemaType type;
public static Schema createSchemaAvro() {
return new Schema().setName("schema_avro-" + randomAlphabetic(5))
.setType(SchemaType.AVRO)
.setValuePath(System.getProperty(USER_DIR) + "/src/main/resources/testData/schemas/schema_avro_value.json");
.setValuePath(System.getProperty("user.dir") + "/src/main/resources/testData/schemas/schema_avro_value.json");
}
public static Schema createSchemaJson() {
return new Schema().setName("schema_json-" + randomAlphabetic(5))
.setType(SchemaType.JSON)
.setValuePath(System.getProperty(USER_DIR) + "/src/main/resources/testData/schemas/schema_json_Value.json");
.setValuePath(System.getProperty("user.dir") + "/src/main/resources/testData/schemas/schema_json_Value.json");
}
public static Schema createSchemaProtobuf() {
return new Schema().setName("schema_protobuf-" + randomAlphabetic(5))
.setType(SchemaType.PROTOBUF)
.setValuePath(
System.getProperty(USER_DIR) + "/src/main/resources/testData/schemas/schema_protobuf_value.txt");
System.getProperty("user.dir") + "/src/main/resources/testData/schemas/schema_protobuf_value.txt");
}
}

View file

@ -16,8 +16,6 @@ import java.util.stream.Stream;
public class BrokersConfigTab extends BasePage {
protected List<SelenideElement> editBtn = $$x("//button[@aria-label='editAction']");
protected SelenideElement searchByKeyField = $x("//input[@placeholder='Search by Key or Value']");
protected SelenideElement sourceInfoIcon = $x("//div[text()='Source']/..//div/div[@class]");
protected SelenideElement sourceInfoTooltip = $x("//div[text()='Source']/..//div/div[@style]");
protected ElementsCollection editBtns = $$x("//button[@aria-label='editAction']");

View file

@ -19,8 +19,6 @@ import io.qameta.allure.Step;
public class TopicCreateEditForm extends BasePage {
private static final String RETENTION_BYTES = "retentionBytes";
protected SelenideElement timeToRetainField = $x("//input[@id='timeToRetain']");
protected SelenideElement partitionsField = $x("//input[@name='partitions']");
protected SelenideElement nameField = $(id("topicFormName"));
@ -140,12 +138,12 @@ public class TopicCreateEditForm extends BasePage {
@Step
public TopicCreateEditForm selectRetentionBytes(String visibleValue) {
return selectFromDropDownByVisibleText(RETENTION_BYTES, visibleValue);
return selectFromDropDownByVisibleText("retentionBytes", visibleValue);
}
@Step
public TopicCreateEditForm selectRetentionBytes(Long optionValue) {
return selectFromDropDownByOptionValue(RETENTION_BYTES, optionValue.toString());
return selectFromDropDownByOptionValue("retentionBytes", optionValue.toString());
}
@Step
@ -204,7 +202,7 @@ public class TopicCreateEditForm extends BasePage {
@Step
public String getMaxSizeOnDisk() {
return new KafkaUiSelectElement(RETENTION_BYTES).getCurrentValue();
return new KafkaUiSelectElement("retentionBytes").getCurrentValue();
}
@Step

View file

@ -1 +1 @@
v18.17.1
v16.15.0

View file

@ -86,7 +86,7 @@
"eslint": "^8.3.0",
"eslint-config-airbnb": "^19.0.4",
"eslint-config-airbnb-typescript": "^17.0.0",
"eslint-config-prettier": "^9.0.0",
"eslint-config-prettier": "^8.5.0",
"eslint-import-resolver-node": "^0.3.6",
"eslint-import-resolver-typescript": "^3.2.7",
"eslint-plugin-import": "^2.26.0",
@ -106,7 +106,7 @@
"vite-plugin-ejs": "^1.6.4"
},
"engines": {
"node": "v18.17.1",
"pnpm": "^8.6.12"
"node": "v16.15.0",
"pnpm": "^7.4.0"
}
}

File diff suppressed because it is too large

View file

@ -34,19 +34,14 @@ const Configs: React.FC = () => {
const getData = () => {
return data
.filter((item) => {
const nameMatch = item.name
.toLocaleLowerCase()
.includes(keyword.toLocaleLowerCase());
return nameMatch
? true
: item.value &&
item.value
.toLocaleLowerCase()
.includes(keyword.toLocaleLowerCase()); // try to match the keyword on any of the item.value elements when nameMatch fails but item.value exists
})
.filter(
(item) =>
item.name.toLocaleLowerCase().indexOf(keyword.toLocaleLowerCase()) >
-1
)
.sort((a, b) => {
if (a.source === b.source) return 0;
return a.source === ConfigSource.DYNAMIC_BROKER_CONFIG ? -1 : 1;
});
};
@ -100,7 +95,7 @@ const Configs: React.FC = () => {
<S.SearchWrapper>
<Search
onChange={setKeyword}
placeholder="Search by Key or Value"
placeholder="Search by Key"
value={keyword}
/>
</S.SearchWrapper>

View file

@ -13,7 +13,7 @@ import { brokersPayload } from 'lib/fixtures/brokers';
import { clusterStatsPayload } from 'lib/fixtures/clusters';
const clusterName = 'local';
const brokerId = 200;
const brokerId = 1;
const activeClassName = 'is-active';
const brokerLogdir = {
pageText: 'brokerLogdir',

View file

@ -73,13 +73,13 @@ const BrokersList: React.FC = () => {
header: 'Broker ID',
accessorKey: 'brokerId',
// eslint-disable-next-line react/no-unstable-nested-components
cell: ({ getValue }) => (
cell: ({ row: { id }, getValue }) => (
<S.RowCell>
<LinkCell
value={`${getValue<string | number>()}`}
to={encodeURIComponent(`${getValue<string | number>()}`)}
/>
{getValue<string | number>() === activeControllers && (
{id === String(activeControllers) && (
<Tooltip
value={<CheckMarkRoundIcon />}
content="Active Controller"

View file

@ -56,11 +56,11 @@ describe('BrokersList Component', () => {
});
it('opens broker when row clicked', async () => {
renderComponent();
await userEvent.click(screen.getByRole('cell', { name: '100' }));
await userEvent.click(screen.getByRole('cell', { name: '0' }));
await waitFor(() =>
expect(mockedUsedNavigate).toBeCalledWith(
clusterBrokerPath(clusterName, '100')
clusterBrokerPath(clusterName, '0')
)
);
});
@ -124,39 +124,6 @@ describe('BrokersList Component', () => {
});
});
describe('BrokersList', () => {
describe('when the brokers are loaded', () => {
const testActiveControllers = 0;
beforeEach(() => {
(useBrokers as jest.Mock).mockImplementation(() => ({
data: brokersPayload,
}));
(useClusterStats as jest.Mock).mockImplementation(() => ({
data: clusterStatsPayload,
}));
});
it(`Indicates correct active cluster`, async () => {
renderComponent();
await waitFor(() =>
expect(screen.getByRole('tooltip')).toBeInTheDocument()
);
});
it(`Correct display even if there is no active cluster: ${testActiveControllers} `, async () => {
(useClusterStats as jest.Mock).mockImplementation(() => ({
data: {
...clusterStatsPayload,
activeControllers: testActiveControllers,
},
}));
renderComponent();
await waitFor(() =>
expect(screen.queryByRole('tooltip')).not.toBeInTheDocument()
);
});
});
});
describe('when diskUsage is empty', () => {
beforeEach(() => {
(useBrokers as jest.Mock).mockImplementation(() => ({
@ -190,11 +157,11 @@ describe('BrokersList Component', () => {
});
it('opens broker when row clicked', async () => {
renderComponent();
await userEvent.click(screen.getByRole('cell', { name: '100' }));
await userEvent.click(screen.getByRole('cell', { name: '1' }));
await waitFor(() =>
expect(mockedUsedNavigate).toBeCalledWith(
clusterBrokerPath(clusterName, '100')
clusterBrokerPath(clusterName, '1')
)
);
});

View file

@ -15,7 +15,7 @@ enum Filters {
PARTITION_COUNT = 'partitionCount',
REPLICATION_FACTOR = 'replicationFactor',
INSYNC_REPLICAS = 'inSyncReplicas',
CLEANUP_POLICY = 'cleanUpPolicy',
CLEANUP_POLICY = 'Delete',
}
const New: React.FC = () => {

View file

@ -60,16 +60,16 @@ describe('New', () => {
await userEvent.clear(screen.getByPlaceholderText('Topic Name'));
await userEvent.tab();
await expect(
screen.getByText('Topic Name is required')
screen.getByText('name is a required field')
).toBeInTheDocument();
await userEvent.type(
screen.getByLabelText('Number of Partitions *'),
screen.getByLabelText('Number of partitions *'),
minValue
);
await userEvent.clear(screen.getByLabelText('Number of Partitions *'));
await userEvent.clear(screen.getByLabelText('Number of partitions *'));
await userEvent.tab();
await expect(
screen.getByText('Number of Partitions is required and must be a number')
screen.getByText('Number of partitions is required and must be a number')
).toBeInTheDocument();
expect(createTopicMock).not.toHaveBeenCalled();
@ -89,7 +89,7 @@ describe('New', () => {
renderComponent(clusterTopicNewPath(clusterName));
await userEvent.type(screen.getByPlaceholderText('Topic Name'), topicName);
await userEvent.type(
screen.getByLabelText('Number of Partitions *'),
screen.getByLabelText('Number of partitions *'),
minValue
);
await userEvent.click(screen.getByText('Create topic'));

View file

@ -44,11 +44,9 @@ const Metrics: React.FC = () => {
if (data.progress) {
return (
<S.ProgressContainer>
<S.ProgressPct>
{Math.floor(data.progress.completenessPercent || 0)}%
</S.ProgressPct>
<S.ProgressBarWrapper>
<ProgressBar completed={data.progress.completenessPercent || 0} />
<span> {Math.floor(data.progress.completenessPercent || 0)} %</span>
</S.ProgressBarWrapper>
<ActionButton
onClick={async () => {

View file

@ -42,10 +42,3 @@ export const ProgressBarWrapper = styled.div`
align-items: center;
width: 280px;
`;
export const ProgressPct = styled.span`
font-size: 15px;
font-weight: bold;
line-height: 1.5;
color: ${({ theme }) => theme.statictics.progressPctColor};
`;

View file

@ -1,5 +1,4 @@
import styled from 'styled-components';
import Input from 'components/common/Input/Input';
export const Column = styled.div`
display: flex;
@ -17,10 +16,6 @@ export const CustomParamsHeading = styled.h4`
color: ${({ theme }) => theme.heading.h4};
`;
export const MessageSizeInput = styled(Input)`
min-width: 195px;
`;
export const Label = styled.div`
display: flex;
gap: 16px;

View file

@ -109,12 +109,12 @@ const TopicForm: React.FC<Props> = ({
{!isEditing && (
<div>
<InputLabel htmlFor="topicFormNumberOfPartitions">
Number of Partitions *
Number of partitions *
</InputLabel>
<Input
id="topicFormNumberOfPartitions"
type="number"
placeholder="Number of Partitions"
placeholder="Number of partitions"
min="1"
name="partitions"
positiveOnly
@ -228,7 +228,7 @@ const TopicForm: React.FC<Props> = ({
<InputLabel htmlFor="topicFormMaxMessageBytes">
Maximum message size in bytes
</InputLabel>
<S.MessageSizeInput
<Input
id="topicFormMaxMessageBytes"
type="number"
placeholder="Maximum message size"

View file

@ -37,7 +37,7 @@ describe('TopicForm', () => {
expectByRoleAndNameToBeInDocument('textbox', 'Topic Name *');
expectByRoleAndNameToBeInDocument('spinbutton', 'Number of Partitions *');
expectByRoleAndNameToBeInDocument('spinbutton', 'Number of partitions *');
expectByRoleAndNameToBeInDocument('spinbutton', 'Replication Factor');
expectByRoleAndNameToBeInDocument('spinbutton', 'Min In Sync Replicas');

View file

@ -7,7 +7,6 @@ const CheckMarkRoundIcon: React.FC = () => {
height="14"
viewBox="0 0 14 14"
fill="none"
role="tooltip"
xmlns="http://www.w3.org/2000/svg"
>
<path

View file

@ -1,8 +1,8 @@
import { BrokerConfig, BrokersLogdirs, ConfigSource } from 'generated-sources';
export const brokersPayload = [
{ id: 100, host: 'b-1.test.kafka.amazonaws.com', port: 9092 },
{ id: 200, host: 'b-2.test.kafka.amazonaws.com', port: 9092 },
{ id: 1, host: 'b-1.test.kafka.amazonaws.com', port: 9092 },
{ id: 2, host: 'b-2.test.kafka.amazonaws.com', port: 9092 },
];
const partition = {

View file

@ -32,15 +32,15 @@ export const clustersPayload: Cluster[] = [
export const clusterStatsPayload = {
brokerCount: 2,
activeControllers: 100,
activeControllers: 1,
onlinePartitionCount: 138,
offlinePartitionCount: 0,
inSyncReplicasCount: 239,
outOfSyncReplicasCount: 0,
underReplicatedPartitionCount: 0,
diskUsage: [
{ brokerId: 100, segmentSize: 334567, segmentCount: 245 },
{ brokerId: 200, segmentSize: 12345678, segmentCount: 121 },
{ brokerId: 0, segmentSize: 334567, segmentCount: 245 },
{ brokerId: 1, segmentSize: 12345678, segmentCount: 121 },
],
version: '2.2.1',
};

View file

@ -13,10 +13,10 @@ describe('dateTimeHelpers', () => {
it('should output the correct date', () => {
const date = new Date();
expect(formatTimestamp(date)).toBe(
date.toLocaleString([], { hourCycle: 'h23' })
date.toLocaleString([], { hour12: false })
);
expect(formatTimestamp(date.getTime())).toBe(
date.toLocaleString([], { hourCycle: 'h23' })
date.toLocaleString([], { hour12: false })
);
});
});

Some files were not shown because too many files have changed in this diff Show more