DTO suffix added to all generated models (#872)
Co-authored-by: Ilya Kuramshin <ikuramshin@provectus.com>
parent 962322f20f
commit da477b2896

67 changed files with 755 additions and 751 deletions
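The change itself is mechanical: every model class emitted by the OpenAPI code generator gets a DTO suffix, and the hand-written code in the diff below is only updated to the new class names. The generator configuration that produces the suffix is not part of this excerpt; as a hedged illustration only, openapi-generator's modelNameSuffix option is the usual way to get this effect in a Maven build (plugin coordinates and surrounding settings here are assumptions, not taken from the commit):

<!-- Illustrative sketch only: how a DTO suffix is typically applied to generated models -->
<plugin>
  <groupId>org.openapitools</groupId>
  <artifactId>openapi-generator-maven-plugin</artifactId>
  <configuration>
    <!-- Every generated model class is renamed, e.g. Topic becomes TopicDTO -->
    <modelNameSuffix>DTO</modelNameSuffix>
  </configuration>
</plugin>
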
@@ -3,7 +3,7 @@ package com.provectus.kafka.ui.client;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.provectus.kafka.ui.exception.UnprocessableEntityException;
-import com.provectus.kafka.ui.model.KsqlCommandResponse;
+import com.provectus.kafka.ui.model.KsqlCommandResponseDTO;
import com.provectus.kafka.ui.strategy.ksql.statement.BaseStrategy;
import lombok.RequiredArgsConstructor;
import lombok.SneakyThrows;
@@ -23,7 +23,7 @@ public class KsqlClient {
private final WebClient webClient;
private final ObjectMapper mapper;

-public Mono<KsqlCommandResponse> execute(BaseStrategy ksqlStatement) {
+public Mono<KsqlCommandResponseDTO> execute(BaseStrategy ksqlStatement) {
return webClient.post()
.uri(ksqlStatement.getUri())
.accept(new MediaType("application", "vnd.ksql.v1+json"))

@@ -1,12 +1,12 @@
package com.provectus.kafka.ui.controller;

import com.provectus.kafka.ui.api.BrokersApi;
-import com.provectus.kafka.ui.model.Broker;
-import com.provectus.kafka.ui.model.BrokerConfig;
-import com.provectus.kafka.ui.model.BrokerConfigItem;
-import com.provectus.kafka.ui.model.BrokerLogdirUpdate;
-import com.provectus.kafka.ui.model.BrokerMetrics;
-import com.provectus.kafka.ui.model.BrokersLogdirs;
+import com.provectus.kafka.ui.model.BrokerConfigDTO;
+import com.provectus.kafka.ui.model.BrokerConfigItemDTO;
+import com.provectus.kafka.ui.model.BrokerDTO;
+import com.provectus.kafka.ui.model.BrokerLogdirUpdateDTO;
+import com.provectus.kafka.ui.model.BrokerMetricsDTO;
+import com.provectus.kafka.ui.model.BrokersLogdirsDTO;
import com.provectus.kafka.ui.service.ClusterService;
import java.util.List;
import lombok.RequiredArgsConstructor;
@@ -24,7 +24,7 @@ public class BrokersController implements BrokersApi {
private final ClusterService clusterService;

@Override
-public Mono<ResponseEntity<BrokerMetrics>> getBrokersMetrics(String clusterName, Integer id,
+public Mono<ResponseEntity<BrokerMetricsDTO>> getBrokersMetrics(String clusterName, Integer id,
ServerWebExchange exchange) {
return clusterService.getBrokerMetrics(clusterName, id)
.map(ResponseEntity::ok)
@@ -32,13 +32,13 @@ public class BrokersController implements BrokersApi {
}

@Override
-public Mono<ResponseEntity<Flux<Broker>>> getBrokers(String clusterName,
+public Mono<ResponseEntity<Flux<BrokerDTO>>> getBrokers(String clusterName,
ServerWebExchange exchange) {
return Mono.just(ResponseEntity.ok(clusterService.getBrokers(clusterName)));
}

@Override
-public Mono<ResponseEntity<Flux<BrokersLogdirs>>> getAllBrokersLogdirs(String clusterName,
+public Mono<ResponseEntity<Flux<BrokersLogdirsDTO>>> getAllBrokersLogdirs(String clusterName,
List<Integer> brokers,
ServerWebExchange exchange
) {
@@ -46,14 +46,14 @@ public class BrokersController implements BrokersApi {
}

@Override
-public Mono<ResponseEntity<Flux<BrokerConfig>>> getBrokerConfig(String clusterName, Integer id,
+public Mono<ResponseEntity<Flux<BrokerConfigDTO>>> getBrokerConfig(String clusterName, Integer id,
ServerWebExchange exchange) {
return Mono.just(ResponseEntity.ok(clusterService.getBrokerConfig(clusterName, id)));
}

@Override
public Mono<ResponseEntity<Void>> updateBrokerTopicPartitionLogDir(
-String clusterName, Integer id, Mono<BrokerLogdirUpdate> brokerLogdir,
+String clusterName, Integer id, Mono<BrokerLogdirUpdateDTO> brokerLogdir,
ServerWebExchange exchange) {
return brokerLogdir
.flatMap(bld -> clusterService.updateBrokerLogDir(clusterName, id, bld))
@@ -64,7 +64,7 @@ public class BrokersController implements BrokersApi {
public Mono<ResponseEntity<Void>> updateBrokerConfigByName(String clusterName,
Integer id,
String name,
-Mono<BrokerConfigItem> brokerConfig,
+Mono<BrokerConfigItemDTO> brokerConfig,
ServerWebExchange exchange) {
return brokerConfig
.flatMap(bci -> clusterService.updateBrokerConfigByName(

@@ -1,9 +1,9 @@
package com.provectus.kafka.ui.controller;

import com.provectus.kafka.ui.api.ClustersApi;
-import com.provectus.kafka.ui.model.Cluster;
-import com.provectus.kafka.ui.model.ClusterMetrics;
-import com.provectus.kafka.ui.model.ClusterStats;
+import com.provectus.kafka.ui.model.ClusterDTO;
+import com.provectus.kafka.ui.model.ClusterMetricsDTO;
+import com.provectus.kafka.ui.model.ClusterStatsDTO;
import com.provectus.kafka.ui.service.ClusterService;
import lombok.RequiredArgsConstructor;
import lombok.extern.log4j.Log4j2;
@@ -20,7 +20,7 @@ public class ClustersController implements ClustersApi {
private final ClusterService clusterService;

@Override
-public Mono<ResponseEntity<ClusterMetrics>> getClusterMetrics(String clusterName,
+public Mono<ResponseEntity<ClusterMetricsDTO>> getClusterMetrics(String clusterName,
ServerWebExchange exchange) {
return clusterService.getClusterMetrics(clusterName)
.map(ResponseEntity::ok)
@@ -28,7 +28,7 @@ public class ClustersController implements ClustersApi {
}

@Override
-public Mono<ResponseEntity<ClusterStats>> getClusterStats(String clusterName,
+public Mono<ResponseEntity<ClusterStatsDTO>> getClusterStats(String clusterName,
ServerWebExchange exchange) {
return clusterService.getClusterStats(clusterName)
.map(ResponseEntity::ok)
@@ -36,12 +36,12 @@ public class ClustersController implements ClustersApi {
}

@Override
-public Mono<ResponseEntity<Flux<Cluster>>> getClusters(ServerWebExchange exchange) {
+public Mono<ResponseEntity<Flux<ClusterDTO>>> getClusters(ServerWebExchange exchange) {
return Mono.just(ResponseEntity.ok(Flux.fromIterable(clusterService.getClusters())));
}

@Override
-public Mono<ResponseEntity<Cluster>> updateClusterInfo(String clusterName,
+public Mono<ResponseEntity<ClusterDTO>> updateClusterInfo(String clusterName,
ServerWebExchange exchange) {
return clusterService.updateCluster(clusterName).map(ResponseEntity::ok);
}

@@ -5,10 +5,10 @@ import static java.util.stream.Collectors.toMap;
import com.provectus.kafka.ui.api.ConsumerGroupsApi;
import com.provectus.kafka.ui.exception.ClusterNotFoundException;
import com.provectus.kafka.ui.exception.ValidationException;
-import com.provectus.kafka.ui.model.ConsumerGroup;
-import com.provectus.kafka.ui.model.ConsumerGroupDetails;
-import com.provectus.kafka.ui.model.ConsumerGroupOffsetsReset;
-import com.provectus.kafka.ui.model.PartitionOffset;
+import com.provectus.kafka.ui.model.ConsumerGroupDTO;
+import com.provectus.kafka.ui.model.ConsumerGroupDetailsDTO;
+import com.provectus.kafka.ui.model.ConsumerGroupOffsetsResetDTO;
+import com.provectus.kafka.ui.model.PartitionOffsetDTO;
import com.provectus.kafka.ui.service.ClusterService;
import com.provectus.kafka.ui.service.ClustersStorage;
import com.provectus.kafka.ui.service.OffsetsResetService;
@@ -39,7 +39,7 @@ public class ConsumerGroupsController implements ConsumerGroupsApi {
}

@Override
-public Mono<ResponseEntity<ConsumerGroupDetails>> getConsumerGroup(
+public Mono<ResponseEntity<ConsumerGroupDetailsDTO>> getConsumerGroup(
String clusterName, String consumerGroupId, ServerWebExchange exchange) {
return clusterService.getConsumerGroupDetail(clusterName, consumerGroupId)
.map(ResponseEntity::ok);
@@ -47,7 +47,7 @@ public class ConsumerGroupsController implements ConsumerGroupsApi {


@Override
-public Mono<ResponseEntity<Flux<ConsumerGroup>>> getConsumerGroups(String clusterName,
+public Mono<ResponseEntity<Flux<ConsumerGroupDTO>>> getConsumerGroups(String clusterName,
ServerWebExchange exchange) {
return clusterService.getConsumerGroups(clusterName)
.map(Flux::fromIterable)
@@ -56,7 +56,7 @@ public class ConsumerGroupsController implements ConsumerGroupsApi {
}

@Override
-public Mono<ResponseEntity<Flux<ConsumerGroup>>> getTopicConsumerGroups(
+public Mono<ResponseEntity<Flux<ConsumerGroupDTO>>> getTopicConsumerGroups(
String clusterName, String topicName, ServerWebExchange exchange) {
return clusterService.getConsumerGroups(clusterName, Optional.of(topicName))
.map(Flux::fromIterable)
@@ -67,7 +67,7 @@ public class ConsumerGroupsController implements ConsumerGroupsApi {

@Override
public Mono<ResponseEntity<Void>> resetConsumerGroupOffsets(String clusterName, String group,
-Mono<ConsumerGroupOffsetsReset>
+Mono<ConsumerGroupOffsetsResetDTO>
consumerGroupOffsetsReset,
ServerWebExchange exchange) {
return consumerGroupOffsetsReset.flatMap(reset -> {
@@ -101,7 +101,7 @@ public class ConsumerGroupsController implements ConsumerGroupsApi {
);
}
Map<Integer, Long> offsets = reset.getPartitionsOffsets().stream()
-.collect(toMap(PartitionOffset::getPartition, PartitionOffset::getOffset));
+.collect(toMap(PartitionOffsetDTO::getPartition, PartitionOffsetDTO::getOffset));
return offsetsResetService.resetToOffsets(cluster, group, reset.getTopic(), offsets);
default:
return Mono.error(

@@ -1,14 +1,14 @@
package com.provectus.kafka.ui.controller;

import com.provectus.kafka.ui.api.KafkaConnectApi;
-import com.provectus.kafka.ui.model.Connect;
-import com.provectus.kafka.ui.model.Connector;
-import com.provectus.kafka.ui.model.ConnectorAction;
-import com.provectus.kafka.ui.model.ConnectorPlugin;
-import com.provectus.kafka.ui.model.ConnectorPluginConfigValidationResponse;
-import com.provectus.kafka.ui.model.FullConnectorInfo;
-import com.provectus.kafka.ui.model.NewConnector;
-import com.provectus.kafka.ui.model.Task;
+import com.provectus.kafka.ui.model.ConnectDTO;
+import com.provectus.kafka.ui.model.ConnectorActionDTO;
+import com.provectus.kafka.ui.model.ConnectorDTO;
+import com.provectus.kafka.ui.model.ConnectorPluginConfigValidationResponseDTO;
+import com.provectus.kafka.ui.model.ConnectorPluginDTO;
+import com.provectus.kafka.ui.model.FullConnectorInfoDTO;
+import com.provectus.kafka.ui.model.NewConnectorDTO;
+import com.provectus.kafka.ui.model.TaskDTO;
import com.provectus.kafka.ui.service.KafkaConnectService;
import java.util.Map;
import javax.validation.Valid;
@@ -27,7 +27,7 @@ public class KafkaConnectController implements KafkaConnectApi {
private final KafkaConnectService kafkaConnectService;

@Override
-public Mono<ResponseEntity<Flux<Connect>>> getConnects(String clusterName,
+public Mono<ResponseEntity<Flux<ConnectDTO>>> getConnects(String clusterName,
ServerWebExchange exchange) {
return kafkaConnectService.getConnects(clusterName).map(ResponseEntity::ok);
}
@@ -40,15 +40,15 @@ public class KafkaConnectController implements KafkaConnectApi {
}

@Override
-public Mono<ResponseEntity<Connector>> createConnector(String clusterName, String connectName,
-@Valid Mono<NewConnector> connector,
+public Mono<ResponseEntity<ConnectorDTO>> createConnector(String clusterName, String connectName,
+@Valid Mono<NewConnectorDTO> connector,
ServerWebExchange exchange) {
return kafkaConnectService.createConnector(clusterName, connectName, connector)
.map(ResponseEntity::ok);
}

@Override
-public Mono<ResponseEntity<Connector>> getConnector(String clusterName, String connectName,
+public Mono<ResponseEntity<ConnectorDTO>> getConnector(String clusterName, String connectName,
String connectorName,
ServerWebExchange exchange) {
return kafkaConnectService.getConnector(clusterName, connectName, connectorName)
@@ -65,7 +65,7 @@ public class KafkaConnectController implements KafkaConnectApi {


@Override
-public Mono<ResponseEntity<Flux<FullConnectorInfo>>> getAllConnectors(
+public Mono<ResponseEntity<Flux<FullConnectorInfoDTO>>> getAllConnectors(
String clusterName,
String search,
ServerWebExchange exchange
@@ -83,7 +83,8 @@ public class KafkaConnectController implements KafkaConnectApi {
}

@Override
-public Mono<ResponseEntity<Connector>> setConnectorConfig(String clusterName, String connectName,
+public Mono<ResponseEntity<ConnectorDTO>> setConnectorConfig(String clusterName,
+String connectName,
String connectorName,
@Valid Mono<Object> requestBody,
ServerWebExchange exchange) {
@@ -95,16 +96,17 @@ public class KafkaConnectController implements KafkaConnectApi {
@Override
public Mono<ResponseEntity<Void>> updateConnectorState(String clusterName, String connectName,
String connectorName,
-ConnectorAction action,
+ConnectorActionDTO action,
ServerWebExchange exchange) {
return kafkaConnectService.updateConnectorState(clusterName, connectName, connectorName, action)
.map(ResponseEntity::ok);
}

@Override
-public Mono<ResponseEntity<Flux<Task>>> getConnectorTasks(String clusterName, String connectName,
-String connectorName,
-ServerWebExchange exchange) {
+public Mono<ResponseEntity<Flux<TaskDTO>>> getConnectorTasks(String clusterName,
+String connectName,
+String connectorName,
+ServerWebExchange exchange) {
return Mono.just(ResponseEntity
.ok(kafkaConnectService.getConnectorTasks(clusterName, connectName, connectorName)));
}
@@ -118,14 +120,14 @@ public class KafkaConnectController implements KafkaConnectApi {
}

@Override
-public Mono<ResponseEntity<Flux<ConnectorPlugin>>> getConnectorPlugins(
+public Mono<ResponseEntity<Flux<ConnectorPluginDTO>>> getConnectorPlugins(
String clusterName, String connectName, ServerWebExchange exchange) {
return kafkaConnectService.getConnectorPlugins(clusterName, connectName)
.map(ResponseEntity::ok);
}

@Override
-public Mono<ResponseEntity<ConnectorPluginConfigValidationResponse>>
+public Mono<ResponseEntity<ConnectorPluginConfigValidationResponseDTO>>
validateConnectorPluginConfig(
String clusterName, String connectName, String pluginName, @Valid Mono<Object> requestBody,
ServerWebExchange exchange) {

@@ -1,8 +1,8 @@
package com.provectus.kafka.ui.controller;

import com.provectus.kafka.ui.api.KsqlApi;
-import com.provectus.kafka.ui.model.KsqlCommand;
-import com.provectus.kafka.ui.model.KsqlCommandResponse;
+import com.provectus.kafka.ui.model.KsqlCommandDTO;
+import com.provectus.kafka.ui.model.KsqlCommandResponseDTO;
import com.provectus.kafka.ui.service.KsqlService;
import lombok.RequiredArgsConstructor;
import lombok.extern.log4j.Log4j2;
@@ -18,8 +18,9 @@ public class KsqlController implements KsqlApi {
private final KsqlService ksqlService;

@Override
-public Mono<ResponseEntity<KsqlCommandResponse>> executeKsqlCommand(String clusterName,
-Mono<KsqlCommand> ksqlCommand,
+public Mono<ResponseEntity<KsqlCommandResponseDTO>> executeKsqlCommand(String clusterName,
+Mono<KsqlCommandDTO>
+ksqlCommand,
ServerWebExchange exchange) {
return ksqlService.executeKsqlCommand(clusterName, ksqlCommand).map(ResponseEntity::ok);
}

@@ -2,11 +2,11 @@ package com.provectus.kafka.ui.controller;

import com.provectus.kafka.ui.api.MessagesApi;
import com.provectus.kafka.ui.model.ConsumerPosition;
-import com.provectus.kafka.ui.model.CreateTopicMessage;
-import com.provectus.kafka.ui.model.SeekDirection;
-import com.provectus.kafka.ui.model.SeekType;
-import com.provectus.kafka.ui.model.TopicMessageEvent;
-import com.provectus.kafka.ui.model.TopicMessageSchema;
+import com.provectus.kafka.ui.model.CreateTopicMessageDTO;
+import com.provectus.kafka.ui.model.SeekDirectionDTO;
+import com.provectus.kafka.ui.model.SeekTypeDTO;
+import com.provectus.kafka.ui.model.TopicMessageEventDTO;
+import com.provectus.kafka.ui.model.TopicMessageSchemaDTO;
import com.provectus.kafka.ui.service.ClusterService;
import java.util.Collections;
import java.util.List;
@@ -41,9 +41,9 @@ public class MessagesController implements MessagesApi {
}

@Override
-public Mono<ResponseEntity<Flux<TopicMessageEvent>>> getTopicMessages(
-String clusterName, String topicName, @Valid SeekType seekType, @Valid List<String> seekTo,
-@Valid Integer limit, @Valid String q, @Valid SeekDirection seekDirection,
+public Mono<ResponseEntity<Flux<TopicMessageEventDTO>>> getTopicMessages(
+String clusterName, String topicName, @Valid SeekTypeDTO seekType, @Valid List<String> seekTo,
+@Valid Integer limit, @Valid String q, @Valid SeekDirectionDTO seekDirection,
ServerWebExchange exchange) {
return parseConsumerPosition(topicName, seekType, seekTo, seekDirection)
.map(position ->
@@ -54,7 +54,7 @@ public class MessagesController implements MessagesApi {
}

@Override
-public Mono<ResponseEntity<TopicMessageSchema>> getTopicSchema(
+public Mono<ResponseEntity<TopicMessageSchemaDTO>> getTopicSchema(
String clusterName, String topicName, ServerWebExchange exchange) {
return Mono.just(clusterService.getTopicSchema(clusterName, topicName))
.map(ResponseEntity::ok);
@@ -62,7 +62,7 @@ public class MessagesController implements MessagesApi {

@Override
public Mono<ResponseEntity<Void>> sendTopicMessages(
-String clusterName, String topicName, @Valid Mono<CreateTopicMessage> createTopicMessage,
+String clusterName, String topicName, @Valid Mono<CreateTopicMessageDTO> createTopicMessage,
ServerWebExchange exchange) {
return createTopicMessage.flatMap(msg ->
clusterService.sendMessage(clusterName, topicName, msg)
@@ -71,7 +71,8 @@ public class MessagesController implements MessagesApi {


private Mono<ConsumerPosition> parseConsumerPosition(
-String topicName, SeekType seekType, List<String> seekTo, SeekDirection seekDirection) {
+String topicName, SeekTypeDTO seekType, List<String> seekTo,
+SeekDirectionDTO seekDirection) {
return Mono.justOrEmpty(seekTo)
.defaultIfEmpty(Collections.emptyList())
.flatMapIterable(Function.identity())
@@ -88,7 +89,7 @@ public class MessagesController implements MessagesApi {
);
})
.collectMap(Pair::getKey, Pair::getValue)
-.map(positions -> new ConsumerPosition(seekType != null ? seekType : SeekType.BEGINNING,
+.map(positions -> new ConsumerPosition(seekType != null ? seekType : SeekTypeDTO.BEGINNING,
positions, seekDirection));
}

@@ -1,10 +1,10 @@
package com.provectus.kafka.ui.controller;

import com.provectus.kafka.ui.api.SchemasApi;
-import com.provectus.kafka.ui.model.CompatibilityCheckResponse;
-import com.provectus.kafka.ui.model.CompatibilityLevel;
-import com.provectus.kafka.ui.model.NewSchemaSubject;
-import com.provectus.kafka.ui.model.SchemaSubject;
+import com.provectus.kafka.ui.model.CompatibilityCheckResponseDTO;
+import com.provectus.kafka.ui.model.CompatibilityLevelDTO;
+import com.provectus.kafka.ui.model.NewSchemaSubjectDTO;
+import com.provectus.kafka.ui.model.SchemaSubjectDTO;
import com.provectus.kafka.ui.service.SchemaRegistryService;
import javax.validation.Valid;
import lombok.RequiredArgsConstructor;
@@ -23,16 +23,16 @@ public class SchemasController implements SchemasApi {
private final SchemaRegistryService schemaRegistryService;

@Override
-public Mono<ResponseEntity<CompatibilityCheckResponse>> checkSchemaCompatibility(
-String clusterName, String subject, @Valid Mono<NewSchemaSubject> newSchemaSubject,
+public Mono<ResponseEntity<CompatibilityCheckResponseDTO>> checkSchemaCompatibility(
+String clusterName, String subject, @Valid Mono<NewSchemaSubjectDTO> newSchemaSubject,
ServerWebExchange exchange) {
return schemaRegistryService.checksSchemaCompatibility(clusterName, subject, newSchemaSubject)
.map(ResponseEntity::ok);
}

@Override
-public Mono<ResponseEntity<SchemaSubject>> createNewSchema(
-String clusterName, @Valid Mono<NewSchemaSubject> newSchemaSubject,
+public Mono<ResponseEntity<SchemaSubjectDTO>> createNewSchema(
+String clusterName, @Valid Mono<NewSchemaSubjectDTO> newSchemaSubject,
ServerWebExchange exchange) {
return schemaRegistryService
.registerNewSchema(clusterName, newSchemaSubject)
@@ -58,15 +58,15 @@ public class SchemasController implements SchemasApi {
}

@Override
-public Mono<ResponseEntity<Flux<SchemaSubject>>> getAllVersionsBySubject(
+public Mono<ResponseEntity<Flux<SchemaSubjectDTO>>> getAllVersionsBySubject(
String clusterName, String subjectName, ServerWebExchange exchange) {
-Flux<SchemaSubject> schemas =
+Flux<SchemaSubjectDTO> schemas =
schemaRegistryService.getAllVersionsBySubject(clusterName, subjectName);
return Mono.just(ResponseEntity.ok(schemas));
}

@Override
-public Mono<ResponseEntity<CompatibilityLevel>> getGlobalSchemaCompatibilityLevel(
+public Mono<ResponseEntity<CompatibilityLevelDTO>> getGlobalSchemaCompatibilityLevel(
String clusterName, ServerWebExchange exchange) {
return schemaRegistryService.getGlobalSchemaCompatibilityLevel(clusterName)
.map(ResponseEntity::ok)
@@ -74,29 +74,29 @@ public class SchemasController implements SchemasApi {
}

@Override
-public Mono<ResponseEntity<SchemaSubject>> getLatestSchema(String clusterName, String subject,
+public Mono<ResponseEntity<SchemaSubjectDTO>> getLatestSchema(String clusterName, String subject,
ServerWebExchange exchange) {
return schemaRegistryService.getLatestSchemaVersionBySubject(clusterName, subject)
.map(ResponseEntity::ok);
}

@Override
-public Mono<ResponseEntity<SchemaSubject>> getSchemaByVersion(
+public Mono<ResponseEntity<SchemaSubjectDTO>> getSchemaByVersion(
String clusterName, String subject, Integer version, ServerWebExchange exchange) {
return schemaRegistryService.getSchemaSubjectByVersion(clusterName, subject, version)
.map(ResponseEntity::ok);
}

@Override
-public Mono<ResponseEntity<Flux<SchemaSubject>>> getSchemas(String clusterName,
+public Mono<ResponseEntity<Flux<SchemaSubjectDTO>>> getSchemas(String clusterName,
ServerWebExchange exchange) {
-Flux<SchemaSubject> subjects = schemaRegistryService.getAllLatestVersionSchemas(clusterName);
+Flux<SchemaSubjectDTO> subjects = schemaRegistryService.getAllLatestVersionSchemas(clusterName);
return Mono.just(ResponseEntity.ok(subjects));
}

@Override
public Mono<ResponseEntity<Void>> updateGlobalSchemaCompatibilityLevel(
-String clusterName, @Valid Mono<CompatibilityLevel> compatibilityLevel,
+String clusterName, @Valid Mono<CompatibilityLevelDTO> compatibilityLevel,
ServerWebExchange exchange) {
log.info("Updating schema compatibility globally");
return schemaRegistryService.updateSchemaCompatibility(clusterName, compatibilityLevel)
@@ -105,7 +105,7 @@ public class SchemasController implements SchemasApi {

@Override
public Mono<ResponseEntity<Void>> updateSchemaCompatibilityLevel(
-String clusterName, String subject, @Valid Mono<CompatibilityLevel> compatibilityLevel,
+String clusterName, String subject, @Valid Mono<CompatibilityLevelDTO> compatibilityLevel,
ServerWebExchange exchange) {
log.info("Updating schema compatibility for subject: {}", subject);
return schemaRegistryService.updateSchemaCompatibility(clusterName, subject, compatibilityLevel)

@@ -1,17 +1,17 @@
package com.provectus.kafka.ui.controller;

import com.provectus.kafka.ui.api.TopicsApi;
-import com.provectus.kafka.ui.model.PartitionsIncrease;
-import com.provectus.kafka.ui.model.PartitionsIncreaseResponse;
-import com.provectus.kafka.ui.model.ReplicationFactorChange;
-import com.provectus.kafka.ui.model.ReplicationFactorChangeResponse;
-import com.provectus.kafka.ui.model.Topic;
-import com.provectus.kafka.ui.model.TopicColumnsToSort;
-import com.provectus.kafka.ui.model.TopicConfig;
-import com.provectus.kafka.ui.model.TopicCreation;
-import com.provectus.kafka.ui.model.TopicDetails;
-import com.provectus.kafka.ui.model.TopicUpdate;
-import com.provectus.kafka.ui.model.TopicsResponse;
+import com.provectus.kafka.ui.model.PartitionsIncreaseDTO;
+import com.provectus.kafka.ui.model.PartitionsIncreaseResponseDTO;
+import com.provectus.kafka.ui.model.ReplicationFactorChangeDTO;
+import com.provectus.kafka.ui.model.ReplicationFactorChangeResponseDTO;
+import com.provectus.kafka.ui.model.TopicColumnsToSortDTO;
+import com.provectus.kafka.ui.model.TopicConfigDTO;
+import com.provectus.kafka.ui.model.TopicCreationDTO;
+import com.provectus.kafka.ui.model.TopicDTO;
+import com.provectus.kafka.ui.model.TopicDetailsDTO;
+import com.provectus.kafka.ui.model.TopicUpdateDTO;
+import com.provectus.kafka.ui.model.TopicsResponseDTO;
import com.provectus.kafka.ui.service.ClusterService;
import java.util.Optional;
import javax.validation.Valid;
@@ -31,8 +31,8 @@ public class TopicsController implements TopicsApi {
private final ClusterService clusterService;

@Override
-public Mono<ResponseEntity<Topic>> createTopic(
-String clusterName, @Valid Mono<TopicCreation> topicCreation, ServerWebExchange exchange) {
+public Mono<ResponseEntity<TopicDTO>> createTopic(
+String clusterName, @Valid Mono<TopicCreationDTO> topicCreation, ServerWebExchange exchange) {
return clusterService.createTopic(clusterName, topicCreation)
.map(s -> new ResponseEntity<>(s, HttpStatus.OK))
.switchIfEmpty(Mono.just(ResponseEntity.notFound().build()));
@@ -46,7 +46,7 @@ public class TopicsController implements TopicsApi {


@Override
-public Mono<ResponseEntity<Flux<TopicConfig>>> getTopicConfigs(
+public Mono<ResponseEntity<Flux<TopicConfigDTO>>> getTopicConfigs(
String clusterName, String topicName, ServerWebExchange exchange) {
return Mono.just(
clusterService.getTopicConfigs(clusterName, topicName)
@@ -57,7 +57,7 @@ public class TopicsController implements TopicsApi {
}

@Override
-public Mono<ResponseEntity<TopicDetails>> getTopicDetails(
+public Mono<ResponseEntity<TopicDetailsDTO>> getTopicDetails(
String clusterName, String topicName, ServerWebExchange exchange) {
return Mono.just(
clusterService.getTopicDetails(clusterName, topicName)
@@ -67,11 +67,11 @@ public class TopicsController implements TopicsApi {
}

@Override
-public Mono<ResponseEntity<TopicsResponse>> getTopics(String clusterName, @Valid Integer page,
+public Mono<ResponseEntity<TopicsResponseDTO>> getTopics(String clusterName, @Valid Integer page,
@Valid Integer perPage,
@Valid Boolean showInternal,
@Valid String search,
-@Valid TopicColumnsToSort orderBy,
+@Valid TopicColumnsToSortDTO orderBy,
ServerWebExchange exchange) {
return Mono.just(ResponseEntity.ok(clusterService
.getTopics(
@@ -85,16 +85,16 @@ public class TopicsController implements TopicsApi {
}

@Override
-public Mono<ResponseEntity<Topic>> updateTopic(
-String clusterId, String topicName, @Valid Mono<TopicUpdate> topicUpdate,
+public Mono<ResponseEntity<TopicDTO>> updateTopic(
+String clusterId, String topicName, @Valid Mono<TopicUpdateDTO> topicUpdate,
ServerWebExchange exchange) {
return clusterService.updateTopic(clusterId, topicName, topicUpdate).map(ResponseEntity::ok);
}

@Override
-public Mono<ResponseEntity<PartitionsIncreaseResponse>> increaseTopicPartitions(
+public Mono<ResponseEntity<PartitionsIncreaseResponseDTO>> increaseTopicPartitions(
String clusterName, String topicName,
-Mono<PartitionsIncrease> partitionsIncrease,
+Mono<PartitionsIncreaseDTO> partitionsIncrease,
ServerWebExchange exchange) {
return partitionsIncrease.flatMap(
partitions -> clusterService.increaseTopicPartitions(clusterName, topicName, partitions))
@@ -102,8 +102,9 @@ public class TopicsController implements TopicsApi {
}

@Override
-public Mono<ResponseEntity<ReplicationFactorChangeResponse>> changeReplicationFactor(
-String clusterName, String topicName, Mono<ReplicationFactorChange> replicationFactorChange,
+public Mono<ResponseEntity<ReplicationFactorChangeResponseDTO>> changeReplicationFactor(
+String clusterName, String topicName,
+Mono<ReplicationFactorChangeDTO> replicationFactorChange,
ServerWebExchange exchange) {
return replicationFactorChange
.flatMap(rfc -> clusterService.changeReplicationFactor(clusterName, topicName, rfc))

@@ -1,9 +1,9 @@
package com.provectus.kafka.ui.emitter;

-import com.provectus.kafka.ui.model.TopicMessage;
-import com.provectus.kafka.ui.model.TopicMessageConsuming;
-import com.provectus.kafka.ui.model.TopicMessageEvent;
-import com.provectus.kafka.ui.model.TopicMessagePhase;
+import com.provectus.kafka.ui.model.TopicMessageConsumingDTO;
+import com.provectus.kafka.ui.model.TopicMessageDTO;
+import com.provectus.kafka.ui.model.TopicMessageEventDTO;
+import com.provectus.kafka.ui.model.TopicMessagePhaseDTO;
import com.provectus.kafka.ui.serde.RecordSerDe;
import com.provectus.kafka.ui.util.ClusterUtil;
import java.time.Duration;
@@ -28,7 +28,7 @@ public abstract class AbstractEmitter {
}

protected ConsumerRecords<Bytes, Bytes> poll(
-FluxSink<TopicMessageEvent> sink, Consumer<Bytes, Bytes> consumer) {
+FluxSink<TopicMessageEventDTO> sink, Consumer<Bytes, Bytes> consumer) {
Instant start = Instant.now();
ConsumerRecords<Bytes, Bytes> records = consumer.poll(POLL_TIMEOUT_MS);
Instant finish = Instant.now();
@@ -36,25 +36,25 @@ public abstract class AbstractEmitter {
return records;
}

-protected FluxSink<TopicMessageEvent> sendMessage(FluxSink<TopicMessageEvent> sink,
+protected FluxSink<TopicMessageEventDTO> sendMessage(FluxSink<TopicMessageEventDTO> sink,
ConsumerRecord<Bytes, Bytes> msg) {
-final TopicMessage topicMessage = ClusterUtil.mapToTopicMessage(msg, recordDeserializer);
+final TopicMessageDTO topicMessage = ClusterUtil.mapToTopicMessage(msg, recordDeserializer);
return sink.next(
-new TopicMessageEvent()
-.type(TopicMessageEvent.TypeEnum.MESSAGE)
+new TopicMessageEventDTO()
+.type(TopicMessageEventDTO.TypeEnum.MESSAGE)
.message(topicMessage)
);
}

-protected void sendPhase(FluxSink<TopicMessageEvent> sink, String name) {
+protected void sendPhase(FluxSink<TopicMessageEventDTO> sink, String name) {
sink.next(
-new TopicMessageEvent()
-.type(TopicMessageEvent.TypeEnum.PHASE)
-.phase(new TopicMessagePhase().name(name))
+new TopicMessageEventDTO()
+.type(TopicMessageEventDTO.TypeEnum.PHASE)
+.phase(new TopicMessagePhaseDTO().name(name))
);
}

-protected void sendConsuming(FluxSink<TopicMessageEvent> sink,
+protected void sendConsuming(FluxSink<TopicMessageEventDTO> sink,
ConsumerRecords<Bytes, Bytes> records,
long elapsed) {
for (ConsumerRecord<Bytes, Bytes> record : records) {
@@ -67,14 +67,14 @@ public abstract class AbstractEmitter {
}
this.records += records.count();
this.elapsed += elapsed;
-final TopicMessageConsuming consuming = new TopicMessageConsuming()
+final TopicMessageConsumingDTO consuming = new TopicMessageConsumingDTO()
.bytesConsumed(this.bytes)
.elapsedMs(this.elapsed)
.isCancelled(sink.isCancelled())
.messagesConsumed(this.records);
sink.next(
-new TopicMessageEvent()
-.type(TopicMessageEvent.TypeEnum.CONSUMING)
+new TopicMessageEventDTO()
+.type(TopicMessageEventDTO.TypeEnum.CONSUMING)
.consuming(consuming)
);
}

@@ -1,6 +1,6 @@
package com.provectus.kafka.ui.emitter;

-import com.provectus.kafka.ui.model.TopicMessageEvent;
+import com.provectus.kafka.ui.model.TopicMessageEventDTO;
import com.provectus.kafka.ui.serde.RecordSerDe;
import com.provectus.kafka.ui.util.OffsetsSeekBackward;
import java.util.Collections;
@@ -23,7 +23,7 @@ import reactor.core.publisher.FluxSink;
@Log4j2
public class BackwardRecordEmitter
extends AbstractEmitter
-implements java.util.function.Consumer<FluxSink<TopicMessageEvent>> {
+implements java.util.function.Consumer<FluxSink<TopicMessageEventDTO>> {

private final Function<Map<String, Object>, KafkaConsumer<Bytes, Bytes>> consumerSupplier;
private final OffsetsSeekBackward offsetsSeek;
@@ -38,7 +38,7 @@ public class BackwardRecordEmitter
}

@Override
-public void accept(FluxSink<TopicMessageEvent> sink) {
+public void accept(FluxSink<TopicMessageEventDTO> sink) {
try (KafkaConsumer<Bytes, Bytes> configConsumer = consumerSupplier.apply(Map.of())) {
final List<TopicPartition> requestedPartitions =
offsetsSeek.getRequestedPartitions(configConsumer);

@@ -1,6 +1,6 @@
package com.provectus.kafka.ui.emitter;

-import com.provectus.kafka.ui.model.TopicMessageEvent;
+import com.provectus.kafka.ui.model.TopicMessageEventDTO;
import com.provectus.kafka.ui.serde.RecordSerDe;
import com.provectus.kafka.ui.util.OffsetsSeek;
import java.time.Duration;
@@ -16,7 +16,7 @@ import reactor.core.publisher.FluxSink;
@Log4j2
public class ForwardRecordEmitter
extends AbstractEmitter
-implements java.util.function.Consumer<FluxSink<TopicMessageEvent>> {
+implements java.util.function.Consumer<FluxSink<TopicMessageEventDTO>> {

private static final Duration POLL_TIMEOUT_MS = Duration.ofMillis(1000L);

@@ -33,7 +33,7 @@ public class ForwardRecordEmitter
}

@Override
-public void accept(FluxSink<TopicMessageEvent> sink) {
+public void accept(FluxSink<TopicMessageEventDTO> sink) {
try (KafkaConsumer<Bytes, Bytes> consumer = consumerSupplier.get()) {
sendPhase(sink, "Assigning partitions");
var waitingOffsets = offsetsSeek.assignAndSeek(consumer);

@@ -1,7 +1,7 @@
package com.provectus.kafka.ui.exception;

import com.google.common.collect.Sets;
-import com.provectus.kafka.ui.model.ErrorResponse;
+import com.provectus.kafka.ui.model.ErrorResponseDTO;
import java.math.BigDecimal;
import java.util.List;
import java.util.Map;
@@ -69,7 +69,7 @@ public class GlobalErrorWebExceptionHandler extends AbstractErrorWebExceptionHan
}

private Mono<ServerResponse> renderDefault(Throwable throwable, ServerRequest request) {
-var response = new ErrorResponse()
+var response = new ErrorResponseDTO()
.code(ErrorCode.UNEXPECTED.code())
.message(coalesce(throwable.getMessage(), "Unexpected internal error"))
.requestId(requestId(request))
@@ -82,7 +82,7 @@ public class GlobalErrorWebExceptionHandler extends AbstractErrorWebExceptionHan

private Mono<ServerResponse> render(CustomBaseException baseException, ServerRequest request) {
ErrorCode errorCode = baseException.getErrorCode();
-var response = new ErrorResponse()
+var response = new ErrorResponseDTO()
.code(errorCode.code())
.message(coalesce(baseException.getMessage(), "Internal error"))
.requestId(requestId(request))
@@ -100,7 +100,7 @@ public class GlobalErrorWebExceptionHandler extends AbstractErrorWebExceptionHan

var fieldsErrors = fieldErrorsMap.entrySet().stream()
.map(e -> {
-var err = new com.provectus.kafka.ui.model.FieldError();
+var err = new com.provectus.kafka.ui.model.FieldErrorDTO();
err.setFieldName(e.getKey());
err.setRestrictions(List.copyOf(e.getValue()));
return err;
@@ -110,7 +110,7 @@ public class GlobalErrorWebExceptionHandler extends AbstractErrorWebExceptionHan
? exception.getMessage()
: "Fields validation failure";

-var response = new ErrorResponse()
+var response = new ErrorResponseDTO()
.code(ErrorCode.BINDING_FAIL.code())
.message(message)
.requestId(requestId(request))
@@ -124,7 +124,7 @@ public class GlobalErrorWebExceptionHandler extends AbstractErrorWebExceptionHan

private Mono<ServerResponse> render(ResponseStatusException exception, ServerRequest request) {
String msg = coalesce(exception.getReason(), exception.getMessage(), "Server error");
-var response = new ErrorResponse()
+var response = new ErrorResponseDTO()
.code(ErrorCode.UNEXPECTED.code())
.message(msg)
.requestId(requestId(request))

@@ -1,17 +1,17 @@
package com.provectus.kafka.ui.mapper;

import com.provectus.kafka.ui.config.ClustersProperties;
-import com.provectus.kafka.ui.model.BrokerConfig;
-import com.provectus.kafka.ui.model.BrokerDiskUsage;
-import com.provectus.kafka.ui.model.BrokerMetrics;
-import com.provectus.kafka.ui.model.Cluster;
-import com.provectus.kafka.ui.model.ClusterMetrics;
-import com.provectus.kafka.ui.model.ClusterStats;
-import com.provectus.kafka.ui.model.CompatibilityCheckResponse;
-import com.provectus.kafka.ui.model.CompatibilityLevel;
-import com.provectus.kafka.ui.model.ConfigSource;
-import com.provectus.kafka.ui.model.ConfigSynonym;
-import com.provectus.kafka.ui.model.Connect;
+import com.provectus.kafka.ui.model.BrokerConfigDTO;
+import com.provectus.kafka.ui.model.BrokerDiskUsageDTO;
+import com.provectus.kafka.ui.model.BrokerMetricsDTO;
+import com.provectus.kafka.ui.model.ClusterDTO;
+import com.provectus.kafka.ui.model.ClusterMetricsDTO;
+import com.provectus.kafka.ui.model.ClusterStatsDTO;
+import com.provectus.kafka.ui.model.CompatibilityCheckResponseDTO;
+import com.provectus.kafka.ui.model.CompatibilityLevelDTO;
+import com.provectus.kafka.ui.model.ConfigSourceDTO;
+import com.provectus.kafka.ui.model.ConfigSynonymDTO;
+import com.provectus.kafka.ui.model.ConnectDTO;
import com.provectus.kafka.ui.model.Feature;
import com.provectus.kafka.ui.model.InternalBrokerConfig;
import com.provectus.kafka.ui.model.InternalBrokerDiskUsage;
@@ -24,11 +24,11 @@ import com.provectus.kafka.ui.model.InternalTopic;
import com.provectus.kafka.ui.model.InternalTopicConfig;
import com.provectus.kafka.ui.model.KafkaCluster;
import com.provectus.kafka.ui.model.KafkaConnectCluster;
-import com.provectus.kafka.ui.model.Partition;
-import com.provectus.kafka.ui.model.Replica;
-import com.provectus.kafka.ui.model.Topic;
-import com.provectus.kafka.ui.model.TopicConfig;
-import com.provectus.kafka.ui.model.TopicDetails;
+import com.provectus.kafka.ui.model.PartitionDTO;
+import com.provectus.kafka.ui.model.ReplicaDTO;
+import com.provectus.kafka.ui.model.TopicConfigDTO;
+import com.provectus.kafka.ui.model.TopicDTO;
+import com.provectus.kafka.ui.model.TopicDetailsDTO;
import com.provectus.kafka.ui.model.schemaregistry.InternalCompatibilityCheck;
import com.provectus.kafka.ui.model.schemaregistry.InternalCompatibilityLevel;
import java.math.BigDecimal;
@@ -54,7 +54,7 @@ public interface ClusterMapper {
qualifiedByName = "sumMetrics")
@Mapping(target = "bytesOutPerSec", source = "metrics.bytesOutPerSec",
qualifiedByName = "sumMetrics")
-Cluster toCluster(KafkaCluster cluster);
+ClusterDTO toCluster(KafkaCluster cluster);

@Mapping(target = "protobufFile", source = "protobufFile", qualifiedByName = "resolvePath")
@Mapping(target = "properties", source = "properties", qualifiedByName = "setProperties")
@@ -63,35 +63,35 @@ public interface ClusterMapper {

@Mapping(target = "diskUsage", source = "internalBrokerDiskUsage",
qualifiedByName = "mapDiskUsage")
-ClusterStats toClusterStats(InternalClusterMetrics metrics);
+ClusterStatsDTO toClusterStats(InternalClusterMetrics metrics);

@Mapping(target = "items", source = "metrics")
-ClusterMetrics toClusterMetrics(InternalClusterMetrics metrics);
+ClusterMetricsDTO toClusterMetrics(InternalClusterMetrics metrics);

-BrokerMetrics toBrokerMetrics(InternalBrokerMetrics metrics);
+BrokerMetricsDTO toBrokerMetrics(InternalBrokerMetrics metrics);

@Mapping(target = "isSensitive", source = "sensitive")
@Mapping(target = "isReadOnly", source = "readOnly")
-BrokerConfig toBrokerConfig(InternalBrokerConfig config);
+BrokerConfigDTO toBrokerConfig(InternalBrokerConfig config);

-default ConfigSynonym toConfigSynonym(ConfigEntry.ConfigSynonym config) {
+default ConfigSynonymDTO toConfigSynonym(ConfigEntry.ConfigSynonym config) {
if (config == null) {
return null;
}

-ConfigSynonym configSynonym = new ConfigSynonym();
+ConfigSynonymDTO configSynonym = new ConfigSynonymDTO();
configSynonym.setName(config.name());
configSynonym.setValue(config.value());
if (config.source() != null) {
-configSynonym.setSource(ConfigSource.valueOf(config.source().name()));
+configSynonym.setSource(ConfigSourceDTO.valueOf(config.source().name()));
}

return configSynonym;
}

-Topic toTopic(InternalTopic topic);
+TopicDTO toTopic(InternalTopic topic);

-Partition toPartition(InternalPartition topic);
+PartitionDTO toPartition(InternalPartition topic);

@Named("setSchemaRegistry")
default InternalSchemaRegistry setSchemaRegistry(ClustersProperties.Cluster clusterProperties) {
@@ -117,10 +117,10 @@ public interface ClusterMapper {
return internalSchemaRegistry.build();
}

-TopicDetails toTopicDetails(InternalTopic topic);
+TopicDetailsDTO toTopicDetails(InternalTopic topic);

-default TopicDetails toTopicDetails(InternalTopic topic, InternalClusterMetrics metrics) {
-final TopicDetails result = toTopicDetails(topic);
+default TopicDetailsDTO toTopicDetails(InternalTopic topic, InternalClusterMetrics metrics) {
+final TopicDetailsDTO result = toTopicDetails(topic);
result.setBytesInPerSec(
metrics.getBytesInPerSec().get(topic.getName())
);
@@ -132,26 +132,26 @@ public interface ClusterMapper {

@Mapping(target = "isReadOnly", source = "readOnly")
@Mapping(target = "isSensitive", source = "sensitive")
-TopicConfig toTopicConfig(InternalTopicConfig topic);
+TopicConfigDTO toTopicConfig(InternalTopicConfig topic);

-Replica toReplica(InternalReplica replica);
+ReplicaDTO toReplica(InternalReplica replica);

-Connect toKafkaConnect(KafkaConnectCluster connect);
+ConnectDTO toKafkaConnect(KafkaConnectCluster connect);

-List<Cluster.FeaturesEnum> toFeaturesEnum(List<Feature> features);
+List<ClusterDTO.FeaturesEnum> toFeaturesEnum(List<Feature> features);

@Mapping(target = "isCompatible", source = "compatible")
-CompatibilityCheckResponse toCompatibilityCheckResponse(InternalCompatibilityCheck dto);
+CompatibilityCheckResponseDTO toCompatibilityCheckResponse(InternalCompatibilityCheck dto);

@Mapping(target = "compatibility", source = "compatibilityLevel")
-CompatibilityLevel toCompatibilityLevel(InternalCompatibilityLevel dto);
+CompatibilityLevelDTO toCompatibilityLevel(InternalCompatibilityLevel dto);

-default List<Partition> map(Map<Integer, InternalPartition> map) {
+default List<PartitionDTO> map(Map<Integer, InternalPartition> map) {
return map.values().stream().map(this::toPartition).collect(Collectors.toList());
}

-default BrokerDiskUsage map(Integer id, InternalBrokerDiskUsage internalBrokerDiskUsage) {
-final BrokerDiskUsage brokerDiskUsage = new BrokerDiskUsage();
+default BrokerDiskUsageDTO map(Integer id, InternalBrokerDiskUsage internalBrokerDiskUsage) {
+final BrokerDiskUsageDTO brokerDiskUsage = new BrokerDiskUsageDTO();
brokerDiskUsage.setBrokerId(id);
brokerDiskUsage.segmentCount((int) internalBrokerDiskUsage.getSegmentCount());
brokerDiskUsage.segmentSize(internalBrokerDiskUsage.getSegmentSize());
@@ -159,7 +159,7 @@ public interface ClusterMapper {
}

@Named("mapDiskUsage")
-default List<BrokerDiskUsage> mapDiskUsage(Map<Integer, InternalBrokerDiskUsage> brokers) {
+default List<BrokerDiskUsageDTO> mapDiskUsage(Map<Integer, InternalBrokerDiskUsage> brokers) {
return brokers.entrySet().stream().map(e -> this.map(e.getKey(), e.getValue()))
.collect(Collectors.toList());
}

@@ -1,8 +1,8 @@
package com.provectus.kafka.ui.mapper;

-import com.provectus.kafka.ui.model.BrokerTopicLogdirs;
-import com.provectus.kafka.ui.model.BrokerTopicPartitionLogdir;
-import com.provectus.kafka.ui.model.BrokersLogdirs;
+import com.provectus.kafka.ui.model.BrokerTopicLogdirsDTO;
+import com.provectus.kafka.ui.model.BrokerTopicPartitionLogdirDTO;
+import com.provectus.kafka.ui.model.BrokersLogdirsDTO;
import java.util.Collection;
import java.util.List;
import java.util.Map;
@@ -14,7 +14,7 @@ import org.springframework.stereotype.Component;
@Component
public class DescribeLogDirsMapper {

-public List<BrokersLogdirs> toBrokerLogDirsList(
+public List<BrokersLogdirsDTO> toBrokerLogDirsList(
Map<Integer, Map<String, DescribeLogDirsResponse.LogDirInfo>> logDirsInfo) {

return logDirsInfo.entrySet().stream().map(
@@ -24,9 +24,9 @@ public class DescribeLogDirsMapper {
).flatMap(Collection::stream).collect(Collectors.toList());
}

-private BrokersLogdirs toBrokerLogDirs(Integer broker, String dirName,
+private BrokersLogdirsDTO toBrokerLogDirs(Integer broker, String dirName,
DescribeLogDirsResponse.LogDirInfo logDirInfo) {
-BrokersLogdirs result = new BrokersLogdirs();
+BrokersLogdirsDTO result = new BrokersLogdirsDTO();
result.setName(dirName);
if (logDirInfo.error != null) {
result.setError(logDirInfo.error.message());
@@ -39,10 +39,10 @@ public class DescribeLogDirsMapper {
return result;
}

-private BrokerTopicLogdirs toTopicLogDirs(Integer broker, String name,
+private BrokerTopicLogdirsDTO toTopicLogDirs(Integer broker, String name,
List<Map.Entry<TopicPartition,
DescribeLogDirsResponse.ReplicaInfo>> partitions) {
-BrokerTopicLogdirs topic = new BrokerTopicLogdirs();
+BrokerTopicLogdirsDTO topic = new BrokerTopicLogdirsDTO();
topic.setName(name);
topic.setPartitions(
partitions.stream().map(
@@ -52,10 +52,10 @@ public class DescribeLogDirsMapper {
return topic;
}

-private BrokerTopicPartitionLogdir topicPartitionLogDir(Integer broker, Integer partition,
+private BrokerTopicPartitionLogdirDTO topicPartitionLogDir(Integer broker, Integer partition,
DescribeLogDirsResponse.ReplicaInfo
replicaInfo) {
-BrokerTopicPartitionLogdir logDir = new BrokerTopicPartitionLogdir();
+BrokerTopicPartitionLogdirDTO logDir = new BrokerTopicPartitionLogdirDTO();
logDir.setBroker(broker);
logDir.setPartition(partition);
logDir.setSize(replicaInfo.size);

@@ -3,45 +3,46 @@ package com.provectus.kafka.ui.mapper;
import com.provectus.kafka.ui.connect.model.ConnectorStatusConnector;
import com.provectus.kafka.ui.connect.model.ConnectorTask;
import com.provectus.kafka.ui.connect.model.NewConnector;
-import com.provectus.kafka.ui.model.Connector;
-import com.provectus.kafka.ui.model.ConnectorPlugin;
-import com.provectus.kafka.ui.model.ConnectorPluginConfigValidationResponse;
-import com.provectus.kafka.ui.model.ConnectorStatus;
-import com.provectus.kafka.ui.model.ConnectorTaskStatus;
-import com.provectus.kafka.ui.model.FullConnectorInfo;
-import com.provectus.kafka.ui.model.Task;
-import com.provectus.kafka.ui.model.TaskStatus;
+import com.provectus.kafka.ui.model.ConnectorDTO;
+import com.provectus.kafka.ui.model.ConnectorPluginConfigValidationResponseDTO;
+import com.provectus.kafka.ui.model.ConnectorPluginDTO;
+import com.provectus.kafka.ui.model.ConnectorStatusDTO;
+import com.provectus.kafka.ui.model.ConnectorTaskStatusDTO;
+import com.provectus.kafka.ui.model.FullConnectorInfoDTO;
+import com.provectus.kafka.ui.model.TaskDTO;
+import com.provectus.kafka.ui.model.TaskStatusDTO;
import com.provectus.kafka.ui.model.connect.InternalConnectInfo;
import java.util.List;
import org.mapstruct.Mapper;

@Mapper(componentModel = "spring")
public interface KafkaConnectMapper {
-NewConnector toClient(com.provectus.kafka.ui.model.NewConnector newConnector);
+NewConnector toClient(com.provectus.kafka.ui.model.NewConnectorDTO newConnector);

-Connector fromClient(com.provectus.kafka.ui.connect.model.Connector connector);
+ConnectorDTO fromClient(com.provectus.kafka.ui.connect.model.Connector connector);

-ConnectorStatus fromClient(ConnectorStatusConnector connectorStatus);
+ConnectorStatusDTO fromClient(ConnectorStatusConnector connectorStatus);

-Task fromClient(ConnectorTask connectorTask);
+TaskDTO fromClient(ConnectorTask connectorTask);

-TaskStatus fromClient(com.provectus.kafka.ui.connect.model.TaskStatus taskStatus);
+TaskStatusDTO fromClient(com.provectus.kafka.ui.connect.model.TaskStatus taskStatus);

-ConnectorPlugin fromClient(com.provectus.kafka.ui.connect.model.ConnectorPlugin connectorPlugin);
+ConnectorPluginDTO fromClient(
+com.provectus.kafka.ui.connect.model.ConnectorPlugin connectorPlugin);

-ConnectorPluginConfigValidationResponse fromClient(
+ConnectorPluginConfigValidationResponseDTO fromClient(
com.provectus.kafka.ui.connect.model.ConnectorPluginConfigValidationResponse
connectorPluginConfigValidationResponse);

-default FullConnectorInfo fullConnectorInfoFromTuple(InternalConnectInfo connectInfo) {
-Connector connector = connectInfo.getConnector();
-List<Task> tasks = connectInfo.getTasks();
+default FullConnectorInfoDTO fullConnectorInfoFromTuple(InternalConnectInfo connectInfo) {
+ConnectorDTO connector = connectInfo.getConnector();
+List<TaskDTO> tasks = connectInfo.getTasks();
int failedTasksCount = (int) tasks.stream()
-.map(Task::getStatus)
-.map(TaskStatus::getState)
-.filter(ConnectorTaskStatus.FAILED::equals)
+.map(TaskDTO::getStatus)
+.map(TaskStatusDTO::getState)
+.filter(ConnectorTaskStatusDTO.FAILED::equals)
.count();
-return new FullConnectorInfo()
+return new FullConnectorInfoDTO()
.connect(connector.getConnect())
.name(connector.getName())
.connectorClass((String) connectInfo.getConfig().get("connector.class"))

@@ -6,7 +6,7 @@ import org.apache.kafka.common.TopicPartition;

@Value
public class ConsumerPosition {
-SeekType seekType;
+SeekTypeDTO seekType;
Map<TopicPartition, Long> seekTo;
-SeekDirection seekDirection;
+SeekDirectionDTO seekDirection;
}

@@ -7,5 +7,5 @@ import lombok.Data;
@Data
@Builder(toBuilder = true)
public class InternalBrokerMetrics {
-private final List<Metric> metrics;
+private final List<MetricDTO> metrics;
}

@@ -25,7 +25,7 @@ public class InternalClusterMetrics {
private final long segmentSize;
private final Map<Integer, InternalBrokerDiskUsage> internalBrokerDiskUsage;
private final Map<Integer, InternalBrokerMetrics> internalBrokerMetrics;
-private final List<Metric> metrics;
+private final List<MetricDTO> metrics;
private final int zooKeeperStatus;
private final String version;
}

@@ -26,8 +26,8 @@ public class KafkaCluster {
private final List<KafkaConnectCluster> kafkaConnect;
private final String schemaNameTemplate;
private final String keySchemaNameTemplate;
-private final ServerStatus status;
-private final ServerStatus zookeeperStatus;
+private final ServerStatusDTO status;
+private final ServerStatusDTO zookeeperStatus;
private final InternalClusterMetrics metrics;
private final Map<String, InternalTopic> topics;
private final List<Integer> brokers;

@@ -1,7 +1,7 @@
package com.provectus.kafka.ui.model.connect;

-import com.provectus.kafka.ui.model.Connector;
-import com.provectus.kafka.ui.model.Task;
+import com.provectus.kafka.ui.model.ConnectorDTO;
+import com.provectus.kafka.ui.model.TaskDTO;
import java.util.List;
import java.util.Map;
import lombok.Builder;
@@ -10,8 +10,8 @@ import lombok.Data;
@Data
@Builder(toBuilder = true)
public class InternalConnectInfo {
-private final Connector connector;
+private final ConnectorDTO connector;
private final Map<String, Object> config;
-private final List<Task> tasks;
+private final List<TaskDTO> tasks;
private final List<String> topics;
}

@ -1,16 +1,17 @@
package com.provectus.kafka.ui.model.schemaregistry;

import com.fasterxml.jackson.annotation.JsonInclude;
import com.provectus.kafka.ui.model.SchemaType;
import com.provectus.kafka.ui.model.SchemaTypeDTO;
import com.provectus.kafka.ui.model.SchemaTypeDTO;
import lombok.Data;

@Data
public class InternalNewSchema {
private String schema;
@JsonInclude(JsonInclude.Include.NON_NULL)
private SchemaType schemaType;
private SchemaTypeDTO schemaType;

public InternalNewSchema(String schema, SchemaType schemaType) {
public InternalNewSchema(String schema, SchemaTypeDTO schemaType) {
this.schema = schema;
this.schemaType = schemaType;
}

@ -4,8 +4,8 @@ import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.protobuf.Descriptors.Descriptor;
import com.google.protobuf.DynamicMessage;
import com.google.protobuf.util.JsonFormat;
import com.provectus.kafka.ui.model.MessageSchema;
import com.provectus.kafka.ui.model.TopicMessageSchema;
import com.provectus.kafka.ui.model.MessageSchemaDTO;
import com.provectus.kafka.ui.model.TopicMessageSchemaDTO;
import com.provectus.kafka.ui.serde.schemaregistry.MessageFormat;
import com.provectus.kafka.ui.util.jsonschema.JsonSchema;
import com.provectus.kafka.ui.util.jsonschema.ProtobufSchemaConverter;
@ -121,23 +121,23 @@ public class ProtobufFileRecordSerDe implements RecordSerDe {
}

@Override
public TopicMessageSchema getTopicSchema(String topic) {
public TopicMessageSchemaDTO getTopicSchema(String topic) {

final JsonSchema jsonSchema = schemaConverter.convert(
protobufSchemaPath.toUri(),
getDescriptor(topic)
);
final MessageSchema keySchema = new MessageSchema()
final MessageSchemaDTO keySchema = new MessageSchemaDTO()
.name(protobufSchema.fullName())
.source(MessageSchema.SourceEnum.PROTO_FILE)
.source(MessageSchemaDTO.SourceEnum.PROTO_FILE)
.schema(JsonSchema.stringSchema().toJson(objectMapper));

final MessageSchema valueSchema = new MessageSchema()
final MessageSchemaDTO valueSchema = new MessageSchemaDTO()
.name(protobufSchema.fullName())
.source(MessageSchema.SourceEnum.PROTO_FILE)
.source(MessageSchemaDTO.SourceEnum.PROTO_FILE)
.schema(jsonSchema.toJson(objectMapper));

return new TopicMessageSchema()
return new TopicMessageSchemaDTO()
.key(keySchema)
.value(valueSchema);
}

@ -1,6 +1,6 @@
package com.provectus.kafka.ui.serde;

import com.provectus.kafka.ui.model.TopicMessageSchema;
import com.provectus.kafka.ui.model.TopicMessageSchemaDTO;
import com.provectus.kafka.ui.serde.schemaregistry.MessageFormat;
import javax.annotation.Nullable;
import lombok.Builder;
@ -29,5 +29,5 @@ public interface RecordSerDe {
@Nullable String data,
@Nullable Integer partition);

TopicMessageSchema getTopicSchema(String topic);
TopicMessageSchemaDTO getTopicSchema(String topic);
}

@ -1,8 +1,8 @@
package com.provectus.kafka.ui.serde;

import com.fasterxml.jackson.databind.ObjectMapper;
import com.provectus.kafka.ui.model.MessageSchema;
import com.provectus.kafka.ui.model.TopicMessageSchema;
import com.provectus.kafka.ui.model.MessageSchemaDTO;
import com.provectus.kafka.ui.model.TopicMessageSchemaDTO;
import com.provectus.kafka.ui.serde.schemaregistry.MessageFormat;
import com.provectus.kafka.ui.util.ConsumerRecordUtil;
import com.provectus.kafka.ui.util.jsonschema.JsonSchema;
@ -41,12 +41,12 @@ public class SimpleRecordSerDe implements RecordSerDe {
}

@Override
public TopicMessageSchema getTopicSchema(String topic) {
final MessageSchema schema = new MessageSchema()
public TopicMessageSchemaDTO getTopicSchema(String topic) {
final MessageSchemaDTO schema = new MessageSchemaDTO()
.name("unknown")
.source(MessageSchema.SourceEnum.UNKNOWN)
.source(MessageSchemaDTO.SourceEnum.UNKNOWN)
.schema(JsonSchema.stringSchema().toJson(new ObjectMapper()));
return new TopicMessageSchema()
return new TopicMessageSchemaDTO()
.key(schema)
.value(schema);
}

@ -7,8 +7,8 @@ import static io.confluent.kafka.serializers.AbstractKafkaSchemaSerDeConfig.USER
import com.fasterxml.jackson.databind.ObjectMapper;
import com.provectus.kafka.ui.exception.ValidationException;
import com.provectus.kafka.ui.model.KafkaCluster;
import com.provectus.kafka.ui.model.MessageSchema;
import com.provectus.kafka.ui.model.TopicMessageSchema;
import com.provectus.kafka.ui.model.MessageSchemaDTO;
import com.provectus.kafka.ui.model.TopicMessageSchemaDTO;
import com.provectus.kafka.ui.serde.RecordSerDe;
import com.provectus.kafka.ui.util.ConsumerRecordUtil;
import com.provectus.kafka.ui.util.jsonschema.AvroJsonSchemaConverter;
@ -185,7 +185,7 @@ public class SchemaRegistryAwareRecordSerDe implements RecordSerDe {
}

@Override
public TopicMessageSchema getTopicSchema(String topic) {
public TopicMessageSchemaDTO getTopicSchema(String topic) {
final Optional<SchemaMetadata> maybeValueSchema = getSchemaBySubject(topic, false);
final Optional<SchemaMetadata> maybeKeySchema = getSchemaBySubject(topic, true);

@ -195,21 +195,21 @@ public class SchemaRegistryAwareRecordSerDe implements RecordSerDe {
String sourceKeySchema = maybeKeySchema.map(this::convertSchema)
.orElseGet(() -> JsonSchema.stringSchema().toJson(objectMapper));

final MessageSchema keySchema = new MessageSchema()
final MessageSchemaDTO keySchema = new MessageSchemaDTO()
.name(maybeKeySchema.map(
(s) -> schemaSubject(topic, true)
).orElse("unknown"))
.source(MessageSchema.SourceEnum.SCHEMA_REGISTRY)
.source(MessageSchemaDTO.SourceEnum.SCHEMA_REGISTRY)
.schema(sourceKeySchema);

final MessageSchema valueSchema = new MessageSchema()
final MessageSchemaDTO valueSchema = new MessageSchemaDTO()
.name(maybeValueSchema.map(
(s) -> schemaSubject(topic, false)
).orElse("unknown"))
.source(MessageSchema.SourceEnum.SCHEMA_REGISTRY)
.source(MessageSchemaDTO.SourceEnum.SCHEMA_REGISTRY)
.schema(sourceValueSchema);

return new TopicMessageSchema()
return new TopicMessageSchemaDTO()
.key(keySchema)
.value(valueSchema);
}

@ -1,6 +1,6 @@
package com.provectus.kafka.ui.service;

import com.provectus.kafka.ui.model.Broker;
import com.provectus.kafka.ui.model.BrokerDTO;
import com.provectus.kafka.ui.model.InternalBrokerConfig;
import com.provectus.kafka.ui.model.KafkaCluster;
import java.util.Map;
@ -34,7 +34,7 @@ public interface BrokerService {
* @param cluster - cluster
* @return Flux of Broker
*/
Flux<Broker> getBrokers(KafkaCluster cluster);
Flux<BrokerDTO> getBrokers(KafkaCluster cluster);

/**
* Get cluster controller node.

@ -2,7 +2,7 @@ package com.provectus.kafka.ui.service;

import com.provectus.kafka.ui.exception.IllegalEntityStateException;
import com.provectus.kafka.ui.exception.NotFoundException;
import com.provectus.kafka.ui.model.Broker;
import com.provectus.kafka.ui.model.BrokerDTO;
import com.provectus.kafka.ui.model.ExtendedAdminClient;
import com.provectus.kafka.ui.model.InternalBrokerConfig;
import com.provectus.kafka.ui.model.KafkaCluster;
@ -85,12 +85,12 @@ public class BrokerServiceImpl implements BrokerService {
}

@Override
public Flux<Broker> getBrokers(KafkaCluster cluster) {
public Flux<BrokerDTO> getBrokers(KafkaCluster cluster) {
return adminClientService
.getOrCreateAdminClient(cluster)
.flatMap(client -> ClusterUtil.toMono(client.getAdminClient().describeCluster().nodes())
.map(n -> n.stream().map(node -> {
Broker broker = new Broker();
BrokerDTO broker = new BrokerDTO();
broker.setId(node.id());
broker.setHost(node.host());
return broker;

@ -1,7 +1,5 @@
package com.provectus.kafka.ui.service;

import static com.provectus.kafka.ui.util.Constants.DELETE_TOPIC_ENABLE;

import com.provectus.kafka.ui.exception.ClusterNotFoundException;
import com.provectus.kafka.ui.exception.IllegalEntityStateException;
import com.provectus.kafka.ui.exception.NotFoundException;
@ -9,36 +7,35 @@ import com.provectus.kafka.ui.exception.TopicNotFoundException;
import com.provectus.kafka.ui.exception.ValidationException;
import com.provectus.kafka.ui.mapper.ClusterMapper;
import com.provectus.kafka.ui.mapper.DescribeLogDirsMapper;
import com.provectus.kafka.ui.model.Broker;
import com.provectus.kafka.ui.model.BrokerConfig;
import com.provectus.kafka.ui.model.BrokerLogdirUpdate;
import com.provectus.kafka.ui.model.BrokerMetrics;
import com.provectus.kafka.ui.model.BrokersLogdirs;
import com.provectus.kafka.ui.model.Cluster;
import com.provectus.kafka.ui.model.ClusterMetrics;
import com.provectus.kafka.ui.model.ClusterStats;
import com.provectus.kafka.ui.model.ConsumerGroup;
import com.provectus.kafka.ui.model.ConsumerGroupDetails;
import com.provectus.kafka.ui.model.BrokerConfigDTO;
import com.provectus.kafka.ui.model.BrokerDTO;
import com.provectus.kafka.ui.model.BrokerLogdirUpdateDTO;
import com.provectus.kafka.ui.model.BrokerMetricsDTO;
import com.provectus.kafka.ui.model.BrokersLogdirsDTO;
import com.provectus.kafka.ui.model.ClusterDTO;
import com.provectus.kafka.ui.model.ClusterMetricsDTO;
import com.provectus.kafka.ui.model.ClusterStatsDTO;
import com.provectus.kafka.ui.model.ConsumerGroupDTO;
import com.provectus.kafka.ui.model.ConsumerGroupDetailsDTO;
import com.provectus.kafka.ui.model.ConsumerPosition;
import com.provectus.kafka.ui.model.CreateTopicMessage;
import com.provectus.kafka.ui.model.CreateTopicMessageDTO;
import com.provectus.kafka.ui.model.ExtendedAdminClient;
import com.provectus.kafka.ui.model.Feature;
import com.provectus.kafka.ui.model.InternalTopic;
import com.provectus.kafka.ui.model.KafkaCluster;
import com.provectus.kafka.ui.model.PartitionsIncrease;
import com.provectus.kafka.ui.model.PartitionsIncreaseResponse;
import com.provectus.kafka.ui.model.ReplicationFactorChange;
import com.provectus.kafka.ui.model.ReplicationFactorChangeResponse;
import com.provectus.kafka.ui.model.Topic;
import com.provectus.kafka.ui.model.TopicColumnsToSort;
import com.provectus.kafka.ui.model.TopicConfig;
import com.provectus.kafka.ui.model.TopicCreation;
import com.provectus.kafka.ui.model.TopicDetails;
import com.provectus.kafka.ui.model.TopicMessage;
import com.provectus.kafka.ui.model.TopicMessageEvent;
import com.provectus.kafka.ui.model.TopicMessageSchema;
import com.provectus.kafka.ui.model.TopicUpdate;
import com.provectus.kafka.ui.model.TopicsResponse;
import com.provectus.kafka.ui.model.PartitionsIncreaseDTO;
import com.provectus.kafka.ui.model.PartitionsIncreaseResponseDTO;
import com.provectus.kafka.ui.model.ReplicationFactorChangeDTO;
import com.provectus.kafka.ui.model.ReplicationFactorChangeResponseDTO;
import com.provectus.kafka.ui.model.TopicColumnsToSortDTO;
import com.provectus.kafka.ui.model.TopicConfigDTO;
import com.provectus.kafka.ui.model.TopicCreationDTO;
import com.provectus.kafka.ui.model.TopicDTO;
import com.provectus.kafka.ui.model.TopicDetailsDTO;
import com.provectus.kafka.ui.model.TopicMessageEventDTO;
import com.provectus.kafka.ui.model.TopicMessageSchemaDTO;
import com.provectus.kafka.ui.model.TopicUpdateDTO;
import com.provectus.kafka.ui.model.TopicsResponseDTO;
import com.provectus.kafka.ui.serde.DeserializationService;
import com.provectus.kafka.ui.util.ClusterUtil;
import java.util.Collections;
@ -51,7 +48,6 @@ import lombok.RequiredArgsConstructor;
import lombok.SneakyThrows;
import lombok.extern.log4j.Log4j2;
import org.apache.commons.lang3.StringUtils;
import org.apache.kafka.clients.admin.DeleteConsumerGroupsResult;
import org.apache.kafka.common.errors.GroupIdNotFoundException;
import org.apache.kafka.common.errors.GroupNotEmptyException;
import org.jetbrains.annotations.NotNull;
@ -74,21 +70,21 @@ public class ClusterService {
private final DeserializationService deserializationService;
private final DescribeLogDirsMapper describeLogDirsMapper;

public List<Cluster> getClusters() {
public List<ClusterDTO> getClusters() {
return clustersStorage.getKafkaClusters()
.stream()
.map(clusterMapper::toCluster)
.collect(Collectors.toList());
}

public Mono<BrokerMetrics> getBrokerMetrics(String name, Integer id) {
public Mono<BrokerMetricsDTO> getBrokerMetrics(String name, Integer id) {
return Mono.justOrEmpty(clustersStorage.getClusterByName(name)
.map(c -> c.getMetrics().getInternalBrokerMetrics())
.map(m -> m.get(id))
.map(clusterMapper::toBrokerMetrics));
}

public Mono<ClusterStats> getClusterStats(String name) {
public Mono<ClusterStatsDTO> getClusterStats(String name) {
return Mono.justOrEmpty(
clustersStorage.getClusterByName(name)
.map(KafkaCluster::getMetrics)
@ -96,7 +92,7 @@ public class ClusterService {
);
}

public Mono<ClusterMetrics> getClusterMetrics(String name) {
public Mono<ClusterMetricsDTO> getClusterMetrics(String name) {
return Mono.justOrEmpty(
clustersStorage.getClusterByName(name)
.map(KafkaCluster::getMetrics)
@ -105,11 +101,11 @@ public class ClusterService {
}

public TopicsResponse getTopics(String name, Optional<Integer> page,
public TopicsResponseDTO getTopics(String name, Optional<Integer> page,
Optional<Integer> nullablePerPage,
Optional<Boolean> showInternal,
Optional<String> search,
Optional<TopicColumnsToSort> sortBy) {
Optional<TopicColumnsToSortDTO> sortBy) {
Predicate<Integer> positiveInt = i -> i > 0;
int perPage = nullablePerPage.filter(positiveInt).orElse(DEFAULT_PAGE_SIZE);
var topicsToSkip = (page.filter(positiveInt).orElse(1) - 1) * perPage;
@ -128,7 +124,7 @@ public class ClusterService {
.collect(Collectors.toList());
var totalPages = (topics.size() / perPage)
+ (topics.size() % perPage == 0 ? 0 : 1);
return new TopicsResponse()
return new TopicsResponseDTO()
.pageCount(totalPages)
.topics(
topics.stream()
@ -145,7 +141,7 @@ public class ClusterService {
);
}

private Comparator<InternalTopic> getComparatorForTopic(Optional<TopicColumnsToSort> sortBy) {
private Comparator<InternalTopic> getComparatorForTopic(Optional<TopicColumnsToSortDTO> sortBy) {
var defaultComparator = Comparator.comparing(InternalTopic::getName);
if (sortBy.isEmpty()) {
return defaultComparator;
@ -163,7 +159,7 @@ public class ClusterService {
}
}

public Optional<TopicDetails> getTopicDetails(String name, String topicName) {
public Optional<TopicDetailsDTO> getTopicDetails(String name, String topicName) {
return clustersStorage.getClusterByName(name)
.flatMap(c ->
Optional.ofNullable(c.getTopics()).map(l -> l.get(topicName)).map(
@ -174,7 +170,7 @@ public class ClusterService {
);
}

public Optional<List<TopicConfig>> getTopicConfigs(String name, String topicName) {
public Optional<List<TopicConfigDTO>> getTopicConfigs(String name, String topicName) {
return clustersStorage.getClusterByName(name)
.map(KafkaCluster::getTopics)
.map(t -> t.get(topicName))
@ -182,7 +178,7 @@ public class ClusterService {
.collect(Collectors.toList()));
}

public Mono<Topic> createTopic(String clusterName, Mono<TopicCreation> topicCreation) {
public Mono<TopicDTO> createTopic(String clusterName, Mono<TopicCreationDTO> topicCreation) {
return clustersStorage.getClusterByName(clusterName).map(cluster ->
kafkaService.createTopic(cluster, topicCreation)
.doOnNext(t -> updateCluster(t, clusterName, cluster))
@ -191,7 +187,7 @@ public class ClusterService {
}

@SneakyThrows
public Mono<ConsumerGroupDetails> getConsumerGroupDetail(String clusterName,
public Mono<ConsumerGroupDetailsDTO> getConsumerGroupDetail(String clusterName,
String consumerGroupId) {
var cluster = clustersStorage.getClusterByName(clusterName).orElseThrow(Throwable::new);
return kafkaService.getConsumerGroups(
@ -203,11 +199,12 @@ public class ClusterService {
);
}

public Mono<List<ConsumerGroup>> getConsumerGroups(String clusterName) {
public Mono<List<ConsumerGroupDTO>> getConsumerGroups(String clusterName) {
return getConsumerGroups(clusterName, Optional.empty());
}

public Mono<List<ConsumerGroup>> getConsumerGroups(String clusterName, Optional<String> topic) {
public Mono<List<ConsumerGroupDTO>> getConsumerGroups(String clusterName,
Optional<String> topic) {
return Mono.justOrEmpty(clustersStorage.getClusterByName(clusterName))
.switchIfEmpty(Mono.error(ClusterNotFoundException::new))
.flatMap(c -> kafkaService.getConsumerGroups(c, topic, Collections.emptyList()))
@ -216,13 +213,13 @@ public class ClusterService {
);
}

public Flux<Broker> getBrokers(String clusterName) {
public Flux<BrokerDTO> getBrokers(String clusterName) {
return Mono.justOrEmpty(clustersStorage.getClusterByName(clusterName))
.switchIfEmpty(Mono.error(ClusterNotFoundException::new))
.flatMapMany(brokerService::getBrokers);
}

public Flux<BrokerConfig> getBrokerConfig(String clusterName, Integer brokerId) {
public Flux<BrokerConfigDTO> getBrokerConfig(String clusterName, Integer brokerId) {
return Mono.justOrEmpty(clustersStorage.getClusterByName(clusterName))
.switchIfEmpty(Mono.error(ClusterNotFoundException::new))
.flatMapMany(c -> brokerService.getBrokersConfig(c, brokerId))
@ -230,8 +227,8 @@ public class ClusterService {
}

@SneakyThrows
public Mono<Topic> updateTopic(String clusterName, String topicName,
Mono<TopicUpdate> topicUpdate) {
public Mono<TopicDTO> updateTopic(String clusterName, String topicName,
Mono<TopicUpdateDTO> topicUpdate) {
return clustersStorage.getClusterByName(clusterName).map(cl ->
topicUpdate
.flatMap(t -> kafkaService.updateTopic(cl, topicName, t))
@ -267,7 +264,7 @@ public class ClusterService {
return updatedCluster;
}

public Mono<Cluster> updateCluster(String clusterName) {
public Mono<ClusterDTO> updateCluster(String clusterName) {
return clustersStorage.getClusterByName(clusterName)
.map(cluster -> kafkaService.getUpdatedCluster(cluster)
.doOnNext(updatedCluster -> clustersStorage
@ -276,7 +273,7 @@ public class ClusterService {
.orElse(Mono.error(new ClusterNotFoundException()));
}

public Flux<TopicMessageEvent> getMessages(String clusterName, String topicName,
public Flux<TopicMessageEventDTO> getMessages(String clusterName, String topicName,
ConsumerPosition consumerPosition, String query,
Integer limit) {
return clustersStorage.getClusterByName(clusterName)
@ -295,14 +292,14 @@ public class ClusterService {
.flatMap(offsets -> kafkaService.deleteTopicMessages(cluster, offsets));
}

public Mono<PartitionsIncreaseResponse> increaseTopicPartitions(
public Mono<PartitionsIncreaseResponseDTO> increaseTopicPartitions(
String clusterName,
String topicName,
PartitionsIncrease partitionsIncrease) {
PartitionsIncreaseDTO partitionsIncrease) {
return clustersStorage.getClusterByName(clusterName).map(cluster ->
kafkaService.increaseTopicPartitions(cluster, topicName, partitionsIncrease)
.doOnNext(t -> updateCluster(t, cluster.getName(), cluster))
.map(t -> new PartitionsIncreaseResponse()
.map(t -> new PartitionsIncreaseResponseDTO()
.topicName(t.getName())
.totalPartitionsCount(t.getPartitionCount())))
.orElse(Mono.error(new ClusterNotFoundException(
@ -325,7 +322,7 @@ public class ClusterService {
.orElse(Mono.empty());
}

public TopicMessageSchema getTopicSchema(String clusterName, String topicName) {
public TopicMessageSchemaDTO getTopicSchema(String clusterName, String topicName) {
var cluster = clustersStorage.getClusterByName(clusterName)
.orElseThrow(ClusterNotFoundException::new);
if (!cluster.getTopics().containsKey(topicName)) {
@ -336,7 +333,7 @@ public class ClusterService {
.getTopicSchema(topicName);
}

public Mono<Void> sendMessage(String clusterName, String topicName, CreateTopicMessage msg) {
public Mono<Void> sendMessage(String clusterName, String topicName, CreateTopicMessageDTO msg) {
var cluster = clustersStorage.getClusterByName(clusterName)
.orElseThrow(ClusterNotFoundException::new);
if (!cluster.getTopics().containsKey(topicName)) {
@ -363,21 +360,21 @@ public class ClusterService {
}
}

public Mono<ReplicationFactorChangeResponse> changeReplicationFactor(
public Mono<ReplicationFactorChangeResponseDTO> changeReplicationFactor(
String clusterName,
String topicName,
ReplicationFactorChange replicationFactorChange) {
ReplicationFactorChangeDTO replicationFactorChange) {
return clustersStorage.getClusterByName(clusterName).map(cluster ->
kafkaService.changeReplicationFactor(cluster, topicName, replicationFactorChange)
.doOnNext(topic -> updateCluster(topic, cluster.getName(), cluster))
.map(t -> new ReplicationFactorChangeResponse()
.map(t -> new ReplicationFactorChangeResponseDTO()
.topicName(t.getName())
.totalReplicationFactor(t.getReplicationFactor())))
.orElse(Mono.error(new ClusterNotFoundException(
String.format("No cluster for name '%s'", clusterName))));
}

public Flux<BrokersLogdirs> getAllBrokersLogdirs(String clusterName, List<Integer> brokers) {
public Flux<BrokersLogdirsDTO> getAllBrokersLogdirs(String clusterName, List<Integer> brokers) {
return Mono.justOrEmpty(clustersStorage.getClusterByName(clusterName))
.flatMap(c -> kafkaService.getClusterLogDirs(c, brokers))
.map(describeLogDirsMapper::toBrokerLogDirsList)
@ -385,7 +382,7 @@ public class ClusterService {
}

public Mono<Void> updateBrokerLogDir(
String clusterName, Integer id, BrokerLogdirUpdate brokerLogDir) {
String clusterName, Integer id, BrokerLogdirUpdateDTO brokerLogDir) {
return Mono.justOrEmpty(clustersStorage.getClusterByName(clusterName))
.flatMap(c -> kafkaService.updateBrokerLogDir(c, id, brokerLogDir));
}

@ -5,9 +5,9 @@ import com.provectus.kafka.ui.emitter.BackwardRecordEmitter;
import com.provectus.kafka.ui.emitter.ForwardRecordEmitter;
import com.provectus.kafka.ui.model.ConsumerPosition;
import com.provectus.kafka.ui.model.KafkaCluster;
import com.provectus.kafka.ui.model.SeekDirection;
import com.provectus.kafka.ui.model.TopicMessage;
import com.provectus.kafka.ui.model.TopicMessageEvent;
import com.provectus.kafka.ui.model.SeekDirectionDTO;
import com.provectus.kafka.ui.model.TopicMessageDTO;
import com.provectus.kafka.ui.model.TopicMessageEventDTO;
import com.provectus.kafka.ui.serde.DeserializationService;
import com.provectus.kafka.ui.serde.RecordSerDe;
import com.provectus.kafka.ui.util.FilterTopicMessageEvents;
@ -43,17 +43,17 @@ public class ConsumingService {
private final DeserializationService deserializationService;
private final ObjectMapper objectMapper = new ObjectMapper();

public Flux<TopicMessageEvent> loadMessages(KafkaCluster cluster, String topic,
public Flux<TopicMessageEventDTO> loadMessages(KafkaCluster cluster, String topic,
ConsumerPosition consumerPosition, String query,
Integer limit) {
int recordsLimit = Optional.ofNullable(limit)
.map(s -> Math.min(s, MAX_RECORD_LIMIT))
.orElse(DEFAULT_RECORD_LIMIT);

java.util.function.Consumer<? super FluxSink<TopicMessageEvent>> emitter;
java.util.function.Consumer<? super FluxSink<TopicMessageEventDTO>> emitter;
RecordSerDe recordDeserializer =
deserializationService.getRecordDeserializerForCluster(cluster);
if (consumerPosition.getSeekDirection().equals(SeekDirection.FORWARD)) {
if (consumerPosition.getSeekDirection().equals(SeekDirectionDTO.FORWARD)) {
emitter = new ForwardRecordEmitter(
() -> kafkaService.createConsumer(cluster),
new OffsetsSeekForward(topic, consumerPosition),
@ -104,14 +104,14 @@ public class ConsumingService {
.collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
}

private boolean filterTopicMessage(TopicMessageEvent message, String query) {
private boolean filterTopicMessage(TopicMessageEventDTO message, String query) {
log.info("filter");
if (StringUtils.isEmpty(query)
|| !message.getType().equals(TopicMessageEvent.TypeEnum.MESSAGE)) {
|| !message.getType().equals(TopicMessageEventDTO.TypeEnum.MESSAGE)) {
return true;
}

final TopicMessage msg = message.getMessage();
final TopicMessageDTO msg = message.getMessage();
return (!StringUtils.isEmpty(msg.getKey()) && msg.getKey().contains(query))
|| (!StringUtils.isEmpty(msg.getContent()) && msg.getContent().contains(query));
}

@ -9,17 +9,17 @@ import com.provectus.kafka.ui.exception.ClusterNotFoundException;
import com.provectus.kafka.ui.exception.ConnectNotFoundException;
import com.provectus.kafka.ui.mapper.ClusterMapper;
import com.provectus.kafka.ui.mapper.KafkaConnectMapper;
import com.provectus.kafka.ui.model.Connect;
import com.provectus.kafka.ui.model.Connector;
import com.provectus.kafka.ui.model.ConnectorAction;
import com.provectus.kafka.ui.model.ConnectorPlugin;
import com.provectus.kafka.ui.model.ConnectorPluginConfigValidationResponse;
import com.provectus.kafka.ui.model.ConnectorState;
import com.provectus.kafka.ui.model.FullConnectorInfo;
import com.provectus.kafka.ui.model.ConnectDTO;
import com.provectus.kafka.ui.model.ConnectorActionDTO;
import com.provectus.kafka.ui.model.ConnectorDTO;
import com.provectus.kafka.ui.model.ConnectorPluginConfigValidationResponseDTO;
import com.provectus.kafka.ui.model.ConnectorPluginDTO;
import com.provectus.kafka.ui.model.ConnectorStateDTO;
import com.provectus.kafka.ui.model.FullConnectorInfoDTO;
import com.provectus.kafka.ui.model.KafkaCluster;
import com.provectus.kafka.ui.model.KafkaConnectCluster;
import com.provectus.kafka.ui.model.NewConnector;
import com.provectus.kafka.ui.model.Task;
import com.provectus.kafka.ui.model.NewConnectorDTO;
import com.provectus.kafka.ui.model.TaskDTO;
import com.provectus.kafka.ui.model.connect.InternalConnectInfo;
import java.util.Collection;
import java.util.List;
@ -47,7 +47,7 @@ public class KafkaConnectService {
private final KafkaConnectMapper kafkaConnectMapper;
private final ObjectMapper objectMapper;

public Mono<Flux<Connect>> getConnects(String clusterName) {
public Mono<Flux<ConnectDTO>> getConnects(String clusterName) {
return Mono.just(
Flux.fromIterable(clustersStorage.getClusterByName(clusterName)
.map(KafkaCluster::getKafkaConnect).stream()
@ -58,7 +58,8 @@ public class KafkaConnectService {
);
}

public Flux<FullConnectorInfo> getAllConnectors(final String clusterName, final String search) {
public Flux<FullConnectorInfoDTO> getAllConnectors(final String clusterName,
final String search) {
return getConnects(clusterName)
.flatMapMany(Function.identity())
.flatMap(connect -> getConnectorNames(clusterName, connect))
@ -72,7 +73,7 @@ public class KafkaConnectService {
)
)
.flatMap(connectInfo -> {
Connector connector = connectInfo.getConnector();
ConnectorDTO connector = connectInfo.getConnector();
return getConnectorTasks(clusterName, connector.getConnect(), connector.getName())
.collectList()
.map(tasks -> InternalConnectInfo.builder()
@ -83,7 +84,7 @@ public class KafkaConnectService {
);
})
.flatMap(connectInfo -> {
Connector connector = connectInfo.getConnector();
ConnectorDTO connector = connectInfo.getConnector();
return getConnectorTopics(clusterName, connector.getConnect(), connector.getName())
.map(ct -> InternalConnectInfo.builder()
.connector(connector)
@ -97,7 +98,7 @@ public class KafkaConnectService {
.filter(matchesSearchTerm(search));
}

private Predicate<FullConnectorInfo> matchesSearchTerm(final String search) {
private Predicate<FullConnectorInfoDTO> matchesSearchTerm(final String search) {
return (connector) -> getSearchValues(connector)
.anyMatch(value -> value.contains(
StringUtils.defaultString(
@ -106,7 +107,7 @@ public class KafkaConnectService {
.toUpperCase()));
}

private Stream<String> getSearchValues(FullConnectorInfo fullConnectorInfo) {
private Stream<String> getSearchValues(FullConnectorInfoDTO fullConnectorInfo) {
return Stream.of(
fullConnectorInfo.getName(),
fullConnectorInfo.getStatus().getState().getValue(),
@ -124,7 +125,7 @@ public class KafkaConnectService {
);
}

private Flux<Tuple2<String, String>> getConnectorNames(String clusterName, Connect connect) {
private Flux<Tuple2<String, String>> getConnectorNames(String clusterName, ConnectDTO connect) {
return getConnectors(clusterName, connect.getName())
.collectList().map(e -> e.get(0))
// for some reason `getConnectors` method returns the response as a single string
@ -147,8 +148,8 @@ public class KafkaConnectService {
);
}

public Mono<Connector> createConnector(String clusterName, String connectName,
Mono<NewConnector> connector) {
public Mono<ConnectorDTO> createConnector(String clusterName, String connectName,
Mono<NewConnectorDTO> connector) {
return getConnectAddress(clusterName, connectName)
.flatMap(connect ->
connector
@ -160,7 +161,7 @@ public class KafkaConnectService {
);
}

public Mono<Connector> getConnector(String clusterName, String connectName,
public Mono<ConnectorDTO> getConnector(String clusterName, String connectName,
String connectorName) {
return getConnectAddress(clusterName, connectName)
.flatMap(connect -> KafkaConnectClients.withBaseUrl(connect).getConnector(connectorName)
@ -169,7 +170,7 @@ public class KafkaConnectService {
KafkaConnectClients.withBaseUrl(connect).getConnectorStatus(connector.getName())
.map(connectorStatus -> {
var status = connectorStatus.getConnector();
Connector result = (Connector) new Connector()
ConnectorDTO result = (ConnectorDTO) new ConnectorDTO()
.connect(connectName)
.status(kafkaConnectMapper.fromClient(status))
.type(connector.getType())
@ -183,7 +184,7 @@ public class KafkaConnectService {
.anyMatch(TaskStatus.StateEnum.FAILED::equals);

if (isAnyTaskFailed) {
result.getStatus().state(ConnectorState.TASK_FAILED);
result.getStatus().state(ConnectorStateDTO.TASK_FAILED);
}
}
return result;
@ -200,7 +201,7 @@ public class KafkaConnectService {
);
}

public Mono<Connector> setConnectorConfig(String clusterName, String connectName,
public Mono<ConnectorDTO> setConnectorConfig(String clusterName, String connectName,
String connectorName, Mono<Object> requestBody) {
return getConnectAddress(clusterName, connectName)
.flatMap(connect ->
@ -220,7 +221,7 @@ public class KafkaConnectService {
}

public Mono<Void> updateConnectorState(String clusterName, String connectName,
String connectorName, ConnectorAction action) {
String connectorName, ConnectorActionDTO action) {
Function<String, Mono<Void>> kafkaClientCall;
switch (action) {
case RESTART:
@ -242,7 +243,7 @@ public class KafkaConnectService {
.flatMap(kafkaClientCall);
}

public Flux<Task> getConnectorTasks(String clusterName, String connectName,
public Flux<TaskDTO> getConnectorTasks(String clusterName, String connectName,
String connectorName) {
return getConnectAddress(clusterName, connectName)
.flatMapMany(connect ->
@ -265,7 +266,8 @@ public class KafkaConnectService {
);
}

public Mono<Flux<ConnectorPlugin>> getConnectorPlugins(String clusterName, String connectName) {
public Mono<Flux<ConnectorPluginDTO>> getConnectorPlugins(String clusterName,
String connectName) {
return Mono.just(getConnectAddress(clusterName, connectName)
.flatMapMany(connect ->
KafkaConnectClients.withBaseUrl(connect).getConnectorPlugins()
@ -273,7 +275,7 @@ public class KafkaConnectService {
));
}

public Mono<ConnectorPluginConfigValidationResponse> validateConnectorPluginConfig(
public Mono<ConnectorPluginConfigValidationResponseDTO> validateConnectorPluginConfig(
String clusterName, String connectName, String pluginName, Mono<Object> requestBody) {
return getConnectAddress(clusterName, connectName)
.flatMap(connect ->

@ -5,9 +5,9 @@ import com.provectus.kafka.ui.exception.LogDirNotFoundApiException;
import com.provectus.kafka.ui.exception.TopicMetadataException;
import com.provectus.kafka.ui.exception.TopicOrPartitionNotFoundException;
import com.provectus.kafka.ui.exception.ValidationException;
import com.provectus.kafka.ui.model.BrokerLogdirUpdate;
import com.provectus.kafka.ui.model.BrokerLogdirUpdateDTO;
import com.provectus.kafka.ui.model.CleanupPolicy;
import com.provectus.kafka.ui.model.CreateTopicMessage;
import com.provectus.kafka.ui.model.CreateTopicMessageDTO;
import com.provectus.kafka.ui.model.ExtendedAdminClient;
import com.provectus.kafka.ui.model.InternalBrokerDiskUsage;
import com.provectus.kafka.ui.model.InternalBrokerMetrics;
@ -19,12 +19,12 @@ import com.provectus.kafka.ui.model.InternalSegmentSizeDto;
import com.provectus.kafka.ui.model.InternalTopic;
import com.provectus.kafka.ui.model.InternalTopicConfig;
import com.provectus.kafka.ui.model.KafkaCluster;
import com.provectus.kafka.ui.model.Metric;
import com.provectus.kafka.ui.model.PartitionsIncrease;
import com.provectus.kafka.ui.model.ReplicationFactorChange;
import com.provectus.kafka.ui.model.ServerStatus;
import com.provectus.kafka.ui.model.TopicCreation;
import com.provectus.kafka.ui.model.TopicUpdate;
import com.provectus.kafka.ui.model.MetricDTO;
import com.provectus.kafka.ui.model.PartitionsIncreaseDTO;
import com.provectus.kafka.ui.model.ReplicationFactorChangeDTO;
import com.provectus.kafka.ui.model.ServerStatusDTO;
import com.provectus.kafka.ui.model.TopicCreationDTO;
import com.provectus.kafka.ui.model.TopicUpdateDTO;
import com.provectus.kafka.ui.serde.DeserializationService;
import com.provectus.kafka.ui.serde.RecordSerDe;
import com.provectus.kafka.ui.util.ClusterUtil;
@ -149,7 +149,7 @@ public class KafkaService {
log.error("Failed to collect cluster {} info", cluster.getName(), e)
).onErrorResume(
e -> Mono.just(cluster.toBuilder()
.status(ServerStatus.OFFLINE)
.status(ServerStatusDTO.OFFLINE)
.lastKafkaException(e)
.build())
);
@ -168,12 +168,12 @@ public class KafkaService {

InternalClusterMetrics topicsMetrics = collectTopicsMetrics(topics);

ServerStatus zookeeperStatus = ServerStatus.OFFLINE;
ServerStatusDTO zookeeperStatus = ServerStatusDTO.OFFLINE;
Throwable zookeeperException = null;
try {
zookeeperStatus = zookeeperService.isZookeeperOnline(currentCluster)
? ServerStatus.ONLINE
: ServerStatus.OFFLINE;
? ServerStatusDTO.ONLINE
: ServerStatusDTO.OFFLINE;
} catch (Throwable e) {
zookeeperException = e;
}
@ -193,7 +193,7 @@ public class KafkaService {

return currentCluster.toBuilder()
.version(version)
.status(ServerStatus.ONLINE)
.status(ServerStatusDTO.ONLINE)
.zookeeperStatus(zookeeperStatus)
.lastZookeeperException(zookeeperException)
.lastKafkaException(null)
@ -290,7 +290,7 @@ public class KafkaService {

@SneakyThrows
public Mono<InternalTopic> createTopic(AdminClient adminClient,
Mono<TopicCreation> topicCreation) {
Mono<TopicCreationDTO> topicCreation) {
return topicCreation.flatMap(
topicData -> {
NewTopic newTopic = new NewTopic(topicData.getName(), topicData.getPartitions(),
@ -306,7 +306,8 @@ public class KafkaService {
).switchIfEmpty(Mono.error(new RuntimeException("Can't find created topic")));
}

public Mono<InternalTopic> createTopic(KafkaCluster cluster, Mono<TopicCreation> topicCreation) {
public Mono<InternalTopic> createTopic(
KafkaCluster cluster, Mono<TopicCreationDTO> topicCreation) {
return adminClientService.getOrCreateAdminClient(cluster)
.flatMap(ac -> createTopic(ac.getAdminClient(), topicCreation));
}
@ -427,7 +428,7 @@ public class KafkaService {

@SneakyThrows
public Mono<InternalTopic> updateTopic(KafkaCluster cluster, String topicName,
TopicUpdate topicUpdate) {
TopicUpdateDTO topicUpdate) {
ConfigResource topicCr = new ConfigResource(ConfigResource.Type.TOPIC, topicName);
return adminClientService.getOrCreateAdminClient(cluster)
.flatMap(ac -> {
@ -448,7 +449,7 @@ public class KafkaService {
.filter(t -> t.getName().equals(topicName)).findFirst().orElseThrow());
}

private Mono<String> incrementalAlterConfig(TopicUpdate topicUpdate, ConfigResource topicCr,
private Mono<String> incrementalAlterConfig(TopicUpdateDTO topicUpdate, ConfigResource topicCr,
ExtendedAdminClient ac) {
List<AlterConfigOp> listOp = topicUpdate.getConfigs().entrySet().stream()
.flatMap(cfg -> Stream.of(new AlterConfigOp(new ConfigEntry(cfg.getKey(), cfg.getValue()),
@ -459,7 +460,7 @@ public class KafkaService {
}

@SuppressWarnings("deprecation")
private Mono<String> alterConfig(TopicUpdate topicUpdate, ConfigResource topicCr,
private Mono<String> alterConfig(TopicUpdateDTO topicUpdate, ConfigResource topicCr,
ExtendedAdminClient ac) {
List<ConfigEntry> configEntries = topicUpdate.getConfigs().entrySet().stream()
.flatMap(cfg -> Stream.of(new ConfigEntry(cfg.getKey(), cfg.getValue())))
@ -601,7 +602,7 @@ public class KafkaService {
);
}

public List<Metric> getJmxMetric(String clusterName, Node node) {
public List<MetricDTO> getJmxMetric(String clusterName, Node node) {
return clustersStorage.getClusterByName(clusterName)
.filter(c -> c.getJmxPort() != null)
.filter(c -> c.getJmxPort() > 0)
@ -631,11 +632,12 @@ public class KafkaService {

private InternalClusterMetrics calculateClusterMetrics(
InternalClusterMetrics internalClusterMetrics) {
final List<Metric> metrics = internalClusterMetrics.getInternalBrokerMetrics().values().stream()
final List<MetricDTO> metrics = internalClusterMetrics.getInternalBrokerMetrics().values()
.stream()
.flatMap(b -> b.getMetrics().stream())
.collect(
Collectors.groupingBy(
Metric::getCanonicalName,
MetricDTO::getCanonicalName,
Collectors.reducing(jmxClusterUtil::reduceJmxMetrics)
)
).values().stream()
@ -653,7 +655,8 @@ public class KafkaService {
return metricsBuilder.build();
}

private Map<String, BigDecimal> findTopicMetrics(List<Metric> metrics, JmxMetricsName metricsName,
private Map<String, BigDecimal> findTopicMetrics(List<MetricDTO> metrics,
JmxMetricsName metricsName,
JmxMetricsValueName valueName) {
return metrics.stream().filter(m -> metricsName.name().equals(m.getName()))
.filter(m -> m.getParams().containsKey("topic"))
@ -707,7 +710,7 @@ public class KafkaService {
}

public Mono<RecordMetadata> sendMessage(KafkaCluster cluster, String topic,
CreateTopicMessage msg) {
CreateTopicMessageDTO msg) {
RecordSerDe serde =
deserializationService.getRecordDeserializerForCluster(cluster);

@ -762,7 +765,7 @@ public class KafkaService {
public Mono<InternalTopic> increaseTopicPartitions(
KafkaCluster cluster,
String topicName,
PartitionsIncrease partitionsIncrease) {
PartitionsIncreaseDTO partitionsIncrease) {
return adminClientService.getOrCreateAdminClient(cluster)
.flatMap(ac -> {
Integer actualCount = cluster.getTopics().get(topicName).getPartitionCount();
@ -804,7 +807,7 @@ public class KafkaService {
public Mono<InternalTopic> changeReplicationFactor(
KafkaCluster cluster,
String topicName,
ReplicationFactorChange replicationFactorChange) {
ReplicationFactorChangeDTO replicationFactorChange) {
return adminClientService.getOrCreateAdminClient(cluster)
.flatMap(ac -> {
Integer actual = cluster.getTopics().get(topicName).getReplicationFactor();
@ -848,7 +851,7 @@ public class KafkaService {
private Map<TopicPartition, Optional<NewPartitionReassignment>> getPartitionsReassignments(
KafkaCluster cluster,
String topicName,
ReplicationFactorChange replicationFactorChange) {
ReplicationFactorChangeDTO replicationFactorChange) {
// Current assignment map (Partition number -> List of brokers)
Map<Integer, List<Integer>> currentAssignment = getCurrentAssignment(cluster, topicName);
// Brokers map (Broker id -> count)
@ -945,13 +948,13 @@ public class KafkaService {
}

public Mono<Void> updateBrokerLogDir(KafkaCluster cluster, Integer broker,
BrokerLogdirUpdate brokerLogDir) {
BrokerLogdirUpdateDTO brokerLogDir) {
return adminClientService.getOrCreateAdminClient(cluster)
.flatMap(ac -> updateBrokerLogDir(ac, brokerLogDir, broker));
}

private Mono<Void> updateBrokerLogDir(ExtendedAdminClient adminMono,
BrokerLogdirUpdate b,
BrokerLogdirUpdateDTO b,
Integer broker) {

Map<TopicPartitionReplica, String> req = Map.of(

@ -5,8 +5,8 @@ import com.provectus.kafka.ui.exception.ClusterNotFoundException;
import com.provectus.kafka.ui.exception.KsqlDbNotFoundException;
import com.provectus.kafka.ui.exception.UnprocessableEntityException;
import com.provectus.kafka.ui.model.KafkaCluster;
import com.provectus.kafka.ui.model.KsqlCommand;
import com.provectus.kafka.ui.model.KsqlCommandResponse;
import com.provectus.kafka.ui.model.KsqlCommandDTO;
import com.provectus.kafka.ui.model.KsqlCommandResponseDTO;
import com.provectus.kafka.ui.strategy.ksql.statement.BaseStrategy;
import java.util.List;
import lombok.RequiredArgsConstructor;
@ -20,8 +20,8 @@ public class KsqlService {
private final ClustersStorage clustersStorage;
private final List<BaseStrategy> ksqlStatementStrategies;

public Mono<KsqlCommandResponse> executeKsqlCommand(String clusterName,
Mono<KsqlCommand> ksqlCommand) {
public Mono<KsqlCommandResponseDTO> executeKsqlCommand(String clusterName,
Mono<KsqlCommandDTO> ksqlCommand) {
return Mono.justOrEmpty(clustersStorage.getClusterByName(clusterName))
.switchIfEmpty(Mono.error(ClusterNotFoundException::new))
.map(KafkaCluster::getKsqldbServer)
@ -37,7 +37,7 @@ public class KsqlService {
}

private Mono<BaseStrategy> getStatementStrategyForKsqlCommand(
Mono<KsqlCommand> ksqlCommand) {
Mono<KsqlCommandDTO> ksqlCommand) {
return ksqlCommand
.map(command -> ksqlStatementStrategies.stream()
.filter(s -> s.test(command.getKsql()))

@ -9,13 +9,13 @@ import com.provectus.kafka.ui.exception.SchemaNotFoundException;
import com.provectus.kafka.ui.exception.UnprocessableEntityException;
import com.provectus.kafka.ui.exception.ValidationException;
import com.provectus.kafka.ui.mapper.ClusterMapper;
import com.provectus.kafka.ui.model.CompatibilityCheckResponse;
import com.provectus.kafka.ui.model.CompatibilityLevel;
import com.provectus.kafka.ui.model.CompatibilityCheckResponseDTO;
import com.provectus.kafka.ui.model.CompatibilityLevelDTO;
import com.provectus.kafka.ui.model.InternalSchemaRegistry;
import com.provectus.kafka.ui.model.KafkaCluster;
import com.provectus.kafka.ui.model.NewSchemaSubject;
import com.provectus.kafka.ui.model.SchemaSubject;
import com.provectus.kafka.ui.model.SchemaType;
import com.provectus.kafka.ui.model.NewSchemaSubjectDTO;
import com.provectus.kafka.ui.model.SchemaSubjectDTO;
import com.provectus.kafka.ui.model.SchemaTypeDTO;
import com.provectus.kafka.ui.model.schemaregistry.ErrorResponse;
import com.provectus.kafka.ui.model.schemaregistry.InternalCompatibilityCheck;
import com.provectus.kafka.ui.model.schemaregistry.InternalCompatibilityLevel;
@ -56,7 +56,7 @@ public class SchemaRegistryService {
private final ClusterMapper mapper;
private final WebClient webClient;

public Flux<SchemaSubject> getAllLatestVersionSchemas(String clusterName) {
public Flux<SchemaSubjectDTO> getAllLatestVersionSchemas(String clusterName) {
var allSubjectNames = getAllSubjectNames(clusterName);
return allSubjectNames
.flatMapMany(Flux::fromArray)
@ -76,7 +76,7 @@ public class SchemaRegistryService {
.orElse(Mono.error(ClusterNotFoundException::new));
}

public Flux<SchemaSubject> getAllVersionsBySubject(String clusterName, String subject) {
public Flux<SchemaSubjectDTO> getAllVersionsBySubject(String clusterName, String subject) {
Flux<Integer> versions = getSubjectVersions(clusterName, subject);
return versions.flatMap(version -> getSchemaSubjectByVersion(clusterName, subject, version));
}
@ -94,17 +94,17 @@ public class SchemaRegistryService {
).orElse(Flux.error(ClusterNotFoundException::new));
}

public Mono<SchemaSubject> getSchemaSubjectByVersion(String clusterName, String schemaName,
public Mono<SchemaSubjectDTO> getSchemaSubjectByVersion(String clusterName, String schemaName,
Integer version) {
return this.getSchemaSubject(clusterName, schemaName, String.valueOf(version));
}

public Mono<SchemaSubject> getLatestSchemaVersionBySubject(String clusterName,
public Mono<SchemaSubjectDTO> getLatestSchemaVersionBySubject(String clusterName,
String schemaName) {
return this.getSchemaSubject(clusterName, schemaName, LATEST);
}

private Mono<SchemaSubject> getSchemaSubject(String clusterName, String schemaName,
private Mono<SchemaSubjectDTO> getSchemaSubject(String clusterName, String schemaName,
String version) {
return clustersStorage.getClusterByName(clusterName)
.map(cluster -> configuredWebClient(
@ -114,11 +114,11 @@ public class SchemaRegistryService {
.retrieve()
.onStatus(NOT_FOUND::equals,
throwIfNotFoundStatus(formatted(NO_SUCH_SCHEMA_VERSION, schemaName, version))
).bodyToMono(SchemaSubject.class)
).bodyToMono(SchemaSubjectDTO.class)
.map(this::withSchemaType)
.zipWith(getSchemaCompatibilityInfoOrGlobal(clusterName, schemaName))
.map(tuple -> {
SchemaSubject schema = tuple.getT1();
SchemaSubjectDTO schema = tuple.getT1();
String compatibilityLevel = tuple.getT2().getCompatibility().getValue();
schema.setCompatibilityLevel(compatibilityLevel);
return schema;
@ -128,12 +128,12 @@ public class SchemaRegistryService {
}

/**
* If {@link SchemaSubject#getSchemaType()} is null, then AVRO, otherwise,
* If {@link SchemaSubjectDTO#getSchemaType()} is null, then AVRO, otherwise,
* adds the schema type as is.
*/
@NotNull
private SchemaSubject withSchemaType(SchemaSubject s) {
return s.schemaType(Optional.ofNullable(s.getSchemaType()).orElse(SchemaType.AVRO));
private SchemaSubjectDTO withSchemaType(SchemaSubjectDTO s) {
return s.schemaType(Optional.ofNullable(s.getSchemaType()).orElse(SchemaTypeDTO.AVRO));
}

public Mono<ResponseEntity<Void>> deleteSchemaSubjectByVersion(String clusterName,
@ -180,12 +180,12 @@ public class SchemaRegistryService {
* Checks whether the provided schema duplicates the previous or not, creates a new schema
* and then returns the whole content by requesting its latest version.
*/
public Mono<SchemaSubject> registerNewSchema(String clusterName,
Mono<NewSchemaSubject> newSchemaSubject) {
public Mono<SchemaSubjectDTO> registerNewSchema(String clusterName,
Mono<NewSchemaSubjectDTO> newSchemaSubject) {
return newSchemaSubject
.flatMap(schema -> {
SchemaType schemaType =
SchemaType.AVRO == schema.getSchemaType() ? null : schema.getSchemaType();
SchemaTypeDTO schemaType =
SchemaTypeDTO.AVRO == schema.getSchemaType() ? null : schema.getSchemaType();
Mono<InternalNewSchema> newSchema =
Mono.just(new InternalNewSchema(schema.getSchema(), schemaType));
String subject = schema.getSubject();
@ -218,7 +218,7 @@ public class SchemaRegistryService {
}

@NotNull
private Mono<SchemaSubject> checkSchemaOnDuplicate(String subject,
private Mono<SchemaSubjectDTO> checkSchemaOnDuplicate(String subject,
Mono<InternalNewSchema> newSchemaSubject,
InternalSchemaRegistry schemaRegistry) {
return configuredWebClient(
@ -232,7 +232,7 @@ public class SchemaRegistryService {
.onStatus(UNPROCESSABLE_ENTITY::equals,
r -> r.bodyToMono(ErrorResponse.class)
.flatMap(x -> Mono.error(new UnprocessableEntityException(x.getMessage()))))
.bodyToMono(SchemaSubject.class)
.bodyToMono(SchemaSubjectDTO.class)
.filter(s -> Objects.isNull(s.getId()))
.switchIfEmpty(Mono.error(new DuplicateEntityException("Such schema already exists")));
}
@ -247,10 +247,10 @@ public class SchemaRegistryService {
* Updates a compatibility level for a <code>schemaName</code>.
*
* @param schemaName is a schema subject name
* @see com.provectus.kafka.ui.model.CompatibilityLevel.CompatibilityEnum
* @see com.provectus.kafka.ui.model.CompatibilityLevelDTO.CompatibilityEnum
*/
public Mono<Void> updateSchemaCompatibility(String clusterName, String schemaName,
Mono<CompatibilityLevel> compatibilityLevel) {
Mono<CompatibilityLevelDTO> compatibilityLevel) {
return clustersStorage.getClusterByName(clusterName)
.map(cluster -> {
String configEndpoint = Objects.isNull(schemaName) ? "/config" : "/config/{schemaName}";
@ -259,7 +259,7 @@ public class SchemaRegistryService {
HttpMethod.PUT,
configEndpoint, schemaName)
.contentType(MediaType.APPLICATION_JSON)
.body(BodyInserters.fromPublisher(compatibilityLevel, CompatibilityLevel.class))
.body(BodyInserters.fromPublisher(compatibilityLevel, CompatibilityLevelDTO.class))
.retrieve()
.onStatus(NOT_FOUND::equals,
throwIfNotFoundStatus(formatted(NO_SUCH_SCHEMA, schemaName)))
@ -268,11 +268,11 @@ public class SchemaRegistryService {
}

public Mono<Void> updateSchemaCompatibility(String clusterName,
Mono<CompatibilityLevel> compatibilityLevel) {
Mono<CompatibilityLevelDTO> compatibilityLevel) {
return updateSchemaCompatibility(clusterName, null, compatibilityLevel);
}

public Mono<CompatibilityLevel> getSchemaCompatibilityLevel(String clusterName,
public Mono<CompatibilityLevelDTO> getSchemaCompatibilityLevel(String clusterName,
String schemaName) {
return clustersStorage.getClusterByName(clusterName)
.map(cluster -> {
@ -288,25 +288,25 @@ public class SchemaRegistryService {
}).orElse(Mono.empty());
}

public Mono<CompatibilityLevel> getGlobalSchemaCompatibilityLevel(String clusterName) {
public Mono<CompatibilityLevelDTO> getGlobalSchemaCompatibilityLevel(String clusterName) {
return this.getSchemaCompatibilityLevel(clusterName, null);
}

private Mono<CompatibilityLevel> getSchemaCompatibilityInfoOrGlobal(String clusterName,
private Mono<CompatibilityLevelDTO> getSchemaCompatibilityInfoOrGlobal(String clusterName,
String schemaName) {
return this.getSchemaCompatibilityLevel(clusterName, schemaName)
.switchIfEmpty(this.getGlobalSchemaCompatibilityLevel(clusterName));
}

public Mono<CompatibilityCheckResponse> checksSchemaCompatibility(
String clusterName, String schemaName, Mono<NewSchemaSubject> newSchemaSubject) {
public Mono<CompatibilityCheckResponseDTO> checksSchemaCompatibility(
String clusterName, String schemaName, Mono<NewSchemaSubjectDTO> newSchemaSubject) {
return clustersStorage.getClusterByName(clusterName)
.map(cluster -> configuredWebClient(
cluster,
HttpMethod.POST,
"/compatibility/subjects/{schemaName}/versions/latest", schemaName)
.contentType(MediaType.APPLICATION_JSON)
.body(BodyInserters.fromPublisher(newSchemaSubject, NewSchemaSubject.class))
.body(BodyInserters.fromPublisher(newSchemaSubject, NewSchemaSubjectDTO.class))
.retrieve()
.onStatus(NOT_FOUND::equals,
throwIfNotFoundStatus(formatted(NO_SUCH_SCHEMA, schemaName)))

|
||||
|
|
|
@@ -2,9 +2,9 @@ package com.provectus.kafka.ui.strategy.ksql.statement;

import com.fasterxml.jackson.databind.JsonNode;
import com.provectus.kafka.ui.exception.UnprocessableEntityException;
import com.provectus.kafka.ui.model.KsqlCommand;
import com.provectus.kafka.ui.model.KsqlCommandResponse;
import com.provectus.kafka.ui.model.Table;
import com.provectus.kafka.ui.model.KsqlCommandDTO;
import com.provectus.kafka.ui.model.KsqlCommandResponseDTO;
import com.provectus.kafka.ui.model.TableDTO;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
@@ -20,7 +20,7 @@ public abstract class BaseStrategy {
protected static final String QUERY_REQUEST_PATH = "/query";
private static final String MAPPING_EXCEPTION_ERROR = "KSQL DB response mapping error";
protected String host = null;
protected KsqlCommand ksqlCommand = null;
protected KsqlCommandDTO ksqlCommand = null;

public String getUri() {
if (this.host != null) {
@@ -38,11 +38,11 @@ public abstract class BaseStrategy {
return this;
}

public KsqlCommand getKsqlCommand() {
public KsqlCommandDTO getKsqlCommand() {
return ksqlCommand;
}

public BaseStrategy ksqlCommand(KsqlCommand ksqlCommand) {
public BaseStrategy ksqlCommand(KsqlCommandDTO ksqlCommand) {
this.ksqlCommand = ksqlCommand;
return this;
}
@@ -51,23 +51,23 @@ public abstract class BaseStrategy {
return BaseStrategy.KSQL_REQUEST_PATH;
}

protected KsqlCommandResponse serializeTableResponse(JsonNode response, String key) {
protected KsqlCommandResponseDTO serializeTableResponse(JsonNode response, String key) {
JsonNode item = getResponseFirstItemValue(response, key);
Table table = item.isArray() ? getTableFromArray(item) : getTableFromObject(item);
return (new KsqlCommandResponse()).data(table);
TableDTO table = item.isArray() ? getTableFromArray(item) : getTableFromObject(item);
return (new KsqlCommandResponseDTO()).data(table);
}

protected KsqlCommandResponse serializeMessageResponse(JsonNode response, String key) {
protected KsqlCommandResponseDTO serializeMessageResponse(JsonNode response, String key) {
JsonNode item = getResponseFirstItemValue(response, key);
return (new KsqlCommandResponse()).message(getMessageFromObject(item));
return (new KsqlCommandResponseDTO()).message(getMessageFromObject(item));
}

protected KsqlCommandResponse serializeQueryResponse(JsonNode response) {
protected KsqlCommandResponseDTO serializeQueryResponse(JsonNode response) {
if (response.isArray() && response.size() > 0) {
Table table = (new Table())
TableDTO table = (new TableDTO())
.headers(getQueryResponseHeader(response))
.rows(getQueryResponseRows(response));
return (new KsqlCommandResponse()).data(table);
return (new KsqlCommandResponseDTO()).data(table);
}
throw new UnprocessableEntityException(MAPPING_EXCEPTION_ERROR);
}
@@ -102,8 +102,8 @@ public abstract class BaseStrategy {
.collect(Collectors.toList());
}

private Table getTableFromArray(JsonNode node) {
Table table = new Table();
private TableDTO getTableFromArray(JsonNode node) {
TableDTO table = new TableDTO();
table.headers(new ArrayList<>()).rows(new ArrayList<>());
if (node.size() > 0) {
List<String> keys = getJsonObjectKeys(node.get(0));
@@ -113,14 +113,14 @@ public abstract class BaseStrategy {
return table;
}

private Table getTableFromObject(JsonNode node) {
private TableDTO getTableFromObject(JsonNode node) {
List<String> keys = getJsonObjectKeys(node);
List<String> values = getJsonObjectValues(node);
List<List<String>> rows = IntStream
.range(0, keys.size())
.mapToObj(i -> List.of(keys.get(i), values.get(i)))
.collect(Collectors.toList());
return (new Table()).headers(List.of("key", "value")).rows(rows);
return (new TableDTO()).headers(List.of("key", "value")).rows(rows);
}

private String getMessageFromObject(JsonNode node) {
@@ -160,7 +160,7 @@ public abstract class BaseStrategy {
.collect(Collectors.toList());
}

public abstract KsqlCommandResponse serializeResponse(JsonNode response);
public abstract KsqlCommandResponseDTO serializeResponse(JsonNode response);

protected abstract String getTestRegExp();
}

@@ -1,7 +1,7 @@
package com.provectus.kafka.ui.strategy.ksql.statement;

import com.fasterxml.jackson.databind.JsonNode;
import com.provectus.kafka.ui.model.KsqlCommandResponse;
import com.provectus.kafka.ui.model.KsqlCommandResponseDTO;
import org.springframework.stereotype.Component;

@Component
@@ -9,7 +9,7 @@ public class CreateStrategy extends BaseStrategy {
private static final String RESPONSE_VALUE_KEY = "commandStatus";

@Override
public KsqlCommandResponse serializeResponse(JsonNode response) {
public KsqlCommandResponseDTO serializeResponse(JsonNode response) {
return serializeMessageResponse(response, RESPONSE_VALUE_KEY);
}

@@ -1,7 +1,7 @@
package com.provectus.kafka.ui.strategy.ksql.statement;

import com.fasterxml.jackson.databind.JsonNode;
import com.provectus.kafka.ui.model.KsqlCommandResponse;
import com.provectus.kafka.ui.model.KsqlCommandResponseDTO;
import org.springframework.stereotype.Component;

@Component
@@ -9,7 +9,7 @@ public class DescribeStrategy extends BaseStrategy {
private static final String RESPONSE_VALUE_KEY = "sourceDescription";

@Override
public KsqlCommandResponse serializeResponse(JsonNode response) {
public KsqlCommandResponseDTO serializeResponse(JsonNode response) {
return serializeTableResponse(response, RESPONSE_VALUE_KEY);
}

@@ -1,7 +1,7 @@
package com.provectus.kafka.ui.strategy.ksql.statement;

import com.fasterxml.jackson.databind.JsonNode;
import com.provectus.kafka.ui.model.KsqlCommandResponse;
import com.provectus.kafka.ui.model.KsqlCommandResponseDTO;
import org.springframework.stereotype.Component;

@Component
@@ -9,7 +9,7 @@ public class DropStrategy extends BaseStrategy {
private static final String RESPONSE_VALUE_KEY = "commandStatus";

@Override
public KsqlCommandResponse serializeResponse(JsonNode response) {
public KsqlCommandResponseDTO serializeResponse(JsonNode response) {
return serializeMessageResponse(response, RESPONSE_VALUE_KEY);
}

@@ -1,7 +1,7 @@
package com.provectus.kafka.ui.strategy.ksql.statement;

import com.fasterxml.jackson.databind.JsonNode;
import com.provectus.kafka.ui.model.KsqlCommandResponse;
import com.provectus.kafka.ui.model.KsqlCommandResponseDTO;
import org.springframework.stereotype.Component;

@Component
@@ -9,7 +9,7 @@ public class ExplainStrategy extends BaseStrategy {
private static final String RESPONSE_VALUE_KEY = "queryDescription";

@Override
public KsqlCommandResponse serializeResponse(JsonNode response) {
public KsqlCommandResponseDTO serializeResponse(JsonNode response) {
return serializeTableResponse(response, RESPONSE_VALUE_KEY);
}

@@ -1,14 +1,14 @@
package com.provectus.kafka.ui.strategy.ksql.statement;

import com.fasterxml.jackson.databind.JsonNode;
import com.provectus.kafka.ui.model.KsqlCommandResponse;
import com.provectus.kafka.ui.model.KsqlCommandResponseDTO;
import org.springframework.stereotype.Component;

@Component
public class SelectStrategy extends BaseStrategy {

@Override
public KsqlCommandResponse serializeResponse(JsonNode response) {
public KsqlCommandResponseDTO serializeResponse(JsonNode response) {
return serializeQueryResponse(response);
}

@@ -1,8 +1,8 @@
package com.provectus.kafka.ui.strategy.ksql.statement;

import com.fasterxml.jackson.databind.JsonNode;
import com.provectus.kafka.ui.model.KsqlCommand;
import com.provectus.kafka.ui.model.KsqlCommandResponse;
import com.provectus.kafka.ui.model.KsqlCommandDTO;
import com.provectus.kafka.ui.model.KsqlCommandResponseDTO;
import java.util.List;
import java.util.Optional;
import org.springframework.stereotype.Component;
@@ -16,7 +16,7 @@ public class ShowStrategy extends BaseStrategy {
private String responseValueKey = "";

@Override
public KsqlCommandResponse serializeResponse(JsonNode response) {
public KsqlCommandResponseDTO serializeResponse(JsonNode response) {
return serializeTableResponse(response, responseValueKey);
}

@@ -38,7 +38,7 @@ public class ShowStrategy extends BaseStrategy {
}

@Override
public BaseStrategy ksqlCommand(KsqlCommand ksqlCommand) {
public BaseStrategy ksqlCommand(KsqlCommandDTO ksqlCommand) {
// return new instance to avoid conflicts for parallel requests
ShowStrategy clone = new ShowStrategy();
clone.setResponseValueKey(responseValueKey);

@@ -1,7 +1,7 @@
package com.provectus.kafka.ui.strategy.ksql.statement;

import com.fasterxml.jackson.databind.JsonNode;
import com.provectus.kafka.ui.model.KsqlCommandResponse;
import com.provectus.kafka.ui.model.KsqlCommandResponseDTO;
import org.springframework.stereotype.Component;

@Component
@@ -9,7 +9,7 @@ public class TerminateStrategy extends BaseStrategy {
private static final String RESPONSE_VALUE_KEY = "commandStatus";

@Override
public KsqlCommandResponse serializeResponse(JsonNode response) {
public KsqlCommandResponseDTO serializeResponse(JsonNode response) {
return serializeMessageResponse(response, RESPONSE_VALUE_KEY);
}

@@ -3,11 +3,11 @@ package com.provectus.kafka.ui.util;
import static com.provectus.kafka.ui.util.KafkaConstants.TOPIC_DEFAULT_CONFIGS;
import static org.apache.kafka.common.config.TopicConfig.MESSAGE_FORMAT_VERSION_CONFIG;

import com.provectus.kafka.ui.model.Broker;
import com.provectus.kafka.ui.model.ConsumerGroup;
import com.provectus.kafka.ui.model.ConsumerGroupDetails;
import com.provectus.kafka.ui.model.ConsumerGroupState;
import com.provectus.kafka.ui.model.ConsumerGroupTopicPartition;
import com.provectus.kafka.ui.model.BrokerDTO;
import com.provectus.kafka.ui.model.ConsumerGroupDTO;
import com.provectus.kafka.ui.model.ConsumerGroupDetailsDTO;
import com.provectus.kafka.ui.model.ConsumerGroupStateDTO;
import com.provectus.kafka.ui.model.ConsumerGroupTopicPartitionDTO;
import com.provectus.kafka.ui.model.ExtendedAdminClient;
import com.provectus.kafka.ui.model.InternalBrokerConfig;
import com.provectus.kafka.ui.model.InternalConsumerGroup;
@@ -15,9 +15,9 @@ import com.provectus.kafka.ui.model.InternalPartition;
import com.provectus.kafka.ui.model.InternalReplica;
import com.provectus.kafka.ui.model.InternalTopic;
import com.provectus.kafka.ui.model.InternalTopicConfig;
import com.provectus.kafka.ui.model.MessageFormat;
import com.provectus.kafka.ui.model.ServerStatus;
import com.provectus.kafka.ui.model.TopicMessage;
import com.provectus.kafka.ui.model.MessageFormatDTO;
import com.provectus.kafka.ui.model.ServerStatusDTO;
import com.provectus.kafka.ui.model.TopicMessageDTO;
import com.provectus.kafka.ui.serde.RecordSerDe;
import java.time.Instant;
import java.time.OffsetDateTime;
@@ -47,7 +47,6 @@ import org.apache.kafka.common.config.ConfigResource;
import org.apache.kafka.common.record.TimestampType;
import org.apache.kafka.common.utils.Bytes;
import reactor.core.publisher.Mono;
import reactor.util.function.Tuple2;

@Log4j2
public class ClusterUtil {
@@ -102,11 +101,11 @@ public class ClusterUtil {
return builder.build();
}

public static ConsumerGroup convertToConsumerGroup(InternalConsumerGroup c) {
return convertToConsumerGroup(c, new ConsumerGroup());
public static ConsumerGroupDTO convertToConsumerGroup(InternalConsumerGroup c) {
return convertToConsumerGroup(c, new ConsumerGroupDTO());
}

public static <T extends ConsumerGroup> T convertToConsumerGroup(
public static <T extends ConsumerGroupDTO> T convertToConsumerGroup(
InternalConsumerGroup c, T consumerGroup) {
consumerGroup.setGroupId(c.getGroupId());
consumerGroup.setMembers(c.getMembers().size());
@@ -138,12 +137,12 @@ public class ClusterUtil {
return consumerGroup;
}

public static ConsumerGroupDetails convertToConsumerGroupDetails(InternalConsumerGroup g) {
final ConsumerGroupDetails details = convertToConsumerGroup(g, new ConsumerGroupDetails());
Map<TopicPartition, ConsumerGroupTopicPartition> partitionMap = new HashMap<>();
public static ConsumerGroupDetailsDTO convertToConsumerGroupDetails(InternalConsumerGroup g) {
ConsumerGroupDetailsDTO details = convertToConsumerGroup(g, new ConsumerGroupDetailsDTO());
Map<TopicPartition, ConsumerGroupTopicPartitionDTO> partitionMap = new HashMap<>();

for (Map.Entry<TopicPartition, OffsetAndMetadata> entry : g.getOffsets().entrySet()) {
ConsumerGroupTopicPartition partition = new ConsumerGroupTopicPartition();
ConsumerGroupTopicPartitionDTO partition = new ConsumerGroupTopicPartitionDTO();
partition.setTopic(entry.getKey().topic());
partition.setPartition(entry.getKey().partition());
partition.setCurrentOffset(entry.getValue().offset());
@@ -162,8 +161,9 @@ public class ClusterUtil {

for (InternalConsumerGroup.InternalMember member : g.getMembers()) {
for (TopicPartition topicPartition : member.getAssignment()) {
final ConsumerGroupTopicPartition partition = partitionMap.computeIfAbsent(topicPartition,
(tp) -> new ConsumerGroupTopicPartition()
final ConsumerGroupTopicPartitionDTO partition = partitionMap.computeIfAbsent(
topicPartition,
(tp) -> new ConsumerGroupTopicPartitionDTO()
.topic(tp.topic())
.partition(tp.partition())
);
@@ -176,19 +176,19 @@ public class ClusterUtil {
return details;
}

private static Broker mapCoordinator(Node node) {
return new Broker().host(node.host()).id(node.id());
private static BrokerDTO mapCoordinator(Node node) {
return new BrokerDTO().host(node.host()).id(node.id());
}

private static ConsumerGroupState mapConsumerGroupState(
private static ConsumerGroupStateDTO mapConsumerGroupState(
org.apache.kafka.common.ConsumerGroupState state) {
switch (state) {
case DEAD: return ConsumerGroupState.DEAD;
case EMPTY: return ConsumerGroupState.EMPTY;
case STABLE: return ConsumerGroupState.STABLE;
case PREPARING_REBALANCE: return ConsumerGroupState.PREPARING_REBALANCE;
case COMPLETING_REBALANCE: return ConsumerGroupState.COMPLETING_REBALANCE;
default: return ConsumerGroupState.UNKNOWN;
case DEAD: return ConsumerGroupStateDTO.DEAD;
case EMPTY: return ConsumerGroupStateDTO.EMPTY;
case STABLE: return ConsumerGroupStateDTO.STABLE;
case PREPARING_REBALANCE: return ConsumerGroupStateDTO.PREPARING_REBALANCE;
case COMPLETING_REBALANCE: return ConsumerGroupStateDTO.COMPLETING_REBALANCE;
default: return ConsumerGroupStateDTO.UNKNOWN;
}
}

@@ -275,12 +275,12 @@ public class ClusterUtil {
return topic.build();
}

public static int convertToIntServerStatus(ServerStatus serverStatus) {
return serverStatus.equals(ServerStatus.ONLINE) ? 1 : 0;
public static int convertToIntServerStatus(ServerStatusDTO serverStatus) {
return serverStatus.equals(ServerStatusDTO.ONLINE) ? 1 : 0;
}

public static TopicMessage mapToTopicMessage(ConsumerRecord<Bytes, Bytes> consumerRecord,
RecordSerDe recordDeserializer) {
public static TopicMessageDTO mapToTopicMessage(ConsumerRecord<Bytes, Bytes> consumerRecord,
RecordSerDe recordDeserializer) {

Map<String, String> headers = new HashMap<>();
consumerRecord.headers().iterator()
@@ -291,11 +291,11 @@ public class ClusterUtil {
)
);

TopicMessage topicMessage = new TopicMessage();
TopicMessageDTO topicMessage = new TopicMessageDTO();

OffsetDateTime timestamp =
OffsetDateTime.ofInstant(Instant.ofEpochMilli(consumerRecord.timestamp()), UTC_ZONE_ID);
TopicMessage.TimestampTypeEnum timestampType =
TopicMessageDTO.TimestampTypeEnum timestampType =
mapToTimestampType(consumerRecord.timestampType());
topicMessage.setPartition(consumerRecord.partition());
topicMessage.setOffset(consumerRecord.offset());
@@ -307,10 +307,10 @@ public class ClusterUtil {
topicMessage.setKey(parsed.getKey());
topicMessage.setContent(parsed.getValue());
topicMessage.setKeyFormat(parsed.getKeyFormat() != null
? MessageFormat.valueOf(parsed.getKeyFormat().name())
? MessageFormatDTO.valueOf(parsed.getKeyFormat().name())
: null);
topicMessage.setValueFormat(parsed.getValueFormat() != null
? MessageFormat.valueOf(parsed.getValueFormat().name())
? MessageFormatDTO.valueOf(parsed.getValueFormat().name())
: null);
topicMessage.setKeySize(ConsumerRecordUtil.getKeySize(consumerRecord));
topicMessage.setValueSize(ConsumerRecordUtil.getValueSize(consumerRecord));
@@ -321,14 +321,14 @@ public class ClusterUtil {
return topicMessage;
}

private static TopicMessage.TimestampTypeEnum mapToTimestampType(TimestampType timestampType) {
private static TopicMessageDTO.TimestampTypeEnum mapToTimestampType(TimestampType timestampType) {
switch (timestampType) {
case CREATE_TIME:
return TopicMessage.TimestampTypeEnum.CREATE_TIME;
return TopicMessageDTO.TimestampTypeEnum.CREATE_TIME;
case LOG_APPEND_TIME:
return TopicMessage.TimestampTypeEnum.LOG_APPEND_TIME;
return TopicMessageDTO.TimestampTypeEnum.LOG_APPEND_TIME;
case NO_TIMESTAMP_TYPE:
return TopicMessage.TimestampTypeEnum.NO_TIMESTAMP_TYPE;
return TopicMessageDTO.TimestampTypeEnum.NO_TIMESTAMP_TYPE;
default:
throw new IllegalArgumentException("Unknown timestampType: " + timestampType);
}

@@ -1,10 +1,10 @@
package com.provectus.kafka.ui.util;

import com.provectus.kafka.ui.model.TopicMessageEvent;
import com.provectus.kafka.ui.model.TopicMessageEventDTO;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Predicate;

public class FilterTopicMessageEvents implements Predicate<TopicMessageEvent> {
public class FilterTopicMessageEvents implements Predicate<TopicMessageEventDTO> {
private final AtomicInteger processed = new AtomicInteger();
private final int limit;

@@ -13,8 +13,8 @@ public class FilterTopicMessageEvents implements Predicate<TopicMessageEvent> {
}

@Override
public boolean test(TopicMessageEvent event) {
if (event.getType().equals(TopicMessageEvent.TypeEnum.MESSAGE)) {
public boolean test(TopicMessageEventDTO event) {
if (event.getType().equals(TopicMessageEventDTO.TypeEnum.MESSAGE)) {
final int i = processed.incrementAndGet();
if (i > limit) {
return false;

@@ -1,7 +1,7 @@
package com.provectus.kafka.ui.util;

import com.provectus.kafka.ui.model.JmxConnectionInfo;
import com.provectus.kafka.ui.model.Metric;
import com.provectus.kafka.ui.model.MetricDTO;
import java.math.BigDecimal;
import java.util.ArrayList;
import java.util.Arrays;
@@ -36,7 +36,7 @@ public class JmxClusterUtil {
private final KeyedObjectPool<JmxConnectionInfo, JMXConnector> pool;

@SneakyThrows
public List<Metric> getJmxMetrics(String host, int port, boolean jmxSsl,
public List<MetricDTO> getJmxMetrics(String host, int port, boolean jmxSsl,
@Nullable String username, @Nullable String password) {
String jmxUrl = JMX_URL + host + ":" + port + "/" + JMX_SERVICE_TYPE;
final var connectionInfo = JmxConnectionInfo.builder()
@@ -53,7 +53,7 @@ public class JmxClusterUtil {
return Collections.emptyList();
}

List<Metric> result = new ArrayList<>();
List<MetricDTO> result = new ArrayList<>();
try {
MBeanServerConnection msc = srv.getMBeanServerConnection();
var jmxMetrics = msc.queryNames(null, null).stream()
@@ -61,7 +61,7 @@ public class JmxClusterUtil {
.collect(Collectors.toList());
for (ObjectName jmxMetric : jmxMetrics) {
final Hashtable<String, String> params = jmxMetric.getKeyPropertyList();
Metric metric = new Metric();
MetricDTO metric = new MetricDTO();
metric.setName(params.get(NAME_METRIC_FIELD));
metric.setCanonicalName(jmxMetric.getCanonicalName());
metric.setParams(params);
@@ -99,8 +99,8 @@ public class JmxClusterUtil {
}
}

public Metric reduceJmxMetrics(Metric metric1, Metric metric2) {
var result = new Metric();
public MetricDTO reduceJmxMetrics(MetricDTO metric1, MetricDTO metric2) {
var result = new MetricDTO();
Map<String, BigDecimal> value = Stream.concat(
metric1.getValue().entrySet().stream(),
metric2.getValue().entrySet().stream()
@@ -115,7 +115,7 @@ public class JmxClusterUtil {
return result;
}

private boolean isWellKnownMetric(Metric metric) {
private boolean isWellKnownMetric(MetricDTO metric) {
final Optional<String> param =
Optional.ofNullable(metric.getParams().get(NAME_METRIC_FIELD)).filter(p ->
Arrays.stream(JmxMetricsName.values()).map(Enum::name)

@@ -1,7 +1,7 @@
package com.provectus.kafka.ui.util;

import com.provectus.kafka.ui.model.ConsumerPosition;
import com.provectus.kafka.ui.model.SeekType;
import com.provectus.kafka.ui.model.SeekTypeDTO;
import java.util.Collection;
import java.util.List;
import java.util.Map;
@@ -29,7 +29,7 @@ public abstract class OffsetsSeek {
}

public Map<TopicPartition, Long> getPartitionsOffsets(Consumer<Bytes, Bytes> consumer) {
SeekType seekType = consumerPosition.getSeekType();
SeekTypeDTO seekType = consumerPosition.getSeekType();
List<TopicPartition> partitions = getRequestedPartitions(consumer);
log.info("Positioning consumer for topic {} with {}", topic, consumerPosition);
Map<TopicPartition, Long> offsets;

@ -3,16 +3,16 @@ package com.provectus.kafka.ui;
|
|||
import static java.util.function.Predicate.not;
|
||||
import static org.junit.jupiter.api.Assertions.assertEquals;
|
||||
|
||||
import com.provectus.kafka.ui.model.Connector;
|
||||
import com.provectus.kafka.ui.model.ConnectorPlugin;
|
||||
import com.provectus.kafka.ui.model.ConnectorPluginConfig;
|
||||
import com.provectus.kafka.ui.model.ConnectorPluginConfigValidationResponse;
|
||||
import com.provectus.kafka.ui.model.ConnectorPluginConfigValue;
|
||||
import com.provectus.kafka.ui.model.ConnectorState;
|
||||
import com.provectus.kafka.ui.model.ConnectorStatus;
|
||||
import com.provectus.kafka.ui.model.ConnectorType;
|
||||
import com.provectus.kafka.ui.model.NewConnector;
|
||||
import com.provectus.kafka.ui.model.TaskId;
|
||||
import com.provectus.kafka.ui.model.ConnectorDTO;
|
||||
import com.provectus.kafka.ui.model.ConnectorPluginConfigDTO;
|
||||
import com.provectus.kafka.ui.model.ConnectorPluginConfigValidationResponseDTO;
|
||||
import com.provectus.kafka.ui.model.ConnectorPluginConfigValueDTO;
|
||||
import com.provectus.kafka.ui.model.ConnectorPluginDTO;
|
||||
import com.provectus.kafka.ui.model.ConnectorStateDTO;
|
||||
import com.provectus.kafka.ui.model.ConnectorStatusDTO;
|
||||
import com.provectus.kafka.ui.model.ConnectorTypeDTO;
|
||||
import com.provectus.kafka.ui.model.NewConnectorDTO;
|
||||
import com.provectus.kafka.ui.model.TaskIdDTO;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.UUID;
|
||||
|
@ -48,7 +48,7 @@ public class KafkaConnectServiceTests extends AbstractBaseTest {
|
|||
public void setUp() {
|
||||
webTestClient.post()
|
||||
.uri("/api/clusters/{clusterName}/connects/{connectName}/connectors", LOCAL, connectName)
|
||||
.bodyValue(new NewConnector()
|
||||
.bodyValue(new NewConnectorDTO()
|
||||
.name(connectorName)
|
||||
.config(Map.of(
|
||||
"connector.class", "org.apache.kafka.connect.file.FileStreamSinkConnector",
|
||||
|
@ -168,15 +168,15 @@ public class KafkaConnectServiceTests extends AbstractBaseTest {
|
|||
|
||||
@Test
|
||||
public void shouldRetrieveConnector() {
|
||||
Connector expected = (Connector) new Connector()
|
||||
ConnectorDTO expected = (ConnectorDTO) new ConnectorDTO()
|
||||
.connect(connectName)
|
||||
.status(new ConnectorStatus()
|
||||
.state(ConnectorState.RUNNING)
|
||||
.status(new ConnectorStatusDTO()
|
||||
.state(ConnectorStateDTO.RUNNING)
|
||||
.workerId("kafka-connect:8083"))
|
||||
.tasks(List.of(new TaskId()
|
||||
.tasks(List.of(new TaskIdDTO()
|
||||
.connector(connectorName)
|
||||
.task(0)))
|
||||
.type(ConnectorType.SINK)
|
||||
.type(ConnectorTypeDTO.SINK)
|
||||
.name(connectorName)
|
||||
.config(config);
|
||||
webTestClient.get()
|
||||
|
@ -184,7 +184,7 @@ public class KafkaConnectServiceTests extends AbstractBaseTest {
|
|||
connectName, connectorName)
|
||||
.exchange()
|
||||
.expectStatus().isOk()
|
||||
.expectBody(Connector.class)
|
||||
.expectBody(ConnectorDTO.class)
|
||||
.value(connector -> assertEquals(expected, connector));
|
||||
}
|
||||
|
||||
|
@ -334,7 +334,7 @@ public class KafkaConnectServiceTests extends AbstractBaseTest {
|
|||
.uri("/api/clusters/{clusterName}/connects/{connectName}/plugins", LOCAL, connectName)
|
||||
.exchange()
|
||||
.expectStatus().isOk()
|
||||
.expectBodyList(ConnectorPlugin.class)
|
||||
.expectBodyList(ConnectorPluginDTO.class)
|
||||
.value(plugins -> assertEquals(14, plugins.size()));
|
||||
}
|
||||
|
||||
|
@ -355,7 +355,7 @@ public class KafkaConnectServiceTests extends AbstractBaseTest {
|
|||
)
|
||||
.exchange()
|
||||
.expectStatus().isOk()
|
||||
.expectBody(ConnectorPluginConfigValidationResponse.class)
|
||||
.expectBody(ConnectorPluginConfigValidationResponseDTO.class)
|
||||
.value(response -> assertEquals(0, response.getErrorCount()));
|
||||
}
|
||||
|
||||
|
@ -376,12 +376,12 @@ public class KafkaConnectServiceTests extends AbstractBaseTest {
|
|||
)
|
||||
.exchange()
|
||||
.expectStatus().isOk()
|
||||
.expectBody(ConnectorPluginConfigValidationResponse.class)
|
||||
.expectBody(ConnectorPluginConfigValidationResponseDTO.class)
|
||||
.value(response -> {
|
||||
assertEquals(1, response.getErrorCount());
|
||||
var error = response.getConfigs().stream()
|
||||
.map(ConnectorPluginConfig::getValue)
|
||||
.map(ConnectorPluginConfigValue::getErrors)
|
||||
.map(ConnectorPluginConfigDTO::getValue)
|
||||
.map(ConnectorPluginConfigValueDTO::getErrors)
|
||||
.filter(not(List::isEmpty))
|
||||
.findFirst().get();
|
||||
assertEquals(
|
||||
|
|
|
@ -4,12 +4,12 @@ import static org.assertj.core.api.Assertions.assertThat;
|
|||
import static org.springframework.http.MediaType.TEXT_EVENT_STREAM;
|
||||
|
||||
import com.provectus.kafka.ui.api.model.TopicConfig;
|
||||
import com.provectus.kafka.ui.model.BrokerConfig;
|
||||
import com.provectus.kafka.ui.model.PartitionsIncrease;
|
||||
import com.provectus.kafka.ui.model.PartitionsIncreaseResponse;
|
||||
import com.provectus.kafka.ui.model.TopicCreation;
|
||||
import com.provectus.kafka.ui.model.TopicDetails;
|
||||
import com.provectus.kafka.ui.model.TopicMessageEvent;
|
||||
import com.provectus.kafka.ui.model.BrokerConfigDTO;
|
||||
import com.provectus.kafka.ui.model.PartitionsIncreaseDTO;
|
||||
import com.provectus.kafka.ui.model.PartitionsIncreaseResponseDTO;
|
||||
import com.provectus.kafka.ui.model.TopicCreationDTO;
|
||||
import com.provectus.kafka.ui.model.TopicDetailsDTO;
|
||||
import com.provectus.kafka.ui.model.TopicMessageEventDTO;
|
||||
import com.provectus.kafka.ui.producer.KafkaTestProducer;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
@ -37,7 +37,7 @@ public class KafkaConsumerTests extends AbstractBaseTest {
|
|||
var topicName = UUID.randomUUID().toString();
|
||||
webTestClient.post()
|
||||
.uri("/api/clusters/{clusterName}/topics", LOCAL)
|
||||
.bodyValue(new TopicCreation()
|
||||
.bodyValue(new TopicCreationDTO()
|
||||
.name(topicName)
|
||||
.partitions(1)
|
||||
.replicationFactor(1)
|
||||
|
@ -58,11 +58,11 @@ public class KafkaConsumerTests extends AbstractBaseTest {
|
|||
.exchange()
|
||||
.expectStatus()
|
||||
.isOk()
|
||||
.expectBodyList(TopicMessageEvent.class)
|
||||
.expectBodyList(TopicMessageEventDTO.class)
|
||||
.returnResult()
|
||||
.getResponseBody()
|
||||
.stream()
|
||||
.filter(e -> e.getType().equals(TopicMessageEvent.TypeEnum.MESSAGE))
|
||||
.filter(e -> e.getType().equals(TopicMessageEventDTO.TypeEnum.MESSAGE))
|
||||
.count();
|
||||
|
||||
assertThat(count).isEqualTo(4);
|
||||
|
@ -78,11 +78,11 @@ public class KafkaConsumerTests extends AbstractBaseTest {
|
|||
.exchange()
|
||||
.expectStatus()
|
||||
.isOk()
|
||||
.expectBodyList(TopicMessageEvent.class)
|
||||
.expectBodyList(TopicMessageEventDTO.class)
|
||||
.returnResult()
|
||||
.getResponseBody()
|
||||
.stream()
|
||||
.filter(e -> e.getType().equals(TopicMessageEvent.TypeEnum.MESSAGE))
|
||||
.filter(e -> e.getType().equals(TopicMessageEventDTO.TypeEnum.MESSAGE))
|
||||
.count();
|
||||
|
||||
assertThat(count).isZero();
|
||||
|
@ -93,7 +93,7 @@ public class KafkaConsumerTests extends AbstractBaseTest {
|
|||
var topicName = UUID.randomUUID().toString();
|
||||
webTestClient.post()
|
||||
.uri("/api/clusters/{clusterName}/topics", LOCAL)
|
||||
.bodyValue(new TopicCreation()
|
||||
.bodyValue(new TopicCreationDTO()
|
||||
.name(topicName)
|
||||
.partitions(1)
|
||||
.replicationFactor(1)
|
||||
|
@ -103,31 +103,31 @@ public class KafkaConsumerTests extends AbstractBaseTest {
|
|||
.expectStatus()
|
||||
.isOk();
|
||||
|
||||
PartitionsIncreaseResponse response = webTestClient.patch()
|
||||
PartitionsIncreaseResponseDTO response = webTestClient.patch()
|
||||
.uri("/api/clusters/{clusterName}/topics/{topicName}/partitions",
|
||||
LOCAL,
|
||||
topicName)
|
||||
.bodyValue(new PartitionsIncrease()
|
||||
.bodyValue(new PartitionsIncreaseDTO()
|
||||
.totalPartitionsCount(10)
|
||||
)
|
||||
.exchange()
|
||||
.expectStatus()
|
||||
.isOk()
|
||||
.expectBody(PartitionsIncreaseResponse.class)
|
||||
.expectBody(PartitionsIncreaseResponseDTO.class)
|
||||
.returnResult()
|
||||
.getResponseBody();
|
||||
|
||||
assert response != null;
|
||||
Assertions.assertEquals(10, response.getTotalPartitionsCount());
|
||||
|
||||
TopicDetails topicDetails = webTestClient.get()
|
||||
TopicDetailsDTO topicDetails = webTestClient.get()
|
||||
.uri("/api/clusters/{clusterName}/topics/{topicName}",
|
||||
LOCAL,
|
||||
topicName)
|
||||
.exchange()
|
||||
.expectStatus()
|
||||
.isOk()
|
||||
.expectBody(TopicDetails.class)
|
||||
.expectBody(TopicDetailsDTO.class)
|
||||
.returnResult()
|
||||
.getResponseBody();
|
||||
|
||||
|
@ -156,14 +156,14 @@ public class KafkaConsumerTests extends AbstractBaseTest {
|
|||
public void shouldReturnConfigsForBroker() {
|
||||
var topicName = UUID.randomUUID().toString();
|
||||
|
||||
List<BrokerConfig> configs = webTestClient.get()
|
||||
List<BrokerConfigDTO> configs = webTestClient.get()
|
||||
.uri("/api/clusters/{clusterName}/brokers/{id}/configs",
|
||||
LOCAL,
|
||||
1)
|
||||
.exchange()
|
||||
.expectStatus()
|
||||
.isOk()
|
||||
.expectBodyList(BrokerConfig.class)
|
||||
.expectBodyList(BrokerConfigDTO.class)
|
||||
.returnResult()
|
||||
.getResponseBody();
|
||||
|
||||
|
@ -193,7 +193,7 @@ public class KafkaConsumerTests extends AbstractBaseTest {
|
|||
|
||||
webTestClient.post()
|
||||
.uri("/api/clusters/{clusterName}/topics", LOCAL)
|
||||
.bodyValue(new TopicCreation()
|
||||
.bodyValue(new TopicCreationDTO()
|
||||
.name(topicName)
|
||||
.partitions(1)
|
||||
.replicationFactor(1)
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
package com.provectus.kafka.ui;
|
||||
|
||||
import com.provectus.kafka.ui.model.TopicCreation;
|
||||
import com.provectus.kafka.ui.model.TopicCreationDTO;
|
||||
import java.util.UUID;
|
||||
import lombok.extern.log4j.Log4j2;
|
||||
import org.junit.jupiter.api.BeforeEach;
|
||||
|
@ -16,11 +16,11 @@ import org.springframework.test.web.reactive.server.WebTestClient;
|
|||
public class KafkaTopicCreateTests extends AbstractBaseTest {
|
||||
@Autowired
|
||||
private WebTestClient webTestClient;
|
||||
private TopicCreation topicCreation;
|
||||
private TopicCreationDTO topicCreation;
|
||||
|
||||
@BeforeEach
|
||||
public void setUpBefore() {
|
||||
this.topicCreation = new TopicCreation()
|
||||
this.topicCreation = new TopicCreationDTO()
|
||||
.replicationFactor(1)
|
||||
.partitions(3)
|
||||
.name(UUID.randomUUID().toString());
|
||||
|
@ -38,7 +38,7 @@ public class KafkaTopicCreateTests extends AbstractBaseTest {
|
|||
|
||||
@Test
|
||||
void shouldReturn400IfTopicAlreadyExists() {
|
||||
TopicCreation topicCreation = new TopicCreation()
|
||||
TopicCreationDTO topicCreation = new TopicCreationDTO()
|
||||
.replicationFactor(1)
|
||||
.partitions(3)
|
||||
.name(UUID.randomUUID().toString());
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
package com.provectus.kafka.ui;
|
||||
|
||||
import com.provectus.kafka.ui.model.TopicCreation;
|
||||
import com.provectus.kafka.ui.model.TopicUpdate;
|
||||
import com.provectus.kafka.ui.model.TopicCreationDTO;
|
||||
import com.provectus.kafka.ui.model.TopicUpdateDTO;
|
||||
import java.util.Map;
|
||||
import java.util.UUID;
|
||||
import lombok.extern.log4j.Log4j2;
|
||||
|
@ -25,7 +25,7 @@ public class ReadOnlyModeTests extends AbstractBaseTest {
|
|||
var topicName = UUID.randomUUID().toString();
|
||||
webTestClient.post()
|
||||
.uri("/api/clusters/{clusterName}/topics", LOCAL)
|
||||
.bodyValue(new TopicCreation()
|
||||
.bodyValue(new TopicCreationDTO()
|
||||
.name(topicName)
|
||||
.partitions(1)
|
||||
.replicationFactor(1)
|
||||
|
@ -41,7 +41,7 @@ public class ReadOnlyModeTests extends AbstractBaseTest {
|
|||
var topicName = UUID.randomUUID().toString();
|
||||
webTestClient.post()
|
||||
.uri("/api/clusters/{clusterName}/topics", SECOND_LOCAL)
|
||||
.bodyValue(new TopicCreation()
|
||||
.bodyValue(new TopicCreationDTO()
|
||||
.name(topicName)
|
||||
.partitions(1)
|
||||
.replicationFactor(1)
|
||||
|
@ -57,7 +57,7 @@ public class ReadOnlyModeTests extends AbstractBaseTest {
|
|||
var topicName = UUID.randomUUID().toString();
|
||||
webTestClient.post()
|
||||
.uri("/api/clusters/{clusterName}/topics", LOCAL)
|
||||
.bodyValue(new TopicCreation()
|
||||
.bodyValue(new TopicCreationDTO()
|
||||
.name(topicName)
|
||||
.partitions(1)
|
||||
.replicationFactor(1)
|
||||
|
@ -68,7 +68,7 @@ public class ReadOnlyModeTests extends AbstractBaseTest {
|
|||
.isOk();
|
||||
webTestClient.patch()
|
||||
.uri("/api/clusters/{clusterName}/topics/{topicName}", LOCAL, topicName)
|
||||
.bodyValue(new TopicUpdate()
|
||||
.bodyValue(new TopicUpdateDTO()
|
||||
.configs(Map.of())
|
||||
)
|
||||
.exchange()
|
||||
|
@ -81,7 +81,7 @@ public class ReadOnlyModeTests extends AbstractBaseTest {
|
|||
var topicName = UUID.randomUUID().toString();
|
||||
webTestClient.patch()
|
||||
.uri("/api/clusters/{clusterName}/topics/{topicName}", SECOND_LOCAL, topicName)
|
||||
.bodyValue(new TopicUpdate()
|
||||
.bodyValue(new TopicUpdateDTO()
|
||||
.configs(Map.of())
|
||||
)
|
||||
.exchange()
|
||||
|
|
|
@ -1,9 +1,9 @@
|
|||
package com.provectus.kafka.ui;
|
||||
|
||||
import com.provectus.kafka.ui.model.CompatibilityLevel;
|
||||
import com.provectus.kafka.ui.model.NewSchemaSubject;
|
||||
import com.provectus.kafka.ui.model.SchemaSubject;
|
||||
import com.provectus.kafka.ui.model.SchemaType;
|
||||
import com.provectus.kafka.ui.model.CompatibilityLevelDTO;
|
||||
import com.provectus.kafka.ui.model.NewSchemaSubjectDTO;
|
||||
import com.provectus.kafka.ui.model.SchemaSubjectDTO;
|
||||
import com.provectus.kafka.ui.model.SchemaTypeDTO;
|
||||
import java.util.List;
|
||||
import java.util.UUID;
|
||||
import lombok.extern.log4j.Log4j2;
|
||||
|
@ -96,27 +96,27 @@ class SchemaRegistryServiceTests extends AbstractBaseTest {
|
|||
void shouldCreateNewProtobufSchema() {
|
||||
String schema =
|
||||
"syntax = \"proto3\";\n\nmessage MyRecord {\n int32 id = 1;\n string name = 2;\n}\n";
|
||||
NewSchemaSubject requestBody = new NewSchemaSubject()
|
||||
.schemaType(SchemaType.PROTOBUF)
|
||||
NewSchemaSubjectDTO requestBody = new NewSchemaSubjectDTO()
|
||||
.schemaType(SchemaTypeDTO.PROTOBUF)
|
||||
.subject(subject)
|
||||
.schema(schema);
|
||||
SchemaSubject actual = webTestClient
|
||||
SchemaSubjectDTO actual = webTestClient
|
||||
.post()
|
||||
.uri("/api/clusters/{clusterName}/schemas", LOCAL)
|
||||
.contentType(MediaType.APPLICATION_JSON)
|
||||
.body(BodyInserters.fromPublisher(Mono.just(requestBody), NewSchemaSubject.class))
|
||||
.body(BodyInserters.fromPublisher(Mono.just(requestBody), NewSchemaSubjectDTO.class))
|
||||
.exchange()
|
||||
.expectStatus()
|
||||
.isOk()
|
||||
.expectBody(SchemaSubject.class)
|
||||
.expectBody(SchemaSubjectDTO.class)
|
||||
.returnResult()
|
||||
.getResponseBody();
|
||||
|
||||
Assertions.assertNotNull(actual);
|
||||
Assertions.assertEquals(CompatibilityLevel.CompatibilityEnum.BACKWARD.name(),
|
||||
Assertions.assertEquals(CompatibilityLevelDTO.CompatibilityEnum.BACKWARD.name(),
|
||||
actual.getCompatibilityLevel());
|
||||
Assertions.assertEquals("1", actual.getVersion());
|
||||
Assertions.assertEquals(SchemaType.PROTOBUF, actual.getSchemaType());
|
||||
Assertions.assertEquals(SchemaTypeDTO.PROTOBUF, actual.getSchemaType());
|
||||
Assertions.assertEquals(schema, actual.getSchema());
|
||||
}
|
||||
|
||||
|
@ -127,11 +127,11 @@ class SchemaRegistryServiceTests extends AbstractBaseTest {
|
|||
.uri("/api/clusters/{clusterName}/schemas/compatibility", LOCAL)
|
||||
.exchange()
|
||||
.expectStatus().isOk()
|
||||
.expectBody(CompatibilityLevel.class)
|
||||
.expectBody(CompatibilityLevelDTO.class)
|
||||
.consumeWith(result -> {
|
||||
CompatibilityLevel responseBody = result.getResponseBody();
|
||||
CompatibilityLevelDTO responseBody = result.getResponseBody();
|
||||
Assertions.assertNotNull(responseBody);
|
||||
Assertions.assertEquals(CompatibilityLevel.CompatibilityEnum.BACKWARD,
|
||||
Assertions.assertEquals(CompatibilityLevelDTO.CompatibilityEnum.BACKWARD,
|
||||
responseBody.getCompatibility());
|
||||
});
|
||||
}
|
||||
|
@ -145,14 +145,14 @@ class SchemaRegistryServiceTests extends AbstractBaseTest {
|
|||
.uri("/api/clusters/{clusterName}/schemas", LOCAL)
|
||||
.exchange()
|
||||
.expectStatus().isOk()
|
||||
.expectBodyList(SchemaSubject.class)
|
||||
.expectBodyList(SchemaSubjectDTO.class)
|
||||
.consumeWith(result -> {
|
||||
List<SchemaSubject> responseBody = result.getResponseBody();
|
||||
List<SchemaSubjectDTO> responseBody = result.getResponseBody();
|
||||
log.info("Response of test schemas: {}", responseBody);
|
||||
Assertions.assertNotNull(responseBody);
|
||||
Assertions.assertFalse(responseBody.isEmpty());
|
||||
|
||||
SchemaSubject actualSchemaSubject = responseBody.stream()
|
||||
SchemaSubjectDTO actualSchemaSubject = responseBody.stream()
|
||||
.filter(schemaSubject -> subject.equals(schemaSubject.getSubject()))
|
||||
.findFirst()
|
||||
.orElseThrow();
|
||||
|
@ -173,10 +173,10 @@ class SchemaRegistryServiceTests extends AbstractBaseTest {
|
|||
.uri("/api/clusters/{clusterName}/schemas/{subject}/latest", LOCAL, subject)
|
||||
.exchange()
|
||||
.expectStatus().isOk()
|
||||
.expectBodyList(SchemaSubject.class)
|
||||
.expectBodyList(SchemaSubjectDTO.class)
|
||||
.consumeWith(listEntityExchangeResult -> {
|
||||
val expectedCompatibility =
|
||||
CompatibilityLevel.CompatibilityEnum.BACKWARD;
|
||||
CompatibilityLevelDTO.CompatibilityEnum.BACKWARD;
|
||||
assertSchemaWhenGetLatest(subject, listEntityExchangeResult, expectedCompatibility);
|
||||
});
|
||||
|
||||
|
@ -196,10 +196,10 @@ class SchemaRegistryServiceTests extends AbstractBaseTest {
|
|||
.uri("/api/clusters/{clusterName}/schemas/{subject}/latest", LOCAL, subject)
|
||||
.exchange()
|
||||
.expectStatus().isOk()
|
||||
.expectBodyList(SchemaSubject.class)
|
||||
.expectBodyList(SchemaSubjectDTO.class)
|
||||
.consumeWith(listEntityExchangeResult -> {
|
||||
val expectedCompatibility =
|
||||
CompatibilityLevel.CompatibilityEnum.FULL;
|
||||
CompatibilityLevelDTO.CompatibilityEnum.FULL;
|
||||
assertSchemaWhenGetLatest(subject, listEntityExchangeResult, expectedCompatibility);
|
||||
});
|
||||
}
|
||||
|
@ -218,34 +218,34 @@ class SchemaRegistryServiceTests extends AbstractBaseTest {
|
|||
))
|
||||
.exchange()
|
||||
.expectStatus().isOk()
|
||||
.expectBody(SchemaSubject.class)
|
||||
.expectBody(SchemaSubjectDTO.class)
|
||||
.consumeWith(this::assertResponseBodyWhenCreateNewSchema);
|
||||
}
|
||||
|
||||
private void assertSchemaWhenGetLatest(
|
||||
String subject, EntityExchangeResult<List<SchemaSubject>> listEntityExchangeResult,
|
||||
CompatibilityLevel.CompatibilityEnum expectedCompatibility) {
|
||||
List<SchemaSubject> responseBody = listEntityExchangeResult.getResponseBody();
|
||||
String subject, EntityExchangeResult<List<SchemaSubjectDTO>> listEntityExchangeResult,
|
||||
CompatibilityLevelDTO.CompatibilityEnum expectedCompatibility) {
|
||||
List<SchemaSubjectDTO> responseBody = listEntityExchangeResult.getResponseBody();
|
||||
Assertions.assertNotNull(responseBody);
|
||||
Assertions.assertEquals(1, responseBody.size());
|
||||
SchemaSubject actualSchema = responseBody.get(0);
|
||||
SchemaSubjectDTO actualSchema = responseBody.get(0);
|
||||
Assertions.assertNotNull(actualSchema);
|
||||
Assertions.assertEquals(subject, actualSchema.getSubject());
|
||||
Assertions.assertEquals("\"string\"", actualSchema.getSchema());
|
||||
|
||||
Assertions.assertNotNull(actualSchema.getCompatibilityLevel());
|
||||
Assertions.assertEquals(SchemaType.AVRO, actualSchema.getSchemaType());
|
||||
Assertions.assertEquals(SchemaTypeDTO.AVRO, actualSchema.getSchemaType());
|
||||
Assertions.assertEquals(expectedCompatibility.name(), actualSchema.getCompatibilityLevel());
|
||||
}
|
||||
|
||||
private void assertResponseBodyWhenCreateNewSchema(
|
||||
EntityExchangeResult<SchemaSubject> exchangeResult) {
|
||||
SchemaSubject responseBody = exchangeResult.getResponseBody();
|
||||
EntityExchangeResult<SchemaSubjectDTO> exchangeResult) {
|
||||
SchemaSubjectDTO responseBody = exchangeResult.getResponseBody();
|
||||
Assertions.assertNotNull(responseBody);
|
||||
Assertions.assertEquals("1", responseBody.getVersion());
|
||||
Assertions.assertNotNull(responseBody.getSchema());
|
||||
Assertions.assertNotNull(responseBody.getSubject());
|
||||
Assertions.assertNotNull(responseBody.getCompatibilityLevel());
|
||||
Assertions.assertEquals(SchemaType.AVRO, responseBody.getSchemaType());
|
||||
Assertions.assertEquals(SchemaTypeDTO.AVRO, responseBody.getSchemaType());
|
||||
}
|
||||
}
|
||||
|
|
|
@ -8,8 +8,8 @@ import com.provectus.kafka.ui.mapper.ClusterMapper;
|
|||
import com.provectus.kafka.ui.model.InternalTopic;
|
||||
import com.provectus.kafka.ui.model.InternalTopicConfig;
|
||||
import com.provectus.kafka.ui.model.KafkaCluster;
|
||||
import com.provectus.kafka.ui.model.Topic;
|
||||
import com.provectus.kafka.ui.model.TopicColumnsToSort;
|
||||
import com.provectus.kafka.ui.model.TopicColumnsToSortDTO;
|
||||
import com.provectus.kafka.ui.model.TopicDTO;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Objects;
|
||||
|
@ -67,7 +67,7 @@ class ClusterServiceTest {
|
|||
Optional.empty(), Optional.empty());
|
||||
assertThat(topics.getPageCount()).isEqualTo(4);
|
||||
assertThat(topics.getTopics()).hasSize(25);
|
||||
assertThat(topics.getTopics()).map(Topic::getName).isSorted();
|
||||
assertThat(topics.getTopics()).map(TopicDTO::getName).isSorted();
|
||||
}
|
||||
|
||||
@Test
|
||||
|
@ -97,7 +97,7 @@ class ClusterServiceTest {
|
|||
Optional.empty(), Optional.empty(), Optional.empty());
|
||||
assertThat(topics.getPageCount()).isEqualTo(4);
|
||||
assertThat(topics.getTopics()).hasSize(1)
|
||||
.first().extracting(Topic::getName).isEqualTo("99");
|
||||
.first().extracting(TopicDTO::getName).isEqualTo("99");
|
||||
}
|
||||
|
||||
@Test
|
||||
|
@ -127,7 +127,7 @@ class ClusterServiceTest {
|
|||
Optional.empty(), Optional.empty(), Optional.empty());
|
||||
assertThat(topics.getPageCount()).isEqualTo(4);
|
||||
assertThat(topics.getTopics()).hasSize(25);
|
||||
assertThat(topics.getTopics()).map(Topic::getName).isSorted();
|
||||
assertThat(topics.getTopics()).map(TopicDTO::getName).isSorted();
|
||||
}
|
||||
|
||||
@Test
|
||||
|
@ -159,7 +159,7 @@ class ClusterServiceTest {
|
|||
Optional.empty(), Optional.empty());
|
||||
assertThat(topics.getPageCount()).isEqualTo(4);
|
||||
assertThat(topics.getTopics()).hasSize(25);
|
||||
assertThat(topics.getTopics()).map(Topic::getName).isSorted();
|
||||
assertThat(topics.getTopics()).map(TopicDTO::getName).isSorted();
|
||||
}
|
||||
|
||||
|
||||
|
@ -192,7 +192,7 @@ class ClusterServiceTest {
|
|||
Optional.empty(), Optional.empty());
|
||||
assertThat(topics.getPageCount()).isEqualTo(4);
|
||||
assertThat(topics.getTopics()).hasSize(25);
|
||||
assertThat(topics.getTopics()).map(Topic::getName).isSorted();
|
||||
assertThat(topics.getTopics()).map(TopicDTO::getName).isSorted();
|
||||
}
|
||||
|
||||
|
||||
|
@ -224,7 +224,7 @@ class ClusterServiceTest {
|
|||
Optional.of("1"), Optional.empty());
|
||||
assertThat(topics.getPageCount()).isEqualTo(1);
|
||||
assertThat(topics.getTopics()).hasSize(20);
|
||||
assertThat(topics.getTopics()).map(Topic::getName).isSorted();
|
||||
assertThat(topics.getTopics()).map(TopicDTO::getName).isSorted();
|
||||
}
|
||||
|
||||
@Test
|
||||
|
@ -253,10 +253,10 @@ class ClusterServiceTest {
|
|||
|
||||
var topics = clusterService.getTopics(topicName,
|
||||
Optional.empty(), Optional.empty(), Optional.empty(),
|
||||
Optional.empty(), Optional.of(TopicColumnsToSort.TOTAL_PARTITIONS));
|
||||
Optional.empty(), Optional.of(TopicColumnsToSortDTO.TOTAL_PARTITIONS));
|
||||
assertThat(topics.getPageCount()).isEqualTo(4);
|
||||
assertThat(topics.getTopics()).hasSize(25);
|
||||
assertThat(topics.getTopics()).map(Topic::getPartitionCount).isSorted();
|
||||
assertThat(topics.getTopics()).map(TopicDTO::getPartitionCount).isSorted();
|
||||
}
|
||||
|
||||
@Test
|
||||
|
|
|
@ -3,7 +3,7 @@ package com.provectus.kafka.ui.service;
|
|||
import static org.assertj.core.api.Assertions.assertThat;
|
||||
|
||||
import com.provectus.kafka.ui.AbstractBaseTest;
|
||||
import com.provectus.kafka.ui.model.BrokerConfig;
|
||||
import com.provectus.kafka.ui.model.BrokerConfigDTO;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Optional;
|
||||
|
@ -25,7 +25,7 @@ public class ConfigTest extends AbstractBaseTest {
|
|||
public void testAlterConfig() throws Exception {
|
||||
String name = "background.threads";
|
||||
|
||||
Optional<BrokerConfig> bc = getConfig(name);
|
||||
Optional<BrokerConfigDTO> bc = getConfig(name);
|
||||
assertThat(bc.isPresent()).isTrue();
|
||||
assertThat(bc.get().getValue()).isEqualTo("10");
|
||||
|
||||
|
@ -42,7 +42,7 @@ public class ConfigTest extends AbstractBaseTest {
|
|||
// Without sleep it returns old config so we need to wait a little bit
|
||||
Thread.sleep(1000);
|
||||
|
||||
Optional<BrokerConfig> bcc = getConfig(name);
|
||||
Optional<BrokerConfigDTO> bcc = getConfig(name);
|
||||
assertThat(bcc.isPresent()).isTrue();
|
||||
assertThat(bcc.get().getValue()).isEqualTo("5");
|
||||
}
|
||||
|
@ -62,12 +62,12 @@ public class ConfigTest extends AbstractBaseTest {
|
|||
.expectStatus().isBadRequest();
|
||||
}
|
||||
|
||||
private Optional<BrokerConfig> getConfig(String name) {
|
||||
List<BrokerConfig> configs = webTestClient.get()
|
||||
private Optional<BrokerConfigDTO> getConfig(String name) {
|
||||
List<BrokerConfigDTO> configs = webTestClient.get()
|
||||
.uri("/api/clusters/{clusterName}/brokers/{id}/configs", LOCAL, 1)
|
||||
.exchange()
|
||||
.expectStatus().isOk()
|
||||
.expectBody(new ParameterizedTypeReference<List<BrokerConfig>>() {
|
||||
.expectBody(new ParameterizedTypeReference<List<BrokerConfigDTO>>() {
|
||||
})
|
||||
.returnResult()
|
||||
.getResponseBody();
|
||||
|
|
|
@ -11,8 +11,8 @@ import com.provectus.kafka.ui.exception.ClusterNotFoundException;
|
|||
import com.provectus.kafka.ui.exception.KsqlDbNotFoundException;
|
||||
import com.provectus.kafka.ui.exception.UnprocessableEntityException;
|
||||
import com.provectus.kafka.ui.model.KafkaCluster;
|
||||
import com.provectus.kafka.ui.model.KsqlCommand;
|
||||
import com.provectus.kafka.ui.model.KsqlCommandResponse;
|
||||
import com.provectus.kafka.ui.model.KsqlCommandDTO;
|
||||
import com.provectus.kafka.ui.model.KsqlCommandResponseDTO;
|
||||
import com.provectus.kafka.ui.strategy.ksql.statement.BaseStrategy;
|
||||
import com.provectus.kafka.ui.strategy.ksql.statement.DescribeStrategy;
|
||||
import com.provectus.kafka.ui.strategy.ksql.statement.ShowStrategy;
|
||||
|
@ -53,7 +53,7 @@ class KsqlServiceTest {
|
|||
@Test
|
||||
void shouldThrowClusterNotFoundExceptionOnExecuteKsqlCommand() {
|
||||
String clusterName = "test";
|
||||
KsqlCommand command = (new KsqlCommand()).ksql("show streams;");
|
||||
KsqlCommandDTO command = (new KsqlCommandDTO()).ksql("show streams;");
|
||||
when(clustersStorage.getClusterByName(clusterName)).thenReturn(Optional.ofNullable(null));
|
||||
|
||||
StepVerifier.create(ksqlService.executeKsqlCommand(clusterName, Mono.just(command)))
|
||||
|
@ -63,7 +63,7 @@ class KsqlServiceTest {
|
|||
@Test
|
||||
void shouldThrowKsqlDbNotFoundExceptionOnExecuteKsqlCommand() {
|
||||
String clusterName = "test";
|
||||
KsqlCommand command = (new KsqlCommand()).ksql("show streams;");
|
||||
KsqlCommandDTO command = (new KsqlCommandDTO()).ksql("show streams;");
|
||||
KafkaCluster kafkaCluster = Mockito.mock(KafkaCluster.class);
|
||||
when(clustersStorage.getClusterByName(clusterName))
|
||||
.thenReturn(Optional.ofNullable(kafkaCluster));
|
||||
|
@ -76,8 +76,8 @@ class KsqlServiceTest {
|
|||
@Test
|
||||
void shouldThrowUnprocessableEntityExceptionOnExecuteKsqlCommand() {
|
||||
String clusterName = "test";
|
||||
KsqlCommand command =
|
||||
(new KsqlCommand()).ksql("CREATE STREAM users WITH (KAFKA_TOPIC='users');");
|
||||
KsqlCommandDTO command =
|
||||
(new KsqlCommandDTO()).ksql("CREATE STREAM users WITH (KAFKA_TOPIC='users');");
|
||||
KafkaCluster kafkaCluster = Mockito.mock(KafkaCluster.class);
|
||||
when(clustersStorage.getClusterByName(clusterName))
|
||||
.thenReturn(Optional.ofNullable(kafkaCluster));
|
||||
|
@ -94,13 +94,13 @@ class KsqlServiceTest {
|
|||
void shouldSetHostToStrategy() {
|
||||
String clusterName = "test";
|
||||
String host = "localhost:8088";
|
||||
KsqlCommand command = (new KsqlCommand()).ksql("describe streams;");
|
||||
KsqlCommandDTO command = (new KsqlCommandDTO()).ksql("describe streams;");
|
||||
KafkaCluster kafkaCluster = Mockito.mock(KafkaCluster.class);
|
||||
|
||||
when(clustersStorage.getClusterByName(clusterName))
|
||||
.thenReturn(Optional.ofNullable(kafkaCluster));
|
||||
when(kafkaCluster.getKsqldbServer()).thenReturn(host);
|
||||
when(ksqlClient.execute(any())).thenReturn(Mono.just(new KsqlCommandResponse()));
|
||||
when(ksqlClient.execute(any())).thenReturn(Mono.just(new KsqlCommandResponseDTO()));
|
||||
|
||||
ksqlService.executeKsqlCommand(clusterName, Mono.just(command)).block();
|
||||
assertThat(alternativeStrategy.getUri()).isEqualTo(host + "/ksql");
|
||||
|
@ -109,16 +109,16 @@ class KsqlServiceTest {
|
|||
@Test
|
||||
void shouldCallClientAndReturnResponse() {
|
||||
String clusterName = "test";
|
||||
KsqlCommand command = (new KsqlCommand()).ksql("describe streams;");
|
||||
KsqlCommandDTO command = (new KsqlCommandDTO()).ksql("describe streams;");
|
||||
KafkaCluster kafkaCluster = Mockito.mock(KafkaCluster.class);
|
||||
KsqlCommandResponse response = new KsqlCommandResponse().message("success");
|
||||
KsqlCommandResponseDTO response = new KsqlCommandResponseDTO().message("success");
|
||||
|
||||
when(clustersStorage.getClusterByName(clusterName))
|
||||
.thenReturn(Optional.ofNullable(kafkaCluster));
|
||||
when(kafkaCluster.getKsqldbServer()).thenReturn("host");
|
||||
when(ksqlClient.execute(any())).thenReturn(Mono.just(response));
|
||||
|
||||
KsqlCommandResponse receivedResponse =
|
||||
KsqlCommandResponseDTO receivedResponse =
|
||||
ksqlService.executeKsqlCommand(clusterName, Mono.just(command)).block();
|
||||
verify(ksqlClient, times(1)).execute(alternativeStrategy);
|
||||
assertThat(receivedResponse).isEqualTo(response);
|
||||
|
|
|
@ -5,9 +5,9 @@ import static org.assertj.core.api.Assertions.assertThat;

import com.provectus.kafka.ui.AbstractBaseTest;
import com.provectus.kafka.ui.exception.LogDirNotFoundApiException;
import com.provectus.kafka.ui.exception.TopicOrPartitionNotFoundException;
import com.provectus.kafka.ui.model.BrokerTopicLogdirs;
import com.provectus.kafka.ui.model.BrokersLogdirs;
import com.provectus.kafka.ui.model.ErrorResponse;
import com.provectus.kafka.ui.model.BrokerTopicLogdirsDTO;
import com.provectus.kafka.ui.model.BrokersLogdirsDTO;
import com.provectus.kafka.ui.model.ErrorResponseDTO;
import java.util.List;
import java.util.Map;
import org.junit.jupiter.api.Test;

@ -26,21 +26,21 @@ public class LogDirsTest extends AbstractBaseTest {

@Test
public void testAllBrokers() {
List<BrokersLogdirs> dirs = webTestClient.get()
List<BrokersLogdirsDTO> dirs = webTestClient.get()
.uri("/api/clusters/{clusterName}/brokers/logdirs", LOCAL)
.exchange()
.expectStatus().isOk()
.expectBody(new ParameterizedTypeReference<List<BrokersLogdirs>>() {})
.expectBody(new ParameterizedTypeReference<List<BrokersLogdirsDTO>>() {})
.returnResult()
.getResponseBody();

assertThat(dirs).hasSize(1);
BrokersLogdirs dir = dirs.get(0);
BrokersLogdirsDTO dir = dirs.get(0);
assertThat(dir.getName()).isEqualTo("/var/lib/kafka/data");
assertThat(dir.getTopics().stream().anyMatch(t -> t.getName().equals("__consumer_offsets")))
.isTrue();

BrokerTopicLogdirs topic = dir.getTopics().stream()
BrokerTopicLogdirsDTO topic = dir.getTopics().stream()
.filter(t -> t.getName().equals("__consumer_offsets"))
.findAny().get();

@ -51,21 +51,21 @@ public class LogDirsTest extends AbstractBaseTest {

@Test
public void testOneBrokers() {
List<BrokersLogdirs> dirs = webTestClient.get()
List<BrokersLogdirsDTO> dirs = webTestClient.get()
.uri("/api/clusters/{clusterName}/brokers/logdirs?broker=1", LOCAL)
.exchange()
.expectStatus().isOk()
.expectBody(new ParameterizedTypeReference<List<BrokersLogdirs>>() {})
.expectBody(new ParameterizedTypeReference<List<BrokersLogdirsDTO>>() {})
.returnResult()
.getResponseBody();

assertThat(dirs).hasSize(1);
BrokersLogdirs dir = dirs.get(0);
BrokersLogdirsDTO dir = dirs.get(0);
assertThat(dir.getName()).isEqualTo("/var/lib/kafka/data");
assertThat(dir.getTopics().stream().anyMatch(t -> t.getName().equals("__consumer_offsets")))
.isTrue();

BrokerTopicLogdirs topic = dir.getTopics().stream()
BrokerTopicLogdirsDTO topic = dir.getTopics().stream()
.filter(t -> t.getName().equals("__consumer_offsets"))
.findAny().get();

@ -76,11 +76,11 @@ public class LogDirsTest extends AbstractBaseTest {

@Test
public void testWrongBrokers() {
List<BrokersLogdirs> dirs = webTestClient.get()
List<BrokersLogdirsDTO> dirs = webTestClient.get()
.uri("/api/clusters/{clusterName}/brokers/logdirs?broker=2", LOCAL)
.exchange()
.expectStatus().isOk()
.expectBody(new ParameterizedTypeReference<List<BrokersLogdirs>>() {})
.expectBody(new ParameterizedTypeReference<List<BrokersLogdirsDTO>>() {})
.returnResult()
.getResponseBody();

@ -89,7 +89,7 @@ public class LogDirsTest extends AbstractBaseTest {

@Test
public void testChangeDirToWrongDir() {
ErrorResponse dirs = webTestClient.patch()
ErrorResponseDTO dirs = webTestClient.patch()
.uri("/api/clusters/{clusterName}/brokers/{id}/logdirs", LOCAL, 1)
.bodyValue(Map.of(
"topic", "__consumer_offsets",

@ -99,7 +99,7 @@ public class LogDirsTest extends AbstractBaseTest {
)
.exchange()
.expectStatus().isBadRequest()
.expectBody(ErrorResponse.class)
.expectBody(ErrorResponseDTO.class)
.returnResult()
.getResponseBody();

@ -116,7 +116,7 @@ public class LogDirsTest extends AbstractBaseTest {
)
.exchange()
.expectStatus().isBadRequest()
.expectBody(ErrorResponse.class)
.expectBody(ErrorResponseDTO.class)
.returnResult()
.getResponseBody();

@ -1,21 +1,23 @@
package com.provectus.kafka.ui.service;

import static com.provectus.kafka.ui.model.SeekDirectionDTO.BACKWARD;
import static com.provectus.kafka.ui.model.SeekDirectionDTO.FORWARD;
import static com.provectus.kafka.ui.model.SeekTypeDTO.BEGINNING;
import static com.provectus.kafka.ui.model.SeekTypeDTO.OFFSET;
import static com.provectus.kafka.ui.model.SeekTypeDTO.TIMESTAMP;
import static org.assertj.core.api.Assertions.assertThat;

import com.provectus.kafka.ui.AbstractBaseTest;
import com.provectus.kafka.ui.emitter.BackwardRecordEmitter;
import com.provectus.kafka.ui.emitter.ForwardRecordEmitter;
import com.provectus.kafka.ui.model.ConsumerPosition;
import com.provectus.kafka.ui.model.SeekDirection;
import com.provectus.kafka.ui.model.SeekType;
import com.provectus.kafka.ui.model.TopicMessageEvent;
import com.provectus.kafka.ui.model.TopicMessageEventDTO;
import com.provectus.kafka.ui.producer.KafkaTestProducer;
import com.provectus.kafka.ui.serde.SimpleRecordSerDe;
import com.provectus.kafka.ui.util.OffsetsSeekBackward;
import com.provectus.kafka.ui.util.OffsetsSeekForward;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

@ -30,7 +32,6 @@ import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.header.Header;
import org.apache.kafka.common.header.internals.RecordHeader;
import org.apache.kafka.common.serialization.BytesDeserializer;
import org.apache.kafka.common.utils.Bytes;

@ -92,7 +93,7 @@ class RecordEmitterTest extends AbstractBaseTest {
var forwardEmitter = new ForwardRecordEmitter(
this::createConsumer,
new OffsetsSeekForward(EMPTY_TOPIC,
new ConsumerPosition(SeekType.BEGINNING, Map.of(), SeekDirection.FORWARD)
new ConsumerPosition(BEGINNING, Map.of(), FORWARD)
), new SimpleRecordSerDe()
);

@ -100,13 +101,13 @@ class RecordEmitterTest extends AbstractBaseTest {
this::createConsumer,
new OffsetsSeekBackward(
EMPTY_TOPIC,
new ConsumerPosition(SeekType.BEGINNING, Map.of(), SeekDirection.BACKWARD),
new ConsumerPosition(BEGINNING, Map.of(), BACKWARD),
100
), new SimpleRecordSerDe()
);

Long polledValues = Flux.create(forwardEmitter)
.filter(m -> m.getType().equals(TopicMessageEvent.TypeEnum.MESSAGE))
.filter(m -> m.getType().equals(TopicMessageEventDTO.TypeEnum.MESSAGE))
.limitRequest(100)
.count()
.block();

@ -114,7 +115,7 @@ class RecordEmitterTest extends AbstractBaseTest {
assertThat(polledValues).isZero();

polledValues = Flux.create(backwardEmitter)
.filter(m -> m.getType().equals(TopicMessageEvent.TypeEnum.MESSAGE))
.filter(m -> m.getType().equals(TopicMessageEventDTO.TypeEnum.MESSAGE))
.limitRequest(100)
.count()
.block();

@ -128,23 +129,23 @@ class RecordEmitterTest extends AbstractBaseTest {
var forwardEmitter = new ForwardRecordEmitter(
this::createConsumer,
new OffsetsSeekForward(TOPIC,
new ConsumerPosition(SeekType.BEGINNING, Map.of(), SeekDirection.FORWARD)
new ConsumerPosition(BEGINNING, Map.of(), FORWARD)
), new SimpleRecordSerDe()
);

var backwardEmitter = new BackwardRecordEmitter(
this::createConsumer,
new OffsetsSeekBackward(TOPIC,
new ConsumerPosition(SeekType.BEGINNING, Map.of(), SeekDirection.FORWARD),
new ConsumerPosition(BEGINNING, Map.of(), FORWARD),
PARTITIONS * MSGS_PER_PARTITION
), new SimpleRecordSerDe()
);

var polledValues = Flux.create(forwardEmitter)
.filter(m -> m.getType().equals(TopicMessageEvent.TypeEnum.MESSAGE))
.filter(m -> m.getType().equals(TopicMessageEventDTO.TypeEnum.MESSAGE))
.limitRequest(Long.MAX_VALUE)
.filter(e -> e.getType().equals(TopicMessageEvent.TypeEnum.MESSAGE))
.map(TopicMessageEvent::getMessage)
.filter(e -> e.getType().equals(TopicMessageEventDTO.TypeEnum.MESSAGE))
.map(TopicMessageEventDTO::getMessage)
.map(m -> m.getContent().toString())
.collect(Collectors.toList())
.block();

@ -153,10 +154,10 @@ class RecordEmitterTest extends AbstractBaseTest {
SENT_RECORDS.stream().map(Record::getValue).collect(Collectors.toList()));

polledValues = Flux.create(backwardEmitter)
.filter(m -> m.getType().equals(TopicMessageEvent.TypeEnum.MESSAGE))
.filter(m -> m.getType().equals(TopicMessageEventDTO.TypeEnum.MESSAGE))
.limitRequest(Long.MAX_VALUE)
.filter(e -> e.getType().equals(TopicMessageEvent.TypeEnum.MESSAGE))
.map(TopicMessageEvent::getMessage)
.filter(e -> e.getType().equals(TopicMessageEventDTO.TypeEnum.MESSAGE))
.map(TopicMessageEventDTO::getMessage)
.map(m -> m.getContent().toString())
.collect(Collectors.toList())
.block();

@ -177,23 +178,23 @@ class RecordEmitterTest extends AbstractBaseTest {
var forwardEmitter = new ForwardRecordEmitter(
this::createConsumer,
new OffsetsSeekForward(TOPIC,
new ConsumerPosition(SeekType.OFFSET, targetOffsets, SeekDirection.FORWARD)
new ConsumerPosition(OFFSET, targetOffsets, FORWARD)
), new SimpleRecordSerDe()
);

var backwardEmitter = new BackwardRecordEmitter(
this::createConsumer,
new OffsetsSeekBackward(TOPIC,
new ConsumerPosition(SeekType.OFFSET, targetOffsets, SeekDirection.BACKWARD),
new ConsumerPosition(OFFSET, targetOffsets, BACKWARD),
PARTITIONS * MSGS_PER_PARTITION
), new SimpleRecordSerDe()
);

var polledValues = Flux.create(forwardEmitter)
.filter(m -> m.getType().equals(TopicMessageEvent.TypeEnum.MESSAGE))
.filter(m -> m.getType().equals(TopicMessageEventDTO.TypeEnum.MESSAGE))
.limitRequest(Long.MAX_VALUE)
.filter(e -> e.getType().equals(TopicMessageEvent.TypeEnum.MESSAGE))
.map(TopicMessageEvent::getMessage)
.filter(e -> e.getType().equals(TopicMessageEventDTO.TypeEnum.MESSAGE))
.map(TopicMessageEventDTO::getMessage)
.map(m -> m.getContent().toString())
.collect(Collectors.toList())
.block();

@ -211,10 +212,10 @@ class RecordEmitterTest extends AbstractBaseTest {
.collect(Collectors.toList());

polledValues = Flux.create(backwardEmitter)
.filter(m -> m.getType().equals(TopicMessageEvent.TypeEnum.MESSAGE))
.filter(m -> m.getType().equals(TopicMessageEventDTO.TypeEnum.MESSAGE))
.limitRequest(Long.MAX_VALUE)
.filter(e -> e.getType().equals(TopicMessageEvent.TypeEnum.MESSAGE))
.map(TopicMessageEvent::getMessage)
.filter(e -> e.getType().equals(TopicMessageEventDTO.TypeEnum.MESSAGE))
.map(TopicMessageEventDTO::getMessage)
.map(m -> m.getContent().toString())
.collect(Collectors.toList())
.block();

@ -240,21 +241,21 @@ class RecordEmitterTest extends AbstractBaseTest {
var forwardEmitter = new ForwardRecordEmitter(
this::createConsumer,
new OffsetsSeekForward(TOPIC,
new ConsumerPosition(SeekType.TIMESTAMP, targetTimestamps, SeekDirection.FORWARD)
new ConsumerPosition(TIMESTAMP, targetTimestamps, FORWARD)
), new SimpleRecordSerDe()
);

var backwardEmitter = new BackwardRecordEmitter(
this::createConsumer,
new OffsetsSeekBackward(TOPIC,
new ConsumerPosition(SeekType.TIMESTAMP, targetTimestamps, SeekDirection.BACKWARD),
new ConsumerPosition(TIMESTAMP, targetTimestamps, BACKWARD),
PARTITIONS * MSGS_PER_PARTITION
), new SimpleRecordSerDe()
);

var polledValues = Flux.create(forwardEmitter)
.filter(e -> e.getType().equals(TopicMessageEvent.TypeEnum.MESSAGE))
.map(TopicMessageEvent::getMessage)
.filter(e -> e.getType().equals(TopicMessageEventDTO.TypeEnum.MESSAGE))
.map(TopicMessageEventDTO::getMessage)
.map(m -> m.getContent().toString())
.limitRequest(Long.MAX_VALUE)
.collect(Collectors.toList())

@ -268,8 +269,8 @@ class RecordEmitterTest extends AbstractBaseTest {
assertThat(polledValues).containsExactlyInAnyOrderElementsOf(expectedValues);

polledValues = Flux.create(backwardEmitter)
.filter(e -> e.getType().equals(TopicMessageEvent.TypeEnum.MESSAGE))
.map(TopicMessageEvent::getMessage)
.filter(e -> e.getType().equals(TopicMessageEventDTO.TypeEnum.MESSAGE))
.map(TopicMessageEventDTO::getMessage)
.map(m -> m.getContent().toString())
.limitRequest(Long.MAX_VALUE)
.collect(Collectors.toList())

@ -295,14 +296,14 @@ class RecordEmitterTest extends AbstractBaseTest {
var backwardEmitter = new BackwardRecordEmitter(
this::createConsumer,
new OffsetsSeekBackward(TOPIC,
new ConsumerPosition(SeekType.OFFSET, targetOffsets, SeekDirection.BACKWARD),
new ConsumerPosition(OFFSET, targetOffsets, BACKWARD),
numMessages
), new SimpleRecordSerDe()
);

var polledValues = Flux.create(backwardEmitter)
.filter(e -> e.getType().equals(TopicMessageEvent.TypeEnum.MESSAGE))
.map(TopicMessageEvent::getMessage)
.filter(e -> e.getType().equals(TopicMessageEventDTO.TypeEnum.MESSAGE))
.map(TopicMessageEventDTO::getMessage)
.map(m -> m.getContent().toString())
.limitRequest(numMessages)
.collect(Collectors.toList())

@ -328,14 +329,14 @@ class RecordEmitterTest extends AbstractBaseTest {
var backwardEmitter = new BackwardRecordEmitter(
this::createConsumer,
new OffsetsSeekBackward(TOPIC,
new ConsumerPosition(SeekType.OFFSET, offsets, SeekDirection.BACKWARD),
new ConsumerPosition(OFFSET, offsets, BACKWARD),
100
), new SimpleRecordSerDe()
);

var polledValues = Flux.create(backwardEmitter)
.filter(e -> e.getType().equals(TopicMessageEvent.TypeEnum.MESSAGE))
.map(TopicMessageEvent::getMessage)
.filter(e -> e.getType().equals(TopicMessageEventDTO.TypeEnum.MESSAGE))
.map(TopicMessageEventDTO::getMessage)
.map(m -> m.getContent().toString())
.limitRequest(Long.MAX_VALUE)
.collect(Collectors.toList())

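Beyond the mechanical DTO rename, the RecordEmitterTest hunks above also switch from fully qualified enum references to static imports, which is why the call sites shrink from `SeekTypeDTO.BEGINNING` to plain `BEGINNING`. A minimal sketch of that pattern, assuming only the `ConsumerPosition` constructor shown in the hunks (the wrapper class below is illustrative, not part of the commit):

```java
import static com.provectus.kafka.ui.model.SeekDirectionDTO.FORWARD;
import static com.provectus.kafka.ui.model.SeekTypeDTO.BEGINNING;

import com.provectus.kafka.ui.model.ConsumerPosition;
import java.util.Map;

class SeekExample {
  // Previously: new ConsumerPosition(SeekTypeDTO.BEGINNING, Map.of(), SeekDirectionDTO.FORWARD).
  // With the static imports the same construction reads:
  ConsumerPosition fromBeginning() {
    return new ConsumerPosition(BEGINNING, Map.of(), FORWARD);
  }
}
```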
@ -6,12 +6,12 @@ import static org.assertj.core.api.Assertions.assertThatThrownBy;

import com.fasterxml.jackson.databind.ObjectMapper;
import com.provectus.kafka.ui.AbstractBaseTest;
import com.provectus.kafka.ui.model.ConsumerPosition;
import com.provectus.kafka.ui.model.CreateTopicMessage;
import com.provectus.kafka.ui.model.MessageFormat;
import com.provectus.kafka.ui.model.SeekDirection;
import com.provectus.kafka.ui.model.SeekType;
import com.provectus.kafka.ui.model.TopicMessage;
import com.provectus.kafka.ui.model.TopicMessageEvent;
import com.provectus.kafka.ui.model.CreateTopicMessageDTO;
import com.provectus.kafka.ui.model.MessageFormatDTO;
import com.provectus.kafka.ui.model.SeekDirectionDTO;
import com.provectus.kafka.ui.model.SeekTypeDTO;
import com.provectus.kafka.ui.model.TopicMessageDTO;
import com.provectus.kafka.ui.model.TopicMessageEventDTO;
import io.confluent.kafka.schemaregistry.ParsedSchema;
import io.confluent.kafka.schemaregistry.avro.AvroSchema;
import io.confluent.kafka.schemaregistry.json.JsonSchema;

@ -128,7 +128,7 @@ public class SendAndReadTests extends AbstractBaseTest {
void noSchemaStringKeyStringValue() {
new SendAndReadSpec()
.withMsgToSend(
new CreateTopicMessage()
new CreateTopicMessageDTO()
.key("testKey")
.content("testValue")
)

@ -142,7 +142,7 @@ public class SendAndReadTests extends AbstractBaseTest {
void noSchemaJsonKeyJsonValue() {
new SendAndReadSpec()
.withMsgToSend(
new CreateTopicMessage()
new CreateTopicMessageDTO()
.key("{ \"f1\": 111, \"f2\": \"testStr1\" }")
.content("{ \"f1\": 222, \"f2\": \"testStr2\" }")
)

@ -156,7 +156,7 @@ public class SendAndReadTests extends AbstractBaseTest {
void keyIsIntValueIsDoubleShouldBeSerializedAsStrings() {
new SendAndReadSpec()
.withMsgToSend(
new CreateTopicMessage()
new CreateTopicMessageDTO()
.key("123")
.content("234.56")
)

@ -170,7 +170,7 @@ public class SendAndReadTests extends AbstractBaseTest {
void noSchemaKeyIsNull() {
new SendAndReadSpec()
.withMsgToSend(
new CreateTopicMessage()
new CreateTopicMessageDTO()
.key(null)
.content("testValue")
)

@ -184,7 +184,7 @@ public class SendAndReadTests extends AbstractBaseTest {
void noSchemaValueIsNull() {
new SendAndReadSpec()
.withMsgToSend(
new CreateTopicMessage()
new CreateTopicMessageDTO()
.key("testKey")
.content(null)
)

@ -216,7 +216,7 @@ public class SendAndReadTests extends AbstractBaseTest {
.withKeySchema(AVRO_SCHEMA_1)
.withValueSchema(AVRO_SCHEMA_2)
.withMsgToSend(
new CreateTopicMessage()
new CreateTopicMessageDTO()
.key(AVRO_SCHEMA_1_JSON_RECORD)
.content(AVRO_SCHEMA_2_JSON_RECORD)
)

@ -231,7 +231,7 @@ public class SendAndReadTests extends AbstractBaseTest {
new SendAndReadSpec()
.withValueSchema(AVRO_SCHEMA_1)
.withMsgToSend(
new CreateTopicMessage()
new CreateTopicMessageDTO()
.key("testKey")
.content(AVRO_SCHEMA_1_JSON_RECORD)
)

@ -246,7 +246,7 @@ public class SendAndReadTests extends AbstractBaseTest {
new SendAndReadSpec()
.withKeySchema(AVRO_SCHEMA_1)
.withMsgToSend(
new CreateTopicMessage()
new CreateTopicMessageDTO()
.key(AVRO_SCHEMA_1_JSON_RECORD)
.content("testVal")
)

@ -261,7 +261,7 @@ public class SendAndReadTests extends AbstractBaseTest {
new SendAndReadSpec()
.withValueSchema(PROTOBUF_SCHEMA)
.withMsgToSend(
new CreateTopicMessage()
new CreateTopicMessageDTO()
.key("testKey")
.content(PROTOBUF_SCHEMA_JSON_RECORD)
)

@ -277,7 +277,7 @@ public class SendAndReadTests extends AbstractBaseTest {
.withKeySchema(AVRO_SCHEMA_1)
.withValueSchema(AVRO_SCHEMA_2)
.withMsgToSend(
new CreateTopicMessage()
new CreateTopicMessageDTO()
.key(null)
.content(AVRO_SCHEMA_2_JSON_RECORD)
)

@ -292,7 +292,7 @@ public class SendAndReadTests extends AbstractBaseTest {
new SendAndReadSpec()
.withValueSchema(AVRO_SCHEMA_2)
.withMsgToSend(
new CreateTopicMessage()
new CreateTopicMessageDTO()
// f2 has type object instead of string
.content("{ \"f1\": 111, \"f2\": {} }")
)

@ -305,7 +305,7 @@ public class SendAndReadTests extends AbstractBaseTest {
.withKeySchema(AVRO_SCHEMA_1)
.withValueSchema(AVRO_SCHEMA_2)
.withMsgToSend(
new CreateTopicMessage()
new CreateTopicMessageDTO()
.key(AVRO_SCHEMA_1_JSON_RECORD)
.content(null)
)

@ -321,7 +321,7 @@ public class SendAndReadTests extends AbstractBaseTest {
.withKeySchema(AVRO_SCHEMA_1)
.withValueSchema(PROTOBUF_SCHEMA)
.withMsgToSend(
new CreateTopicMessage()
new CreateTopicMessageDTO()
.key(AVRO_SCHEMA_1_JSON_RECORD)
.content(PROTOBUF_SCHEMA_JSON_RECORD)
)

@ -336,7 +336,7 @@ public class SendAndReadTests extends AbstractBaseTest {
new SendAndReadSpec()
.withValueSchema(PROTOBUF_SCHEMA)
.withMsgToSend(
new CreateTopicMessage()
new CreateTopicMessageDTO()
// f2 field has type object instead of int
.content("{ \"f1\" : \"test str\", \"f2\" : {} }"))
.assertSendThrowsException();

@ -348,7 +348,7 @@ public class SendAndReadTests extends AbstractBaseTest {
.withKeySchema(PROTOBUF_SCHEMA)
.withValueSchema(JSON_SCHEMA)
.withMsgToSend(
new CreateTopicMessage()
new CreateTopicMessageDTO()
.key(PROTOBUF_SCHEMA_JSON_RECORD)
.content(JSON_SCHEMA_RECORD)
)

@ -364,7 +364,7 @@ public class SendAndReadTests extends AbstractBaseTest {
.withKeySchema(JSON_SCHEMA)
.withValueSchema(JSON_SCHEMA)
.withMsgToSend(
new CreateTopicMessage()
new CreateTopicMessageDTO()
.key(JSON_SCHEMA_RECORD)
)
.doAssert(polled -> {

@ -378,7 +378,7 @@ public class SendAndReadTests extends AbstractBaseTest {
new SendAndReadSpec()
.withValueSchema(JSON_SCHEMA)
.withMsgToSend(
new CreateTopicMessage()
new CreateTopicMessageDTO()
// 'f2' field has type object instead of string
.content("{ \"f1\": 12, \"f2\": {}, \"schema\": \"some txt\" }")
)

@ -391,7 +391,7 @@ public class SendAndReadTests extends AbstractBaseTest {
.withKeySchema(AVRO_SCHEMA_1)
.withValueSchema(AVRO_SCHEMA_2)
.withMsgToSend(
new CreateTopicMessage()
new CreateTopicMessageDTO()
.key(AVRO_SCHEMA_1_JSON_RECORD)
.content(AVRO_SCHEMA_2_JSON_RECORD)
)

@ -400,8 +400,8 @@ public class SendAndReadTests extends AbstractBaseTest {
assertJsonEqual(polled.getContent(), AVRO_SCHEMA_2_JSON_RECORD);
assertThat(polled.getKeySize()).isEqualTo(15L);
assertThat(polled.getValueSize()).isEqualTo(15L);
assertThat(polled.getKeyFormat()).isEqualTo(MessageFormat.AVRO);
assertThat(polled.getValueFormat()).isEqualTo(MessageFormat.AVRO);
assertThat(polled.getKeyFormat()).isEqualTo(MessageFormatDTO.AVRO);
assertThat(polled.getValueFormat()).isEqualTo(MessageFormatDTO.AVRO);
assertThat(polled.getKeySchemaId()).isNotEmpty();
assertThat(polled.getValueSchemaId()).isNotEmpty();
});

@ -413,7 +413,7 @@ public class SendAndReadTests extends AbstractBaseTest {
.withKeySchema(PROTOBUF_SCHEMA)
.withValueSchema(PROTOBUF_SCHEMA)
.withMsgToSend(
new CreateTopicMessage()
new CreateTopicMessageDTO()
.key(PROTOBUF_SCHEMA_JSON_RECORD)
.content(PROTOBUF_SCHEMA_JSON_RECORD)
)

@ -422,8 +422,8 @@ public class SendAndReadTests extends AbstractBaseTest {
assertJsonEqual(polled.getContent(), PROTOBUF_SCHEMA_JSON_RECORD);
assertThat(polled.getKeySize()).isEqualTo(18L);
assertThat(polled.getValueSize()).isEqualTo(18L);
assertThat(polled.getKeyFormat()).isEqualTo(MessageFormat.PROTOBUF);
assertThat(polled.getValueFormat()).isEqualTo(MessageFormat.PROTOBUF);
assertThat(polled.getKeyFormat()).isEqualTo(MessageFormatDTO.PROTOBUF);
assertThat(polled.getValueFormat()).isEqualTo(MessageFormatDTO.PROTOBUF);
assertThat(polled.getKeySchemaId()).isNotEmpty();
assertThat(polled.getValueSchemaId()).isNotEmpty();
});

@ -435,7 +435,7 @@ public class SendAndReadTests extends AbstractBaseTest {
.withKeySchema(JSON_SCHEMA)
.withValueSchema(JSON_SCHEMA)
.withMsgToSend(
new CreateTopicMessage()
new CreateTopicMessageDTO()
.key(JSON_SCHEMA_RECORD)
.content(JSON_SCHEMA_RECORD)
.headers(Map.of("header1", "value1"))

@ -443,8 +443,8 @@ public class SendAndReadTests extends AbstractBaseTest {
.doAssert(polled -> {
assertJsonEqual(polled.getKey(), JSON_SCHEMA_RECORD);
assertJsonEqual(polled.getContent(), JSON_SCHEMA_RECORD);
assertThat(polled.getKeyFormat()).isEqualTo(MessageFormat.JSON);
assertThat(polled.getValueFormat()).isEqualTo(MessageFormat.JSON);
assertThat(polled.getKeyFormat()).isEqualTo(MessageFormatDTO.JSON);
assertThat(polled.getValueFormat()).isEqualTo(MessageFormatDTO.JSON);
assertThat(polled.getKeySchemaId()).isNotEmpty();
assertThat(polled.getValueSchemaId()).isNotEmpty();
assertThat(polled.getKeySize()).isEqualTo(57L);

@ -460,11 +460,11 @@ public class SendAndReadTests extends AbstractBaseTest {
}

class SendAndReadSpec {
CreateTopicMessage msgToSend;
CreateTopicMessageDTO msgToSend;
ParsedSchema keySchema;
ParsedSchema valueSchema;

public SendAndReadSpec withMsgToSend(CreateTopicMessage msg) {
public SendAndReadSpec withMsgToSend(CreateTopicMessageDTO msg) {
this.msgToSend = msg;
return this;
}

@ -507,22 +507,22 @@ public class SendAndReadTests extends AbstractBaseTest {
}

@SneakyThrows
public void doAssert(Consumer<TopicMessage> msgAssert) {
public void doAssert(Consumer<TopicMessageDTO> msgAssert) {
String topic = createTopicAndCreateSchemas();
try {
clusterService.sendMessage(LOCAL, topic, msgToSend).block();
TopicMessage polled = clusterService.getMessages(
TopicMessageDTO polled = clusterService.getMessages(
LOCAL,
topic,
new ConsumerPosition(
SeekType.BEGINNING,
SeekTypeDTO.BEGINNING,
Map.of(new TopicPartition(topic, 0), 0L),
SeekDirection.FORWARD
SeekDirectionDTO.FORWARD
),
null,
1
).filter(e -> e.getType().equals(TopicMessageEvent.TypeEnum.MESSAGE))
.map(TopicMessageEvent::getMessage)
).filter(e -> e.getType().equals(TopicMessageEventDTO.TypeEnum.MESSAGE))
.map(TopicMessageEventDTO::getMessage)
.blockLast(Duration.ofSeconds(5000));

assertThat(polled).isNotNull();

@ -8,7 +8,7 @@ import static org.junit.jupiter.api.Assertions.assertTrue;

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.provectus.kafka.ui.exception.UnprocessableEntityException;
import com.provectus.kafka.ui.model.KsqlCommandResponse;
import com.provectus.kafka.ui.model.KsqlCommandResponseDTO;
import lombok.SneakyThrows;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;

@ -59,7 +59,7 @@ class CreateStrategyTest {
void shouldSerializeResponse() {
String message = "updated successful";
JsonNode node = getResponseWithMessage(message);
KsqlCommandResponse serializedResponse = strategy.serializeResponse(node);
KsqlCommandResponseDTO serializedResponse = strategy.serializeResponse(node);
assertThat(serializedResponse.getMessage()).isEqualTo(message);
}

@ -8,8 +8,8 @@ import static org.junit.jupiter.api.Assertions.assertTrue;

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.provectus.kafka.ui.exception.UnprocessableEntityException;
import com.provectus.kafka.ui.model.KsqlCommandResponse;
import com.provectus.kafka.ui.model.Table;
import com.provectus.kafka.ui.model.KsqlCommandResponseDTO;
import com.provectus.kafka.ui.model.TableDTO;
import java.util.List;
import lombok.SneakyThrows;
import org.junit.jupiter.api.BeforeEach;

@ -48,8 +48,8 @@ class DescribeStrategyTest {
@Test
void shouldSerializeResponse() {
JsonNode node = getResponseWithObjectNode();
KsqlCommandResponse serializedResponse = strategy.serializeResponse(node);
Table table = serializedResponse.getData();
KsqlCommandResponseDTO serializedResponse = strategy.serializeResponse(node);
TableDTO table = serializedResponse.getData();
assertThat(table.getHeaders()).isEqualTo(List.of("key", "value"));
assertThat(table.getRows()).isEqualTo(List.of(List.of("name", "kafka")));
}

@ -8,7 +8,7 @@ import static org.junit.jupiter.api.Assertions.assertTrue;

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.provectus.kafka.ui.exception.UnprocessableEntityException;
import com.provectus.kafka.ui.model.KsqlCommandResponse;
import com.provectus.kafka.ui.model.KsqlCommandResponseDTO;
import lombok.SneakyThrows;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;

@ -49,7 +49,7 @@ class DropStrategyTest {
void shouldSerializeResponse() {
String message = "updated successful";
JsonNode node = getResponseWithMessage(message);
KsqlCommandResponse serializedResponse = strategy.serializeResponse(node);
KsqlCommandResponseDTO serializedResponse = strategy.serializeResponse(node);
assertThat(serializedResponse.getMessage()).isEqualTo(message);
}

@ -8,8 +8,8 @@ import static org.junit.jupiter.api.Assertions.assertTrue;

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.provectus.kafka.ui.exception.UnprocessableEntityException;
import com.provectus.kafka.ui.model.KsqlCommandResponse;
import com.provectus.kafka.ui.model.Table;
import com.provectus.kafka.ui.model.KsqlCommandResponseDTO;
import com.provectus.kafka.ui.model.TableDTO;
import java.util.List;
import lombok.SneakyThrows;
import org.junit.jupiter.api.BeforeEach;

@ -46,8 +46,8 @@ class ExplainStrategyTest {
@Test
void shouldSerializeResponse() {
JsonNode node = getResponseWithObjectNode();
KsqlCommandResponse serializedResponse = strategy.serializeResponse(node);
Table table = serializedResponse.getData();
KsqlCommandResponseDTO serializedResponse = strategy.serializeResponse(node);
TableDTO table = serializedResponse.getData();
assertThat(table.getHeaders()).isEqualTo(List.of("key", "value"));
assertThat(table.getRows()).isEqualTo(List.of(List.of("name", "kafka")));
}

@ -8,8 +8,8 @@ import static org.junit.jupiter.api.Assertions.assertTrue;

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.provectus.kafka.ui.exception.UnprocessableEntityException;
import com.provectus.kafka.ui.model.KsqlCommandResponse;
import com.provectus.kafka.ui.model.Table;
import com.provectus.kafka.ui.model.KsqlCommandResponseDTO;
import com.provectus.kafka.ui.model.TableDTO;
import java.util.List;
import lombok.SneakyThrows;
import org.junit.jupiter.api.BeforeEach;

@ -47,8 +47,8 @@ class SelectStrategyTest {
@Test
void shouldSerializeResponse() {
JsonNode node = getResponseWithData();
KsqlCommandResponse serializedResponse = strategy.serializeResponse(node);
Table table = serializedResponse.getData();
KsqlCommandResponseDTO serializedResponse = strategy.serializeResponse(node);
TableDTO table = serializedResponse.getData();
assertThat(table.getHeaders()).isEqualTo(List.of("header1", "header2"));
assertThat(table.getRows()).isEqualTo(List.of(List.of("value1", "value2")));
}

@ -8,8 +8,8 @@ import static org.junit.jupiter.api.Assertions.assertTrue;

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.provectus.kafka.ui.exception.UnprocessableEntityException;
import com.provectus.kafka.ui.model.KsqlCommandResponse;
import com.provectus.kafka.ui.model.Table;
import com.provectus.kafka.ui.model.KsqlCommandResponseDTO;
import com.provectus.kafka.ui.model.TableDTO;
import java.util.List;
import lombok.SneakyThrows;
import org.junit.jupiter.api.BeforeEach;

@ -57,8 +57,8 @@ class ShowStrategyTest {
void shouldSerializeStreamsResponse() {
JsonNode node = getResponseWithData("streams");
strategy.test("show streams;");
KsqlCommandResponse serializedResponse = strategy.serializeResponse(node);
Table table = serializedResponse.getData();
KsqlCommandResponseDTO serializedResponse = strategy.serializeResponse(node);
TableDTO table = serializedResponse.getData();
assertThat(table.getHeaders()).isEqualTo(List.of("header"));
assertThat(table.getRows()).isEqualTo(List.of(List.of("value")));
}

@ -67,8 +67,8 @@ class ShowStrategyTest {
void shouldSerializeTablesResponse() {
JsonNode node = getResponseWithData("tables");
strategy.test("show tables;");
KsqlCommandResponse serializedResponse = strategy.serializeResponse(node);
Table table = serializedResponse.getData();
KsqlCommandResponseDTO serializedResponse = strategy.serializeResponse(node);
TableDTO table = serializedResponse.getData();
assertThat(table.getHeaders()).isEqualTo(List.of("header"));
assertThat(table.getRows()).isEqualTo(List.of(List.of("value")));
}

@ -77,8 +77,8 @@ class ShowStrategyTest {
void shouldSerializeTopicsResponse() {
JsonNode node = getResponseWithData("topics");
strategy.test("show topics;");
KsqlCommandResponse serializedResponse = strategy.serializeResponse(node);
Table table = serializedResponse.getData();
KsqlCommandResponseDTO serializedResponse = strategy.serializeResponse(node);
TableDTO table = serializedResponse.getData();
assertThat(table.getHeaders()).isEqualTo(List.of("header"));
assertThat(table.getRows()).isEqualTo(List.of(List.of("value")));
}

@ -87,8 +87,8 @@ class ShowStrategyTest {
void shouldSerializePropertiesResponse() {
JsonNode node = getResponseWithData("properties");
strategy.test("show properties;");
KsqlCommandResponse serializedResponse = strategy.serializeResponse(node);
Table table = serializedResponse.getData();
KsqlCommandResponseDTO serializedResponse = strategy.serializeResponse(node);
TableDTO table = serializedResponse.getData();
assertThat(table.getHeaders()).isEqualTo(List.of("header"));
assertThat(table.getRows()).isEqualTo(List.of(List.of("value")));
}

@ -97,8 +97,8 @@ class ShowStrategyTest {
void shouldSerializeFunctionsResponse() {
JsonNode node = getResponseWithData("functions");
strategy.test("show functions;");
KsqlCommandResponse serializedResponse = strategy.serializeResponse(node);
Table table = serializedResponse.getData();
KsqlCommandResponseDTO serializedResponse = strategy.serializeResponse(node);
TableDTO table = serializedResponse.getData();
assertThat(table.getHeaders()).isEqualTo(List.of("header"));
assertThat(table.getRows()).isEqualTo(List.of(List.of("value")));
}

@ -107,8 +107,8 @@ class ShowStrategyTest {
void shouldSerializeQueriesResponse() {
JsonNode node = getResponseWithData("queries");
strategy.test("show queries;");
KsqlCommandResponse serializedResponse = strategy.serializeResponse(node);
Table table = serializedResponse.getData();
KsqlCommandResponseDTO serializedResponse = strategy.serializeResponse(node);
TableDTO table = serializedResponse.getData();
assertThat(table.getHeaders()).isEqualTo(List.of("header"));
assertThat(table.getRows()).isEqualTo(List.of(List.of("value")));
}

@ -8,7 +8,7 @@ import static org.junit.jupiter.api.Assertions.assertTrue;

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.provectus.kafka.ui.exception.UnprocessableEntityException;
import com.provectus.kafka.ui.model.KsqlCommandResponse;
import com.provectus.kafka.ui.model.KsqlCommandResponseDTO;
import lombok.SneakyThrows;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;

@ -46,7 +46,7 @@ class TerminateStrategyTest {
void shouldSerializeResponse() {
String message = "query terminated.";
JsonNode node = getResponseWithMessage(message);
KsqlCommandResponse serializedResponse = strategy.serializeResponse(node);
KsqlCommandResponseDTO serializedResponse = strategy.serializeResponse(node);
assertThat(serializedResponse.getMessage()).isEqualTo(message);
}

@ -3,8 +3,8 @@ package com.provectus.kafka.ui.util;

import static org.assertj.core.api.Assertions.assertThat;

import com.provectus.kafka.ui.model.ConsumerPosition;
import com.provectus.kafka.ui.model.SeekDirection;
import com.provectus.kafka.ui.model.SeekType;
import com.provectus.kafka.ui.model.SeekDirectionDTO;
import com.provectus.kafka.ui.model.SeekTypeDTO;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;

@ -56,9 +56,9 @@ class OffsetsSeekTest {
var seek = new OffsetsSeekForward(
topic,
new ConsumerPosition(
SeekType.BEGINNING,
SeekTypeDTO.BEGINNING,
Map.of(tp0, 0L, tp1, 0L),
SeekDirection.FORWARD
SeekDirectionDTO.FORWARD
)
);

@ -73,9 +73,9 @@ class OffsetsSeekTest {
var seek = new OffsetsSeekBackward(
topic,
new ConsumerPosition(
SeekType.BEGINNING,
SeekTypeDTO.BEGINNING,
Map.of(tp2, 0L, tp3, 0L),
SeekDirection.BACKWARD
SeekDirectionDTO.BACKWARD
),
10
);

@ -90,7 +90,7 @@ class OffsetsSeekTest {
void forwardSeekToBeginningWithPartitionsList() {
var seek = new OffsetsSeekForward(
topic,
new ConsumerPosition(SeekType.BEGINNING, Map.of(), SeekDirection.FORWARD));
new ConsumerPosition(SeekTypeDTO.BEGINNING, Map.of(), SeekDirectionDTO.FORWARD));
seek.assignAndSeek(consumer);
assertThat(consumer.assignment()).containsExactlyInAnyOrder(tp0, tp1, tp2, tp3);
assertThat(consumer.position(tp0)).isZero();

@ -103,7 +103,7 @@ class OffsetsSeekTest {
void backwardSeekToBeginningWithPartitionsList() {
var seek = new OffsetsSeekBackward(
topic,
new ConsumerPosition(SeekType.BEGINNING, Map.of(), SeekDirection.BACKWARD),
new ConsumerPosition(SeekTypeDTO.BEGINNING, Map.of(), SeekDirectionDTO.BACKWARD),
10
);
seek.assignAndSeek(consumer);

@ -120,9 +120,9 @@ class OffsetsSeekTest {
var seek = new OffsetsSeekForward(
topic,
new ConsumerPosition(
SeekType.OFFSET,
SeekTypeDTO.OFFSET,
Map.of(tp0, 0L, tp1, 1L, tp2, 2L),
SeekDirection.FORWARD
SeekDirectionDTO.FORWARD
)
);
seek.assignAndSeek(consumer);

@ -135,9 +135,9 @@ class OffsetsSeekTest {
var seek = new OffsetsSeekBackward(
topic,
new ConsumerPosition(
SeekType.OFFSET,
SeekTypeDTO.OFFSET,
Map.of(tp0, 0L, tp1, 1L, tp2, 20L),
SeekDirection.BACKWARD
SeekDirectionDTO.BACKWARD
),
2
);

@ -151,9 +151,9 @@ class OffsetsSeekTest {
var seek = new OffsetsSeekBackward(
topic,
new ConsumerPosition(
SeekType.OFFSET,
SeekTypeDTO.OFFSET,
Map.of(tp2, 20L),
SeekDirection.BACKWARD
SeekDirectionDTO.BACKWARD
),
20
);

@ -81,7 +81,7 @@
</inputSpec>
<output>${project.build.directory}/generated-sources/api</output>
<generatorName>spring</generatorName>

<modelNameSuffix>DTO</modelNameSuffix>
<configOptions>
<modelPackage>com.provectus.kafka.ui.model</modelPackage>
<apiPackage>com.provectus.kafka.ui.api</apiPackage>

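The hunk above is the root cause of every rename in this commit: the `<modelNameSuffix>DTO</modelNameSuffix>` option added to the OpenAPI generator configuration makes the generator append a DTO suffix to each generated model class, while the API interfaces and packages keep their names. A minimal sketch of the effect on calling code, using names that appear in the hunks of this commit (the wrapper class itself is illustrative, not part of the change):

```java
// Before the change the generator emitted e.g. com.provectus.kafka.ui.model.CreateTopicMessage;
// with the DTO suffix the same schema now yields CreateTopicMessageDTO, so hand-written
// services and tests reference the suffixed class names.
import com.provectus.kafka.ui.model.CreateTopicMessageDTO;

class DtoSuffixExample {
  CreateTopicMessageDTO newMessage() {
    // Fluent setters are generated exactly as before; only the class name changed.
    return new CreateTopicMessageDTO()
        .key("testKey")
        .content("testValue");
  }
}
```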
@ -1,14 +1,9 @@
package com.provectus.kafka.ui.helpers;

import lombok.SneakyThrows;

import com.provectus.kafka.ui.api.*;
import com.provectus.kafka.ui.api.model.*;
import com.provectus.kafka.ui.api.ApiClient;
import com.provectus.kafka.ui.api.api.TopicsApi;

import java.util.List;

import static org.junit.jupiter.api.Assertions.assertTrue;
import com.provectus.kafka.ui.api.model.TopicCreation;
import lombok.SneakyThrows;

public class ApiHelper {
int partitions = 1;

@ -18,7 +13,6 @@ public class ApiHelper {

@SneakyThrows
private TopicsApi topicApi(){
ApiClient defaultClient = new ApiClient();