Minor issues fixes (#1646)

* Fixed minor issues

* fixed review bug

* fixed bug

Co-authored-by: German Osin <germanosin@Germans-MacBook-Pro.local>
Co-authored-by: Roman Zabaluev <rzabaluev@provectus.com>
Author: German Osin
Date: 2022-02-21 15:13:48 +03:00 (committed via GitHub)
Parent commit: 94b1f4a772
Commit: 4cc4175ef2
66 changed files with 316 additions and 272 deletions


@@ -6,6 +6,10 @@ import java.util.concurrent.ConcurrentHashMap;
 public final class KafkaConnectClients {
 
+  private KafkaConnectClients() {
+  }
+
   private static final Map<String, KafkaConnectClientApi> CACHE = new ConcurrentHashMap<>();
 
   public static KafkaConnectClientApi withBaseUrl(String basePath) {


@@ -20,7 +20,6 @@ import org.springframework.web.reactive.function.client.WebClientResponseExcepti
 import reactor.core.publisher.Flux;
 import reactor.core.publisher.Mono;
 import reactor.util.retry.Retry;
-import reactor.util.retry.RetryBackoffSpec;
 
 @Slf4j
 public class RetryingKafkaConnectClient extends KafkaConnectClientApi {
@@ -32,7 +31,7 @@ public class RetryingKafkaConnectClient extends KafkaConnectClientApi {
   }
 
   private static Retry conflictCodeRetry() {
-    return RetryBackoffSpec
+    return Retry
        .fixedDelay(MAX_RETRIES, RETRIES_DELAY)
        .filter(e -> e instanceof WebClientResponseException.Conflict)
        .onRetryExhaustedThrow((spec, signal) ->
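Note: Retry.fixedDelay(...) already returns a RetryBackoffSpec, so the helper can be typed against the reactor.util.retry.Retry base class and the now-unused RetryBackoffSpec import dropped. A minimal sketch of the resulting retry policy (MAX_RETRIES and RETRIES_DELAY values are assumed, not taken from this commit):

    import java.time.Duration;
    import org.springframework.web.reactive.function.client.WebClientResponseException;
    import reactor.util.retry.Retry;

    class ConflictRetry {
      private static final int MAX_RETRIES = 5;                              // assumed value
      private static final Duration RETRIES_DELAY = Duration.ofMillis(200);  // assumed value

      // Retries only HTTP 409 (Conflict) responses, waiting a fixed delay between attempts.
      static Retry conflictCodeRetry() {
        return Retry
            .fixedDelay(MAX_RETRIES, RETRIES_DELAY)
            .filter(e -> e instanceof WebClientResponseException.Conflict);
      }
    }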


@@ -61,14 +61,14 @@ public class ClustersProperties {
   private void validateClusterNames() {
     // if only one cluster provided it is ok not to set name
-    if (clusters.size() == 1 && StringUtils.isEmpty(clusters.get(0).getName())) {
+    if (clusters.size() == 1 && !StringUtils.hasText(clusters.get(0).getName())) {
       clusters.get(0).setName("Default");
       return;
     }
 
     Set<String> clusterNames = new HashSet<>();
     for (Cluster clusterProperties : clusters) {
-      if (StringUtils.isEmpty(clusterProperties.getName())) {
+      if (!StringUtils.hasText(clusterProperties.getName())) {
         throw new IllegalStateException(
             "Application config isn't valid. "
                 + "Cluster names should be provided in case of multiple clusters present");
@@ -79,5 +79,4 @@ public class ClustersProperties {
       }
     }
   }
 }
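Note: Spring deprecated StringUtils.isEmpty(Object) in 5.3; StringUtils.hasText() is the suggested replacement and also treats whitespace-only names as missing, which is why the condition is negated. A minimal sketch of the new check:

    import org.springframework.util.StringUtils;

    class ClusterNameCheck {
      // hasText() returns false for null, "" and blank strings,
      // so !hasText(name) replaces the deprecated isEmpty(name).
      static boolean isMissing(String name) {
        return !StringUtils.hasText(name);
      }
    }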


@@ -1,6 +1,5 @@
 package com.provectus.kafka.ui.config;
 
-import com.fasterxml.jackson.databind.Module;
 import com.provectus.kafka.ui.model.JmxConnectionInfo;
 import com.provectus.kafka.ui.util.JmxPoolFactory;
 import java.util.Collections;


@@ -45,7 +45,7 @@ public class ReadOnlyModeFilter implements WebFilter {
             () -> new ClusterNotFoundException(
                 String.format("No cluster for name '%s'", clusterName)));
 
-    if (!kafkaCluster.getReadOnly()) {
+    if (!kafkaCluster.isReadOnly()) {
       return chain.filter(exchange);
     }


@@ -2,7 +2,11 @@ package com.provectus.kafka.ui.config.auth;
 abstract class AbstractAuthSecurityConfig {
 
-  public static final String[] AUTH_WHITELIST = {
+  protected AbstractAuthSecurityConfig() {
+  }
+
+  protected static final String[] AUTH_WHITELIST = {
       "/css/**",
       "/js/**",
       "/media/**",


@@ -43,11 +43,11 @@ public class LdapSecurityConfig extends AbstractAuthSecurityConfig {
   public ReactiveAuthenticationManager authenticationManager(BaseLdapPathContextSource contextSource) {
     BindAuthenticator ba = new BindAuthenticator(contextSource);
     if (ldapUserDnPattern != null) {
-      ba.setUserDnPatterns(new String[]{ldapUserDnPattern});
+      ba.setUserDnPatterns(new String[] {ldapUserDnPattern});
     }
     if (userFilterSearchFilter != null) {
       LdapUserSearch userSearch =
           new FilterBasedLdapUserSearch(userFilterSearchBase, userFilterSearchFilter, contextSource);
       ba.setUserSearch(userSearch);
     }


@@ -39,14 +39,14 @@ public class OAuthSecurityConfig extends AbstractAuthSecurityConfig {
         .authenticated();
 
     if (IS_OAUTH2_PRESENT && OAuth2ClasspathGuard.shouldConfigure(this.context)) {
-      OAuth2ClasspathGuard.configure(this.context, http);
+      OAuth2ClasspathGuard.configure(http);
     }
 
     return http.csrf().disable().build();
   }
 
   private static class OAuth2ClasspathGuard {
-    static void configure(ApplicationContext context, ServerHttpSecurity http) {
+    static void configure(ServerHttpSecurity http) {
       http
           .oauth2Login()
           .and()


@@ -15,8 +15,8 @@ import reactor.core.publisher.Mono;
 @Slf4j
 public class AuthController {
 
-  @GetMapping(value = "/auth", produces = { "text/html" })
-  private Mono<byte[]> getAuth(ServerWebExchange exchange) {
+  @GetMapping(value = "/auth", produces = {"text/html"})
+  public Mono<byte[]> getAuth(ServerWebExchange exchange) {
     Mono<CsrfToken> token = exchange.getAttributeOrDefault(CsrfToken.class.getName(), Mono.empty());
     return token
         .map(AuthController::csrfToken)
@@ -30,25 +30,25 @@ public class AuthController {
     String contextPath = exchange.getRequest().getPath().contextPath().value();
     String page =
         "<!DOCTYPE html>\n" + "<html lang=\"en\">\n" + " <head>\n"
             + " <meta charset=\"utf-8\">\n"
             + " <meta name=\"viewport\" content=\"width=device-width, initial-scale=1, "
             + "shrink-to-fit=no\">\n"
             + " <meta name=\"description\" content=\"\">\n"
             + " <meta name=\"author\" content=\"\">\n"
             + " <title>Please sign in</title>\n"
             + " <link href=\"https://maxcdn.bootstrapcdn.com/bootstrap/"
             + "4.0.0-beta/css/bootstrap.min.css\" rel=\"stylesheet\" "
             + "integrity=\"sha384-/Y6pD6FV/Vv2HJnA6t+vslU6fwYXjCFtcEpHbNJ0lyAFsXTsjBbfaDjzALeQsN6M\" "
             + "crossorigin=\"anonymous\">\n"
             + " <link href=\"https://getbootstrap.com/docs/4.0/examples/signin/signin.css\" "
             + "rel=\"stylesheet\" crossorigin=\"anonymous\"/>\n"
             + " </head>\n"
             + " <body>\n"
             + " <div class=\"container\">\n"
             + formLogin(queryParams, contextPath, csrfTokenHtmlInput)
             + " </div>\n"
             + " </body>\n"
             + "</html>";
     return page.getBytes(Charset.defaultCharset());
   }
@@ -61,21 +61,21 @@ public class AuthController {
     boolean isLogoutSuccess = queryParams.containsKey("logout");
     return
         " <form class=\"form-signin\" method=\"post\" action=\"" + contextPath + "/auth\">\n"
             + " <h2 class=\"form-signin-heading\">Please sign in</h2>\n"
             + createError(isError)
             + createLogoutSuccess(isLogoutSuccess)
             + " <p>\n"
             + " <label for=\"username\" class=\"sr-only\">Username</label>\n"
             + " <input type=\"text\" id=\"username\" name=\"username\" class=\"form-control\" "
             + "placeholder=\"Username\" required autofocus>\n"
             + " </p>\n" + " <p>\n"
             + " <label for=\"password\" class=\"sr-only\">Password</label>\n"
             + " <input type=\"password\" id=\"password\" name=\"password\" "
             + "class=\"form-control\" placeholder=\"Password\" required>\n"
             + " </p>\n" + csrfTokenHtmlInput
             + " <button class=\"btn btn-lg btn-primary btn-block\" "
             + "type=\"submit\">Sign in</button>\n"
             + " </form>\n";
   }
 
   private static String csrfToken(CsrfToken token) {
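Note: getAuth() becomes public because Spring request-mapping handler methods are conventionally public; a private handler cannot be overridden by CGLIB-based proxies (for example when security or AOP advice wraps the controller) and is flagged by static analysis. The produces array also loses its inner padding ({"text/html"}) to match the array-initializer style applied across this commit. A minimal, hypothetical controller showing the convention:

    import org.springframework.web.bind.annotation.GetMapping;
    import org.springframework.web.bind.annotation.RestController;
    import reactor.core.publisher.Mono;

    @RestController
    class HelloController {
      // Handler methods are public; annotation arrays use {"..."} without inner padding.
      @GetMapping(value = "/hello", produces = {"text/html"})
      public Mono<String> hello() {
        return Mono.just("<p>hello</p>");
      }
    }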


@@ -25,7 +25,7 @@ public class BrokersController extends AbstractController implements BrokersApi
   @Override
   public Mono<ResponseEntity<BrokerMetricsDTO>> getBrokersMetrics(String clusterName, Integer id,
         ServerWebExchange exchange) {
     return brokerService.getBrokerMetrics(getCluster(clusterName), id)
         .map(ResponseEntity::ok)
         .onErrorReturn(ResponseEntity.notFound().build());
@@ -33,14 +33,14 @@ public class BrokersController extends AbstractController implements BrokersApi
   @Override
   public Mono<ResponseEntity<Flux<BrokerDTO>>> getBrokers(String clusterName,
         ServerWebExchange exchange) {
     return Mono.just(ResponseEntity.ok(brokerService.getBrokers(getCluster(clusterName))));
   }
 
   @Override
   public Mono<ResponseEntity<Flux<BrokersLogdirsDTO>>> getAllBrokersLogdirs(String clusterName,
         List<Integer> brokers,
         ServerWebExchange exchange
   ) {
     return Mono.just(ResponseEntity.ok(
         brokerService.getAllBrokersLogdirs(getCluster(clusterName), brokers)));
@@ -48,7 +48,7 @@ public class BrokersController extends AbstractController implements BrokersApi
   @Override
   public Mono<ResponseEntity<Flux<BrokerConfigDTO>>> getBrokerConfig(String clusterName, Integer id,
         ServerWebExchange exchange) {
     return Mono.just(ResponseEntity.ok(
         brokerService.getBrokerConfig(getCluster(clusterName), id)));
   }


@@ -21,7 +21,7 @@ public class ClustersController extends AbstractController implements ClustersAp
   @Override
   public Mono<ResponseEntity<ClusterMetricsDTO>> getClusterMetrics(String clusterName,
         ServerWebExchange exchange) {
     return clusterService.getClusterMetrics(getCluster(clusterName))
         .map(ResponseEntity::ok)
         .onErrorReturn(ResponseEntity.notFound().build());
@@ -29,7 +29,7 @@ public class ClustersController extends AbstractController implements ClustersAp
   @Override
   public Mono<ResponseEntity<ClusterStatsDTO>> getClusterStats(String clusterName,
         ServerWebExchange exchange) {
     return clusterService.getClusterStats(getCluster(clusterName))
         .map(ResponseEntity::ok)
         .onErrorReturn(ResponseEntity.notFound().build());
@@ -42,7 +42,7 @@ public class ClustersController extends AbstractController implements ClustersAp
   @Override
   public Mono<ResponseEntity<ClusterDTO>> updateClusterInfo(String clusterName,
         ServerWebExchange exchange) {
     return clusterService.updateCluster(getCluster(clusterName)).map(ResponseEntity::ok);
   }
 }


@@ -56,7 +56,7 @@ public class ConsumerGroupsController extends AbstractController implements Cons
   @Override
   public Mono<ResponseEntity<Flux<ConsumerGroupDTO>>> getConsumerGroups(String clusterName,
         ServerWebExchange exchange) {
     return consumerGroupService.getAllConsumerGroups(getCluster(clusterName))
         .map(Flux::fromIterable)
         .map(f -> f.map(ConsumerGroupMapper::toDto))
@@ -96,7 +96,7 @@ public class ConsumerGroupsController extends AbstractController implements Cons
   }
 
   private ConsumerGroupsPageResponseDTO convertPage(ConsumerGroupService.ConsumerGroupsPage
         consumerGroupConsumerGroupsPage) {
     return new ConsumerGroupsPageResponseDTO()
         .pageCount(consumerGroupConsumerGroupsPage.getTotalPages())
         .consumerGroups(consumerGroupConsumerGroupsPage.getConsumerGroups()


@@ -28,7 +28,7 @@ public class KafkaConnectController extends AbstractController implements KafkaC
   @Override
   public Mono<ResponseEntity<Flux<ConnectDTO>>> getConnects(String clusterName,
         ServerWebExchange exchange) {
     return kafkaConnectService.getConnects(getCluster(clusterName)).map(ResponseEntity::ok);
   }
@@ -41,16 +41,16 @@ public class KafkaConnectController extends AbstractController implements KafkaC
   @Override
   public Mono<ResponseEntity<ConnectorDTO>> createConnector(String clusterName, String connectName,
         @Valid Mono<NewConnectorDTO> connector,
         ServerWebExchange exchange) {
     return kafkaConnectService.createConnector(getCluster(clusterName), connectName, connector)
         .map(ResponseEntity::ok);
   }
 
   @Override
   public Mono<ResponseEntity<ConnectorDTO>> getConnector(String clusterName, String connectName,
         String connectorName,
         ServerWebExchange exchange) {
     return kafkaConnectService.getConnector(getCluster(clusterName), connectName, connectorName)
         .map(ResponseEntity::ok);
   }
@@ -87,9 +87,9 @@ public class KafkaConnectController extends AbstractController implements KafkaC
   @Override
   public Mono<ResponseEntity<ConnectorDTO>> setConnectorConfig(String clusterName,
         String connectName,
         String connectorName,
         @Valid Mono<Object> requestBody,
         ServerWebExchange exchange) {
     return kafkaConnectService
         .setConnectorConfig(getCluster(clusterName), connectName, connectorName, requestBody)
         .map(ResponseEntity::ok);
@@ -135,8 +135,8 @@ public class KafkaConnectController extends AbstractController implements KafkaC
   @Override
   public Mono<ResponseEntity<ConnectorPluginConfigValidationResponseDTO>>
       validateConnectorPluginConfig(
         String clusterName, String connectName, String pluginName, @Valid Mono<Object> requestBody,
         ServerWebExchange exchange) {
     return kafkaConnectService
         .validateConnectorPluginConfig(
             getCluster(clusterName), connectName, pluginName, requestBody)


@@ -27,9 +27,9 @@ public class KsqlController extends AbstractController implements KsqlApi {
   @Override
   public Mono<ResponseEntity<KsqlCommandResponseDTO>> executeKsqlCommand(String clusterName,
         Mono<KsqlCommandDTO>
             ksqlCommand,
         ServerWebExchange exchange) {
     return ksqlService.executeKsqlCommand(getCluster(clusterName), ksqlCommand)
         .map(ResponseEntity::ok);
   }


@@ -1,13 +1,11 @@
 package com.provectus.kafka.ui.controller;
 
 import com.provectus.kafka.ui.util.ResourceUtil;
-import java.util.Optional;
 import java.util.concurrent.atomic.AtomicReference;
 import lombok.RequiredArgsConstructor;
 import lombok.SneakyThrows;
 import lombok.extern.slf4j.Slf4j;
 import org.springframework.beans.factory.annotation.Value;
-import org.springframework.boot.autoconfigure.web.ServerProperties;
 import org.springframework.core.io.Resource;
 import org.springframework.http.ResponseEntity;
 import org.springframework.web.bind.annotation.GetMapping;
@@ -24,7 +22,7 @@ public class StaticController {
   private Resource indexFile;
   private final AtomicReference<String> renderedIndexFile = new AtomicReference<>();
 
-  @GetMapping(value = "/index.html", produces = { "text/html" })
+  @GetMapping(value = "/index.html", produces = {"text/html"})
   public Mono<ResponseEntity<String>> getIndex(ServerWebExchange exchange) {
     return Mono.just(ResponseEntity.ok(getRenderedIndexFile(exchange)));
   }


@@ -43,7 +43,7 @@ public class TopicsController extends AbstractController implements TopicsApi {
   public Mono<ResponseEntity<TopicDTO>> recreateTopic(String clusterName,
         String topicName, ServerWebExchange serverWebExchange) {
     return topicsService.recreateTopic(getCluster(clusterName), topicName)
         .map(s -> new ResponseEntity<>(s, HttpStatus.CREATED));
   }
 
   @Override
@@ -70,12 +70,12 @@ public class TopicsController extends AbstractController implements TopicsApi {
   @Override
   public Mono<ResponseEntity<TopicsResponseDTO>> getTopics(String clusterName, @Valid Integer page,
         @Valid Integer perPage,
         @Valid Boolean showInternal,
         @Valid String search,
         @Valid TopicColumnsToSortDTO orderBy,
         @Valid SortOrderDTO sortOrder,
         ServerWebExchange exchange) {
     return topicsService
         .getTopics(
             getCluster(clusterName),
@@ -101,10 +101,9 @@ public class TopicsController extends AbstractController implements TopicsApi {
       String clusterName, String topicName,
       Mono<PartitionsIncreaseDTO> partitionsIncrease,
       ServerWebExchange exchange) {
-    return partitionsIncrease.flatMap(
-        partitions ->
-            topicsService.increaseTopicPartitions(getCluster(clusterName), topicName, partitions))
-        .map(ResponseEntity::ok);
+    return partitionsIncrease.flatMap(partitions ->
+        topicsService.increaseTopicPartitions(getCluster(clusterName), topicName, partitions)
+    ).map(ResponseEntity::ok);
   }
 
   @Override


@@ -19,7 +19,7 @@ public abstract class AbstractEmitter {
   private final RecordSerDe recordDeserializer;
   private final ConsumingStats consumingStats = new ConsumingStats();
 
-  public AbstractEmitter(RecordSerDe recordDeserializer) {
+  protected AbstractEmitter(RecordSerDe recordDeserializer) {
     this.recordDeserializer = recordDeserializer;
   }
@@ -33,7 +33,7 @@ public abstract class AbstractEmitter {
   }
 
   protected FluxSink<TopicMessageEventDTO> sendMessage(FluxSink<TopicMessageEventDTO> sink,
         ConsumerRecord<Bytes, Bytes> msg) {
     final TopicMessageDTO topicMessage = ClusterUtil.mapToTopicMessage(msg, recordDeserializer);
     return sink.next(
         new TopicMessageEventDTO()
@@ -45,8 +45,8 @@ public abstract class AbstractEmitter {
   protected void sendPhase(FluxSink<TopicMessageEventDTO> sink, String name) {
     sink.next(
         new TopicMessageEventDTO()
             .type(TopicMessageEventDTO.TypeEnum.PHASE)
             .phase(new TopicMessagePhaseDTO().name(name))
     );
   }


@@ -87,7 +87,7 @@ public class BackwardRecordEmitter
         // This is workaround for case when partition begin offset is less than
         // real minimal offset, usually appear in compcated topics
         if (records.count() > 0 && partitionRecords.isEmpty()) {
           waitingOffsets.markPolled(entry.getKey().partition());
         }


@@ -15,15 +15,15 @@ class ConsumingStats {
   private long elapsed = 0;
 
   void sendConsumingEvt(FluxSink<TopicMessageEventDTO> sink,
         ConsumerRecords<Bytes, Bytes> polledRecords,
         long elapsed) {
-    for (ConsumerRecord<Bytes, Bytes> record : polledRecords) {
-      for (Header header : record.headers()) {
+    for (ConsumerRecord<Bytes, Bytes> rec : polledRecords) {
+      for (Header header : rec.headers()) {
         bytes +=
             (header.key() != null ? header.key().getBytes().length : 0L)
                 + (header.value() != null ? header.value().length : 0L);
       }
-      bytes += record.serializedKeySize() + record.serializedValueSize();
+      bytes += rec.serializedKeySize() + rec.serializedValueSize();
     }
     this.records += polledRecords.count();
     this.elapsed += elapsed;
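Note: the loop variable is renamed from record to rec, keeping the code clear of the record contextual keyword introduced for Java records (still legal as a variable name, but commonly flagged by style tools). A standalone sketch of the byte counting done here (class name hypothetical; header bytes omitted for brevity):

    import org.apache.kafka.clients.consumer.ConsumerRecord;
    import org.apache.kafka.clients.consumer.ConsumerRecords;
    import org.apache.kafka.common.utils.Bytes;

    class PolledBytesCounter {
      // Sums serialized key/value sizes for one poll.
      static long payloadBytes(ConsumerRecords<Bytes, Bytes> polledRecords) {
        long bytes = 0;
        for (ConsumerRecord<Bytes, Bytes> rec : polledRecords) {
          bytes += rec.serializedKeySize() + rec.serializedValueSize();
        }
        return bytes;
      }
    }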


@@ -18,6 +18,9 @@ public class MessageFilters {
   private static GroovyScriptEngineImpl GROOVY_ENGINE;
 
+  private MessageFilters() {
+  }
+
   public static Predicate<TopicMessageDTO> createMsgFilter(String query, MessageFilterTypeDTO type) {
     switch (type) {
       case STRING_CONTAINS:
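Note: the private no-argument constructors added here and in ConsumerGroupMapper and KafkaConnectClients follow the usual utility-class pattern: the class exposes only static methods, so instantiation is blocked (and rules such as checkstyle's HideUtilityClassConstructor are satisfied). A generic sketch of the pattern (class and method hypothetical):

    final class TextUtils {
      private TextUtils() {
      }

      static boolean containsIgnoreCase(String haystack, String needle) {
        return haystack.toLowerCase().contains(needle.toLowerCase());
      }
    }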


@@ -19,7 +19,7 @@ public abstract class CustomBaseException extends RuntimeException {
   }
 
   protected CustomBaseException(String message, Throwable cause, boolean enableSuppression,
         boolean writableStackTrace) {
     super(message, cause, enableSuppression, writableStackTrace);
   }


@@ -34,7 +34,7 @@ public enum ErrorCode {
     for (ErrorCode value : ErrorCode.values()) {
       if (!codes.add(value.code())) {
         LoggerFactory.getLogger(ErrorCode.class)
             .warn("Multiple {} values refer to code {}", ErrorCode.class, value.code);
       }
     }
   }


@@ -1,12 +0,0 @@
-package com.provectus.kafka.ui.exception;
-
-public class SchemaTypeIsNotSupportedException extends UnprocessableEntityException {
-
-  private static final String REQUIRED_SCHEMA_REGISTRY_VERSION = "5.5.0";
-
-  public SchemaTypeIsNotSupportedException() {
-    super(String.format("Current version of Schema Registry does "
-        + "not support provided schema type,"
-        + " version %s or later is required here.", REQUIRED_SCHEMA_REGISTRY_VERSION));
-  }
-}


@@ -0,0 +1,12 @@
+package com.provectus.kafka.ui.exception;
+
+public class SchemaTypeNotSupportedException extends UnprocessableEntityException {
+
+  private static final String REQUIRED_SCHEMA_REGISTRY_VERSION = "5.5.0";
+
+  public SchemaTypeNotSupportedException() {
+    super(String.format("Current version of Schema Registry does "
+        + "not support provided schema type,"
+        + " version %s or later is required here.", REQUIRED_SCHEMA_REGISTRY_VERSION));
+  }
+}


@@ -8,6 +8,6 @@ public class TopicRecreationException extends CustomBaseException {
   public TopicRecreationException(String topicName, int seconds) {
     super(String.format("Can't create topic '%s' in %d seconds: "
         + "topic deletion is still in progress", topicName, seconds));
   }
 }


@@ -17,6 +17,9 @@ import org.apache.kafka.common.TopicPartition;
 public class ConsumerGroupMapper {
 
+  private ConsumerGroupMapper() {
+  }
+
   public static ConsumerGroupDTO toDto(InternalConsumerGroup c) {
     return convertToConsumerGroup(c, new ConsumerGroupDTO());
   }
@@ -47,7 +50,7 @@ public class ConsumerGroupMapper {
     for (TopicPartition topicPartition : member.getAssignment()) {
       final ConsumerGroupTopicPartitionDTO partition = partitionMap.computeIfAbsent(
           topicPartition,
-          (tp) -> new ConsumerGroupTopicPartitionDTO()
+          tp -> new ConsumerGroupTopicPartitionDTO()
               .topic(tp.topic())
               .partition(tp.partition())
       );
@@ -99,12 +102,18 @@ public class ConsumerGroupMapper {
   private static ConsumerGroupStateDTO mapConsumerGroupState(
       org.apache.kafka.common.ConsumerGroupState state) {
     switch (state) {
-      case DEAD: return ConsumerGroupStateDTO.DEAD;
-      case EMPTY: return ConsumerGroupStateDTO.EMPTY;
-      case STABLE: return ConsumerGroupStateDTO.STABLE;
-      case PREPARING_REBALANCE: return ConsumerGroupStateDTO.PREPARING_REBALANCE;
-      case COMPLETING_REBALANCE: return ConsumerGroupStateDTO.COMPLETING_REBALANCE;
-      default: return ConsumerGroupStateDTO.UNKNOWN;
+      case DEAD:
+        return ConsumerGroupStateDTO.DEAD;
+      case EMPTY:
+        return ConsumerGroupStateDTO.EMPTY;
+      case STABLE:
+        return ConsumerGroupStateDTO.STABLE;
+      case PREPARING_REBALANCE:
+        return ConsumerGroupStateDTO.PREPARING_REBALANCE;
+      case COMPLETING_REBALANCE:
+        return ConsumerGroupStateDTO.COMPLETING_REBALANCE;
+      default:
+        return ConsumerGroupStateDTO.UNKNOWN;
     }
   }


@@ -25,7 +25,7 @@ public class DescribeLogDirsMapper {
   }
 
   private BrokersLogdirsDTO toBrokerLogDirs(Integer broker, String dirName,
         DescribeLogDirsResponse.LogDirInfo logDirInfo) {
     BrokersLogdirsDTO result = new BrokersLogdirsDTO();
     result.setName(dirName);
     if (logDirInfo.error != null) {
@@ -40,8 +40,8 @@ public class DescribeLogDirsMapper {
   }
 
   private BrokerTopicLogdirsDTO toTopicLogDirs(Integer broker, String name,
         List<Map.Entry<TopicPartition,
             DescribeLogDirsResponse.ReplicaInfo>> partitions) {
     BrokerTopicLogdirsDTO topic = new BrokerTopicLogdirsDTO();
     topic.setName(name);
     topic.setPartitions(
@@ -53,8 +53,8 @@ public class DescribeLogDirsMapper {
   }
 
   private BrokerTopicPartitionLogdirDTO topicPartitionLogDir(Integer broker, Integer partition,
         DescribeLogDirsResponse.ReplicaInfo
             replicaInfo) {
     BrokerTopicPartitionLogdirDTO logDir = new BrokerTopicPartitionLogdirDTO();
     logDir.setBroker(broker);
     logDir.setPartition(partition);


@@ -1,6 +1,5 @@
 package com.provectus.kafka.ui.model;
 
-import com.provectus.kafka.ui.exception.IllegalEntityStateException;
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.List;
@@ -11,24 +10,24 @@ public enum CleanupPolicy {
   COMPACT_DELETE(Arrays.asList("compact,delete", "delete,compact")),
   UNKNOWN("unknown");
 
-  private final List<String> cleanUpPolicy;
+  private final List<String> policies;
 
-  CleanupPolicy(String cleanUpPolicy) {
-    this(Collections.singletonList(cleanUpPolicy));
+  CleanupPolicy(String policy) {
+    this(Collections.singletonList(policy));
   }
 
-  CleanupPolicy(List<String> cleanUpPolicy) {
-    this.cleanUpPolicy = cleanUpPolicy;
+  CleanupPolicy(List<String> policies) {
+    this.policies = policies;
   }
 
-  public String getCleanUpPolicy() {
-    return cleanUpPolicy.get(0);
+  public String getPolicy() {
+    return policies.get(0);
   }
 
   public static CleanupPolicy fromString(String string) {
     return Arrays.stream(CleanupPolicy.values())
         .filter(v ->
-            v.cleanUpPolicy.stream().anyMatch(
+            v.policies.stream().anyMatch(
                 s -> s.equals(string.replace(" ", "")
                 )
             )


@@ -38,7 +38,7 @@ public class InternalClusterMetrics {
   // zk stats
   @Deprecated //use 'zookeeperStatus' field with enum type instead
-  private final int zooKeeperStatus;
+  private final int zooKeeperStatusEnum;
   private final ServerStatusDTO zookeeperStatus;
   private final Throwable lastZookeeperException;


@@ -67,7 +67,7 @@ public class InternalClusterState {
     inSyncReplicasCount = partitionsStats.getInSyncReplicasCount();
     outOfSyncReplicasCount = partitionsStats.getOutOfSyncReplicasCount();
     underReplicatedPartitionCount = partitionsStats.getUnderReplicatedPartitionCount();
-    readOnly = cluster.getReadOnly();
+    readOnly = cluster.isReadOnly();
   }
 }


@@ -36,7 +36,7 @@ public class InternalConsumerGroup {
   }
 
   public static InternalConsumerGroup create(
       ConsumerGroupDescription description,
       Map<TopicPartition, Long> groupOffsets,
       Map<TopicPartition, Long> topicEndOffsets) {
     var builder = InternalConsumerGroup.builder();
@@ -65,21 +65,21 @@ public class InternalConsumerGroup {
   // removes data for all partitions that are not fit filter
   public InternalConsumerGroup retainDataForPartitions(Predicate<TopicPartition> partitionsFilter) {
-    var offsets = getOffsets().entrySet().stream()
+    var offsetsMap = getOffsets().entrySet().stream()
        .filter(e -> partitionsFilter.test(e.getKey()))
        .collect(Collectors.toMap(
            Map.Entry::getKey,
            Map.Entry::getValue
        ));
 
-    var members = getMembers().stream()
+    var nonEmptyMembers = getMembers().stream()
        .map(m -> filterConsumerMemberTopic(m, partitionsFilter))
        .filter(m -> !m.getAssignment().isEmpty())
        .collect(Collectors.toList());
 
     return toBuilder()
-        .offsets(offsets)
-        .members(members)
+        .offsets(offsetsMap)
+        .members(nonEmptyMembers)
        .build();
   }


@@ -12,7 +12,7 @@ public class InternalSchemaRegistry {
   private final List<String> url;
 
   public String getFirstUrl() {
     return url != null && !url.isEmpty() ? url.iterator().next() : null;
   }
 }


@@ -30,6 +30,6 @@ public class KafkaCluster {
   private final String protobufMessageName;
   private final Map<String, String> protobufMessageNameByTopic;
   private final Properties properties;
-  private final Boolean readOnly;
-  private final Boolean disableLogDirsCollection;
+  private final boolean readOnly;
+  private final boolean disableLogDirsCollection;
 }
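Note: switching readOnly and disableLogDirsCollection from Boolean to the primitive boolean means Lombok generates isReadOnly()/isDisableLogDirsCollection() accessors instead of getReadOnly()/getDisableLogDirsCollection(), and an unset flag defaults to false rather than null, which is what lets the null checks elsewhere in this commit disappear. A sketch of the idea on a hypothetical value class:

    import lombok.Builder;
    import lombok.Value;

    @Value
    @Builder
    class ClusterFlags {
      // Primitive booleans: Lombok generates isReadOnly() / isDisableLogDirsCollection(),
      // and the builder defaults them to false instead of null.
      boolean readOnly;
      boolean disableLogDirsCollection;
    }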


@@ -2,7 +2,6 @@ package com.provectus.kafka.ui.model.schemaregistry;
 
 import com.fasterxml.jackson.annotation.JsonInclude;
 import com.provectus.kafka.ui.model.SchemaTypeDTO;
-import com.provectus.kafka.ui.model.SchemaTypeDTO;
 import lombok.Data;
 
 @Data


@@ -87,8 +87,8 @@ public class ProtobufFileRecordSerDe implements RecordSerDe {
   @SneakyThrows
   private String parse(byte[] value, Descriptor descriptor) {
     DynamicMessage protoMsg = DynamicMessage.parseFrom(
         descriptor,
         new ByteArrayInputStream(value)
     );
     byte[] jsonFromProto = ProtobufSchemaUtils.toJson(protoMsg);
     return new String(jsonFromProto);
@@ -121,8 +121,8 @@ public class ProtobufFileRecordSerDe implements RecordSerDe {
   public TopicMessageSchemaDTO getTopicSchema(String topic) {
     final JsonSchema jsonSchema = schemaConverter.convert(
         protobufSchemaPath.toUri(),
         getDescriptor(topic)
     );
     final MessageSchemaDTO keySchema = new MessageSchemaDTO()
         .name(protobufSchema.fullName())


@@ -14,12 +14,18 @@ public interface RecordSerDe {
   @Value
   @Builder
   class DeserializedKeyValue {
-    @Nullable String key;
-    @Nullable String value;
-    @Nullable MessageFormat keyFormat;
-    @Nullable MessageFormat valueFormat;
-    @Nullable String keySchemaId;
-    @Nullable String valueSchemaId;
+    @Nullable
+    String key;
+    @Nullable
+    String value;
+    @Nullable
+    MessageFormat keyFormat;
+    @Nullable
+    MessageFormat valueFormat;
+    @Nullable
+    String keySchemaId;
+    @Nullable
+    String valueSchemaId;
   }
 
   DeserializedKeyValue deserialize(ConsumerRecord<Bytes, Bytes> msg);


@@ -6,8 +6,8 @@ import io.confluent.kafka.schemaregistry.avro.AvroSchemaUtils;
 import io.confluent.kafka.schemaregistry.client.SchemaMetadata;
 import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient;
 import io.confluent.kafka.schemaregistry.client.rest.exceptions.RestClientException;
+import io.confluent.kafka.serializers.AbstractKafkaSchemaSerDeConfig;
 import io.confluent.kafka.serializers.KafkaAvroSerializer;
-import io.confluent.kafka.serializers.KafkaAvroSerializerConfig;
 import java.io.IOException;
 import java.util.Map;
 import org.apache.kafka.common.serialization.Serializer;
@@ -27,8 +27,8 @@ public class AvroMessageReader extends MessageReader<Object> {
     serializer.configure(
         Map.of(
             "schema.registry.url", "wontbeused",
-            KafkaAvroSerializerConfig.AUTO_REGISTER_SCHEMAS, false,
-            KafkaAvroSerializerConfig.USE_LATEST_VERSION, true
+            AbstractKafkaSchemaSerDeConfig.AUTO_REGISTER_SCHEMAS, false,
+            AbstractKafkaSchemaSerDeConfig.USE_LATEST_VERSION, true
         ),
         isKey
     );
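Note: AUTO_REGISTER_SCHEMAS and USE_LATEST_VERSION are defined on the shared AbstractKafkaSchemaSerDeConfig base class, so the same constants serve the Avro, JSON Schema and Protobuf serializers touched by this commit instead of three serializer-specific config classes. A standalone sketch of the Avro case (helper class name hypothetical, config values taken from the diff):

    import io.confluent.kafka.serializers.AbstractKafkaSchemaSerDeConfig;
    import io.confluent.kafka.serializers.KafkaAvroSerializer;
    import java.util.Map;

    class AvroSerializerSetup {
      // Serializer that never auto-registers schemas and always uses the latest registered version.
      static KafkaAvroSerializer configured(boolean isKey) {
        KafkaAvroSerializer serializer = new KafkaAvroSerializer();
        serializer.configure(
            Map.of(
                "schema.registry.url", "wontbeused",
                AbstractKafkaSchemaSerDeConfig.AUTO_REGISTER_SCHEMAS, false,
                AbstractKafkaSchemaSerDeConfig.USE_LATEST_VERSION, true
            ),
            isKey
        );
        return serializer;
      }
    }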


@@ -10,8 +10,8 @@ import io.confluent.kafka.schemaregistry.client.SchemaMetadata;
 import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient;
 import io.confluent.kafka.schemaregistry.client.rest.exceptions.RestClientException;
 import io.confluent.kafka.schemaregistry.json.JsonSchema;
+import io.confluent.kafka.serializers.AbstractKafkaSchemaSerDeConfig;
 import io.confluent.kafka.serializers.json.KafkaJsonSchemaSerializer;
-import io.confluent.kafka.serializers.json.KafkaJsonSchemaSerializerConfig;
 import java.io.IOException;
 import java.util.Map;
 import org.apache.kafka.common.serialization.Serializer;
@@ -33,8 +33,8 @@ public class JsonSchemaMessageReader extends MessageReader<JsonNode> {
     serializer.configure(
         Map.of(
             "schema.registry.url", "wontbeused",
-            KafkaJsonSchemaSerializerConfig.AUTO_REGISTER_SCHEMAS, false,
-            KafkaJsonSchemaSerializerConfig.USE_LATEST_VERSION, true
+            AbstractKafkaSchemaSerDeConfig.AUTO_REGISTER_SCHEMAS, false,
+            AbstractKafkaSchemaSerDeConfig.USE_LATEST_VERSION, true
         ),
         isKey
     );
@@ -69,10 +69,10 @@ public class JsonSchemaMessageReader extends MessageReader<JsonNode> {
    * possible in our case. So, we just skip all infer logic and pass schema directly.
    */
   @Override
-  public byte[] serialize(String topic, JsonNode record) {
+  public byte[] serialize(String topic, JsonNode rec) {
     return super.serializeImpl(
-        super.getSubjectName(topic, isKey, record, schema),
-        record,
+        super.getSubjectName(topic, isKey, rec, schema),
+        rec,
         (JsonSchema) schema
     );
   }


@@ -8,8 +8,8 @@ import io.confluent.kafka.schemaregistry.client.SchemaMetadata;
 import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient;
 import io.confluent.kafka.schemaregistry.client.rest.exceptions.RestClientException;
 import io.confluent.kafka.schemaregistry.protobuf.ProtobufSchema;
+import io.confluent.kafka.serializers.AbstractKafkaSchemaSerDeConfig;
 import io.confluent.kafka.serializers.protobuf.KafkaProtobufSerializer;
-import io.confluent.kafka.serializers.protobuf.KafkaProtobufSerializerConfig;
 import java.io.IOException;
 import java.util.Map;
 import org.apache.kafka.common.serialization.Serializer;
@@ -28,8 +28,8 @@ public class ProtobufMessageReader extends MessageReader<Message> {
     serializer.configure(
         Map.of(
             "schema.registry.url", "wontbeused",
-            KafkaProtobufSerializerConfig.AUTO_REGISTER_SCHEMAS, false,
-            KafkaProtobufSerializerConfig.USE_LATEST_VERSION, true
+            AbstractKafkaSchemaSerDeConfig.AUTO_REGISTER_SCHEMAS, false,
+            AbstractKafkaSchemaSerDeConfig.USE_LATEST_VERSION, true
         ),
         isKey
     );


@@ -113,7 +113,7 @@ public class SchemaRegistryAwareRecordSerDe implements RecordSerDe {
         DeserializedKeyValueBuilder builder) {
     Optional<Integer> schemaId = extractSchemaIdFromMsg(rec, isKey);
     Optional<MessageFormat> format = schemaId.flatMap(this::getMessageFormatBySchemaId);
-    if (format.isPresent() && schemaRegistryFormatters.containsKey(format.get())) {
+    if (schemaId.isPresent() && format.isPresent() && schemaRegistryFormatters.containsKey(format.get())) {
       var formatter = schemaRegistryFormatters.get(format.get());
       try {
         var deserialized = formatter.format(rec.topic(), isKey ? rec.key().get() : rec.value().get());
@@ -135,12 +135,13 @@ public class SchemaRegistryAwareRecordSerDe implements RecordSerDe {
     // fallback
     if (isKey) {
-      builder.key(FALLBACK_FORMATTER.format(rec.topic(), isKey ? rec.key().get() : rec.value().get()));
+      builder.key(FALLBACK_FORMATTER.format(rec.topic(), rec.key().get()));
       builder.keyFormat(FALLBACK_FORMATTER.getFormat());
     } else {
-      builder.value(FALLBACK_FORMATTER.format(rec.topic(), isKey ? rec.key().get() : rec.value().get()));
+      builder.value(FALLBACK_FORMATTER.format(rec.topic(), rec.value().get()));
       builder.valueFormat(FALLBACK_FORMATTER.getFormat());
     }
   }
 
   @Override
@@ -202,14 +203,14 @@ public class SchemaRegistryAwareRecordSerDe implements RecordSerDe {
     final MessageSchemaDTO keySchema = new MessageSchemaDTO()
         .name(maybeKeySchema.map(
-            (s) -> schemaSubject(topic, true)
+            s -> schemaSubject(topic, true)
         ).orElse("unknown"))
         .source(MessageSchemaDTO.SourceEnum.SCHEMA_REGISTRY)
         .schema(sourceKeySchema);
 
     final MessageSchemaDTO valueSchema = new MessageSchemaDTO()
         .name(maybeValueSchema.map(
-            (s) -> schemaSubject(topic, false)
+            s -> schemaSubject(topic, false)
         ).orElse("unknown"))
         .source(MessageSchemaDTO.SourceEnum.SCHEMA_REGISTRY)
         .schema(sourceValueSchema);


@@ -105,7 +105,7 @@ public class BrokerService {
     Map<TopicPartitionReplica, String> req = Map.of(
         new TopicPartitionReplica(b.getTopic(), b.getPartition(), broker),
         b.getLogDir());
     return admin.alterReplicaLogDirs(req)
         .onErrorResume(UnknownTopicOrPartitionException.class,
             e -> Mono.error(new TopicOrPartitionNotFoundException()))
         .onErrorResume(LogDirNotFoundException.class,


@@ -11,7 +11,7 @@ import java.util.List;
 import java.util.Map;
 import java.util.Properties;
 import java.util.UUID;
-import java.util.function.Function;
+import java.util.function.ToIntFunction;
 import java.util.stream.Collectors;
 import javax.annotation.Nullable;
 import lombok.RequiredArgsConstructor;
@@ -141,18 +141,25 @@ public class ConsumerGroupService {
       case NAME:
         return Comparator.comparing(ConsumerGroupDescription::groupId);
       case STATE:
-        Function<ConsumerGroupDescription, Integer> statesPriorities = cg -> {
+        ToIntFunction<ConsumerGroupDescription> statesPriorities = cg -> {
           switch (cg.state()) {
-            case STABLE: return 0;
-            case COMPLETING_REBALANCE: return 1;
-            case PREPARING_REBALANCE: return 2;
-            case EMPTY: return 3;
-            case DEAD: return 4;
-            case UNKNOWN: return 5;
-            default: return 100;
+            case STABLE:
+              return 0;
+            case COMPLETING_REBALANCE:
+              return 1;
+            case PREPARING_REBALANCE:
+              return 2;
+            case EMPTY:
+              return 3;
+            case DEAD:
+              return 4;
+            case UNKNOWN:
+              return 5;
+            default:
+              return 100;
           }
         };
-        return Comparator.comparingInt(statesPriorities::apply);
+        return Comparator.comparingInt(statesPriorities);
       case MEMBERS:
         return Comparator.comparingInt(cg -> -cg.members().size());
       default:
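Note: a ToIntFunction can be handed straight to Comparator.comparingInt, which expects exactly that functional interface, so the statesPriorities::apply adapter and the Integer boxing of the old Function<..., Integer> go away. A minimal sketch:

    import java.util.Comparator;
    import java.util.function.ToIntFunction;
    import org.apache.kafka.clients.admin.ConsumerGroupDescription;

    class GroupOrdering {
      // comparingInt(ToIntFunction) takes the priority function directly, no boxing involved.
      static Comparator<ConsumerGroupDescription> byPriority(
          ToIntFunction<ConsumerGroupDescription> statesPriorities) {
        return Comparator.comparingInt(statesPriorities);
      }
    }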


@@ -44,7 +44,7 @@ public class FeatureService {
     if (controller != null) {
       features.add(
           isTopicDeletionEnabled(cluster, controller)
-              .flatMap(r -> r ? Mono.just(Feature.TOPIC_DELETION) : Mono.empty())
+              .flatMap(r -> Boolean.TRUE.equals(r) ? Mono.just(Feature.TOPIC_DELETION) : Mono.empty())
       );
     }
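Note: Boolean.TRUE.equals(r) is a null-safe test of a boxed Boolean: auto-unboxing in "r ? ... : ..." would throw a NullPointerException for a null value, while the equals() form simply evaluates to false (it also satisfies the usual static-analysis rule about boxed-boolean conditions). A tiny sketch (class hypothetical):

    class BoolCheck {
      // False for null and Boolean.FALSE, true only for Boolean.TRUE.
      static boolean isEnabled(Boolean r) {
        return Boolean.TRUE.equals(r);
      }
    }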


@@ -30,7 +30,7 @@ class KafkaConfigSanitizer extends Sanitizer {
     var keysToSanitize = new HashSet<>(
         patternsToSanitize.isEmpty() ? DEFAULT_PATTERNS_TO_SANITIZE : patternsToSanitize);
     keysToSanitize.addAll(kafkaConfigKeysToSanitize());
-    setKeysToSanitize(keysToSanitize.toArray(new String[]{}));
+    setKeysToSanitize(keysToSanitize.toArray(new String[] {}));
   }
 }


@@ -103,19 +103,19 @@ public class KafkaConnectService {
   }
 
   private Predicate<FullConnectorInfoDTO> matchesSearchTerm(final String search) {
-    return (connector) -> getSearchValues(connector)
+    return connector -> getSearchValues(connector)
        .anyMatch(value -> value.contains(
            StringUtils.defaultString(
                search,
                StringUtils.EMPTY)
                .toUpperCase()));
   }
 
   private Stream<String> getSearchValues(FullConnectorInfoDTO fullConnectorInfo) {
     return Stream.of(
         fullConnectorInfo.getName(),
         fullConnectorInfo.getStatus().getState().getValue(),
         fullConnectorInfo.getType().getValue())
        .map(String::toUpperCase);
   }
@@ -158,7 +158,7 @@ public class KafkaConnectService {
         connector
            .flatMap(c -> connectorExists(cluster, connectName, c.getName())
                .map(exists -> {
-                  if (exists) {
+                  if (Boolean.TRUE.equals(exists)) {
                     throw new ValidationException(
                         String.format("Connector with name %s already exists", c.getName()));
                   }
@@ -179,7 +179,7 @@ public class KafkaConnectService {
   }
 
   public Mono<ConnectorDTO> getConnector(KafkaCluster cluster, String connectName,
         String connectorName) {
     return withConnectClient(cluster, connectName)
        .flatMap(client -> client.getConnector(connectorName)
            .map(kafkaConnectMapper::fromClient)
@@ -240,8 +240,8 @@ public class KafkaConnectService {
   }
 
   public Mono<ConnectorDTO> setConnectorConfig(KafkaCluster cluster, String connectName,
         String connectorName, Mono<Object> requestBody) {
     return withConnectClient(cluster, connectName)
        .flatMap(c ->
            requestBody
                .flatMap(body -> c.setConnectorConfig(connectorName, (Map<String, Object>) body))


@@ -20,7 +20,7 @@ public class KsqlService {
   private final List<BaseStrategy> ksqlStatementStrategies;
 
   public Mono<KsqlCommandResponseDTO> executeKsqlCommand(KafkaCluster cluster,
         Mono<KsqlCommandDTO> ksqlCommand) {
     return Mono.justOrEmpty(cluster)
        .map(KafkaCluster::getKsqldbServer)
        .onErrorResume(e -> {


@@ -84,7 +84,7 @@ public class MessagesService {
         CreateTopicMessageDTO msg) {
     if (msg.getPartition() != null
         && msg.getPartition() > metricsCache.get(cluster).getTopicDescriptions()
             .get(topic).partitions().size() - 1) {
       throw new ValidationException("Invalid partition");
     }
     RecordSerDe serde =


@@ -60,7 +60,7 @@ public class MetricsService {
   }
 
   private Mono<InternalLogDirStats> getLogDirInfo(KafkaCluster cluster, ReactiveAdminClient c) {
-    if (cluster.getDisableLogDirsCollection() == null || !cluster.getDisableLogDirsCollection()) {
+    if (!cluster.isDisableLogDirsCollection()) {
      return c.describeLogDirs().map(InternalLogDirStats::new);
     }
     return Mono.just(InternalLogDirStats.empty());


@ -5,7 +5,7 @@ import static org.springframework.http.HttpStatus.UNPROCESSABLE_ENTITY;
import com.provectus.kafka.ui.exception.SchemaFailedToDeleteException; import com.provectus.kafka.ui.exception.SchemaFailedToDeleteException;
import com.provectus.kafka.ui.exception.SchemaNotFoundException; import com.provectus.kafka.ui.exception.SchemaNotFoundException;
import com.provectus.kafka.ui.exception.SchemaTypeIsNotSupportedException; import com.provectus.kafka.ui.exception.SchemaTypeNotSupportedException;
import com.provectus.kafka.ui.exception.UnprocessableEntityException; import com.provectus.kafka.ui.exception.UnprocessableEntityException;
import com.provectus.kafka.ui.exception.ValidationException; import com.provectus.kafka.ui.exception.ValidationException;
import com.provectus.kafka.ui.mapper.ClusterMapper; import com.provectus.kafka.ui.mapper.ClusterMapper;
@ -212,7 +212,7 @@ public class SchemaRegistryService {
.onStatus(UNPROCESSABLE_ENTITY::equals, .onStatus(UNPROCESSABLE_ENTITY::equals,
r -> r.bodyToMono(ErrorResponse.class) r -> r.bodyToMono(ErrorResponse.class)
.flatMap(x -> Mono.error(isUnrecognizedFieldSchemaTypeMessage(x.getMessage()) .flatMap(x -> Mono.error(isUnrecognizedFieldSchemaTypeMessage(x.getMessage())
? new SchemaTypeIsNotSupportedException() ? new SchemaTypeNotSupportedException()
: new UnprocessableEntityException(x.getMessage())))) : new UnprocessableEntityException(x.getMessage()))))
.bodyToMono(SubjectIdResponse.class); .bodyToMono(SubjectIdResponse.class);
} }
@ -294,7 +294,9 @@ public class SchemaRegistryService {
} }
public String formatted(String str, Object... args) { public String formatted(String str, Object... args) {
return new Formatter().format(str, args).toString(); try (Formatter formatter = new Formatter()) {
return formatter.format(str, args).toString();
}
} }
private void setBasicAuthIfEnabled(InternalSchemaRegistry schemaRegistry, HttpHeaders headers) { private void setBasicAuthIfEnabled(InternalSchemaRegistry schemaRegistry, HttpHeaders headers) {
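Note: java.util.Formatter implements Closeable, so the try-with-resources form above guarantees close() is called; for a Formatter backed by its internal StringBuilder this is mostly about satisfying static analysis rather than freeing OS resources. The same method as a self-contained example:

import java.util.Formatter;

class FormattedExample {
    static String formatted(String str, Object... args) {
        try (Formatter formatter = new Formatter()) {
            return formatter.format(str, args).toString();
        }
    }

    public static void main(String[] args) {
        // prints: schemas/test/versions/1
        System.out.println(formatted("schemas/%s/versions/%d", "test", 1));
    }
}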
View file
@ -80,14 +80,14 @@ public class TopicsService {
Optional<TopicColumnsToSortDTO> sortBy, Optional<TopicColumnsToSortDTO> sortBy,
Optional<SortOrderDTO> sortOrder) { Optional<SortOrderDTO> sortOrder) {
return adminClientService.get(cluster).flatMap(ac -> return adminClientService.get(cluster).flatMap(ac ->
new Pagination(ac, metricsCache.get(cluster)) new Pagination(ac, metricsCache.get(cluster))
.getPage(pageNum, nullablePerPage, showInternal, search, sortBy, sortOrder) .getPage(pageNum, nullablePerPage, showInternal, search, sortBy, sortOrder)
.flatMap(page -> .flatMap(page ->
loadTopics(cluster, page.getTopics()) loadTopics(cluster, page.getTopics())
.map(topics -> .map(topics ->
new TopicsResponseDTO() new TopicsResponseDTO()
.topics(topics.stream().map(clusterMapper::toTopic).collect(toList())) .topics(topics.stream().map(clusterMapper::toTopic).collect(toList()))
.pageCount(page.getTotalPages())))); .pageCount(page.getTotalPages()))));
} }
private Mono<List<InternalTopic>> loadTopics(KafkaCluster c, List<String> topics) { private Mono<List<InternalTopic>> loadTopics(KafkaCluster c, List<String> topics) {
@ -193,31 +193,31 @@ public class TopicsService {
public Mono<TopicDTO> recreateTopic(KafkaCluster cluster, String topicName) { public Mono<TopicDTO> recreateTopic(KafkaCluster cluster, String topicName) {
return loadTopic(cluster, topicName) return loadTopic(cluster, topicName)
.flatMap(t -> deleteTopic(cluster, topicName) .flatMap(t -> deleteTopic(cluster, topicName)
.thenReturn(t).delayElement(Duration.ofSeconds(recreateDelayInSeconds)) .thenReturn(t).delayElement(Duration.ofSeconds(recreateDelayInSeconds))
.flatMap(topic -> adminClientService.get(cluster).flatMap(ac -> ac.createTopic(topic.getName(), .flatMap(topic -> adminClientService.get(cluster).flatMap(ac -> ac.createTopic(topic.getName(),
topic.getPartitionCount(), topic.getPartitionCount(),
(short) topic.getReplicationFactor(), (short) topic.getReplicationFactor(),
topic.getTopicConfigs() topic.getTopicConfigs()
.stream() .stream()
.collect(Collectors .collect(Collectors
.toMap(InternalTopicConfig::getName, .toMap(InternalTopicConfig::getName,
InternalTopicConfig::getValue))) InternalTopicConfig::getValue)))
.thenReturn(topicName)) .thenReturn(topicName))
.retryWhen(Retry.fixedDelay(recreateMaxRetries, .retryWhen(Retry.fixedDelay(recreateMaxRetries,
Duration.ofSeconds(recreateDelayInSeconds)) Duration.ofSeconds(recreateDelayInSeconds))
.filter(throwable -> throwable instanceof TopicExistsException) .filter(TopicExistsException.class::isInstance)
.onRetryExhaustedThrow((a, b) -> .onRetryExhaustedThrow((a, b) ->
new TopicRecreationException(topicName, new TopicRecreationException(topicName,
recreateMaxRetries * recreateDelayInSeconds))) recreateMaxRetries * recreateDelayInSeconds)))
.flatMap(a -> loadTopic(cluster, topicName)).map(clusterMapper::toTopic) .flatMap(a -> loadTopic(cluster, topicName)).map(clusterMapper::toTopic)
) )
); );
} }
private Mono<InternalTopic> updateTopic(KafkaCluster cluster, private Mono<InternalTopic> updateTopic(KafkaCluster cluster,
String topicName, String topicName,
TopicUpdateDTO topicUpdate) { TopicUpdateDTO topicUpdate) {
return adminClientService.get(cluster) return adminClientService.get(cluster)
.flatMap(ac -> .flatMap(ac ->
ac.updateTopicConfig(topicName, topicUpdate.getConfigs()) ac.updateTopicConfig(topicName, topicUpdate.getConfigs())
@ -403,10 +403,11 @@ public class TopicsService {
); );
return ac.createPartitions(newPartitionsMap) return ac.createPartitions(newPartitionsMap)
.then(loadTopic(cluster, topicName)); .then(loadTopic(cluster, topicName));
}) }).map(t -> new PartitionsIncreaseResponseDTO()
.map(t -> new PartitionsIncreaseResponseDTO()
.topicName(t.getName()) .topicName(t.getName())
.totalPartitionsCount(t.getPartitionCount()))); .totalPartitionsCount(t.getPartitionCount())
)
);
} }
public Mono<Void> deleteTopic(KafkaCluster cluster, String topicName) { public Mono<Void> deleteTopic(KafkaCluster cluster, String topicName) {
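Note: recreateTopic above deletes the topic, waits, recreates it, and retries while the broker still answers with TopicExistsException (deletion is asynchronous), finally surfacing TopicRecreationException when retries are exhausted. A minimal Reactor sketch of that retry shape; the attempt count, delay and exception types are placeholders, not the project's configuration:

import java.time.Duration;
import reactor.core.publisher.Mono;
import reactor.util.retry.Retry;

class RetrySketch {
    // Retry only the expected transient failure, then map exhaustion to a clearer error.
    static Mono<String> createWithRetry(Mono<String> create) {
        return create.retryWhen(
            Retry.fixedDelay(3, Duration.ofSeconds(1))
                .filter(IllegalStateException.class::isInstance)
                .onRetryExhaustedThrow((spec, signal) ->
                    new IllegalStateException("still failing after retries", signal.failure())));
    }
}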
View file
@ -15,7 +15,6 @@ import org.apache.zookeeper.ZooKeeper;
import org.springframework.stereotype.Service; import org.springframework.stereotype.Service;
import org.springframework.util.StringUtils; import org.springframework.util.StringUtils;
import reactor.core.publisher.Mono; import reactor.core.publisher.Mono;
import reactor.core.scheduler.Schedulers;
@Service @Service
@RequiredArgsConstructor @RequiredArgsConstructor
@ -82,10 +81,11 @@ public class ZookeeperService {
private ZooKeeper createClient(KafkaCluster cluster) { private ZooKeeper createClient(KafkaCluster cluster) {
try { try {
return new ZooKeeper(cluster.getZookeeper(), 60 * 1000, watchedEvent -> {}); return new ZooKeeper(cluster.getZookeeper(), 60 * 1000, watchedEvent -> {
});
} catch (IOException e) { } catch (IOException e) {
log.error("Error while creating a zookeeper client for cluster [{}]", log.error("Error while creating a zookeeper client for cluster [{}]",
cluster.getName()); cluster.getName());
throw new ZooKeeperException(e); throw new ZooKeeperException(e);
} }
} }
View file
@ -101,7 +101,7 @@ public class KsqlApiClient {
if (parsed.getStatements().size() > 1) { if (parsed.getStatements().size() > 1) {
throw new ValidationException("Only single statement supported now"); throw new ValidationException("Only single statement supported now");
} }
if (parsed.getStatements().size() == 0) { if (parsed.getStatements().isEmpty()) {
throw new ValidationException("No valid ksql statement found"); throw new ValidationException("No valid ksql statement found");
} }
if (KsqlGrammar.isSelect(parsed.getStatements().get(0))) { if (KsqlGrammar.isSelect(parsed.getStatements().get(0))) {
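Note: the statement-count checks above require exactly one parsed statement; size() == 0 becomes isEmpty(), which states the intent directly. The same guard in isolation (the element type is simplified to String for the sketch):

import java.util.List;

class StatementGuard {
    static void requireSingleStatement(List<String> statements) {
        if (statements.size() > 1) {
            throw new IllegalArgumentException("Only single statement supported now");
        }
        if (statements.isEmpty()) {
            throw new IllegalArgumentException("No valid ksql statement found");
        }
    }
}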
View file
@ -18,6 +18,9 @@ import org.antlr.v4.runtime.atn.PredictionMode;
class KsqlGrammar { class KsqlGrammar {
private KsqlGrammar() {
}
@Value @Value
static class KsqlStatements { static class KsqlStatements {
List<KsqlGrammarParser.SingleStatementContext> statements; List<KsqlGrammarParser.SingleStatementContext> statements;
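Note: KsqlGrammar here, and DynamicParser, ResponseParser and ClusterUtil below, all gain a private no-argument constructor: they are static-only utility classes, and the private constructor blocks instantiation (the change typically flagged by checkstyle's HideUtilityClassConstructor check). The idiom in isolation, shown with an invented example class:

final class TextUtils {
    private TextUtils() {
        // utility class, no instances
    }

    static boolean isBlank(String s) {
        return s == null || s.trim().isEmpty();
    }
}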
View file
@ -12,6 +12,9 @@ import java.util.stream.StreamSupport;
class DynamicParser { class DynamicParser {
private DynamicParser() {
}
static KsqlResponseTable parseArray(String tableName, JsonNode array) { static KsqlResponseTable parseArray(String tableName, JsonNode array) {
return parseArray(tableName, getFieldNamesFromArray(array), array); return parseArray(tableName, getFieldNamesFromArray(array), array);
} }
View file
@ -14,6 +14,9 @@ import org.springframework.web.reactive.function.client.WebClientResponseExcepti
public class ResponseParser { public class ResponseParser {
private ResponseParser() {
}
public static Optional<KsqlApiClient.KsqlResponseTable> parseSelectResponse(JsonNode jsonNode) { public static Optional<KsqlApiClient.KsqlResponseTable> parseSelectResponse(JsonNode jsonNode) {
// in the response we get either a header record or row data // in the response we get either a header record or row data
if (arrayFieldNonEmpty(jsonNode, "header")) { if (arrayFieldNonEmpty(jsonNode, "header")) {
View file
@ -18,6 +18,9 @@ import org.apache.kafka.common.utils.Bytes;
@Slf4j @Slf4j
public class ClusterUtil { public class ClusterUtil {
private ClusterUtil() {
}
private static final ZoneId UTC_ZONE_ID = ZoneId.of("UTC"); private static final ZoneId UTC_ZONE_ID = ZoneId.of("UTC");
public static int convertToIntServerStatus(ServerStatusDTO serverStatus) { public static int convertToIntServerStatus(ServerStatusDTO serverStatus) {
View file
@ -86,14 +86,14 @@ public class JmxClusterUtil {
@SneakyThrows @SneakyThrows
private List<MetricDTO> getJmxMetrics(String host, int port, boolean jmxSsl, private List<MetricDTO> getJmxMetrics(String host, int port, boolean jmxSsl,
@Nullable String username, @Nullable String password) { @Nullable String username, @Nullable String password) {
String jmxUrl = JMX_URL + host + ":" + port + "/" + JMX_SERVICE_TYPE; String jmxUrl = JMX_URL + host + ":" + port + "/" + JMX_SERVICE_TYPE;
final var connectionInfo = JmxConnectionInfo.builder() final var connectionInfo = JmxConnectionInfo.builder()
.url(jmxUrl) .url(jmxUrl)
.ssl(jmxSsl) .ssl(jmxSsl)
.username(username) .username(username)
.password(password) .password(password)
.build(); .build();
JMXConnector srv; JMXConnector srv;
try { try {
srv = pool.borrowObject(connectionInfo); srv = pool.borrowObject(connectionInfo);
View file
@ -21,7 +21,7 @@ public class JmxPoolFactory extends BaseKeyedPooledObjectFactory<JmxConnectionIn
public JMXConnector create(JmxConnectionInfo info) throws Exception { public JMXConnector create(JmxConnectionInfo info) throws Exception {
Map<String, Object> env = new HashMap<>(); Map<String, Object> env = new HashMap<>();
if (StringUtils.isNotEmpty(info.getUsername()) && StringUtils.isNotEmpty(info.getPassword())) { if (StringUtils.isNotEmpty(info.getUsername()) && StringUtils.isNotEmpty(info.getPassword())) {
env.put("jmx.remote.credentials", new String[]{info.getUsername(), info.getPassword()}); env.put("jmx.remote.credentials", new String[] {info.getUsername(), info.getPassword()});
} }
if (info.isSsl()) { if (info.isSsl()) {
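Note: the JmxPoolFactory change is whitespace only; for context, standing up a JMX connection with optional credentials uses the standard javax.management.remote API roughly as below. The URL shape and the credentials key are the stock JMX remote ones; the project's own constants may differ:

import java.util.HashMap;
import java.util.Map;
import javax.management.remote.JMXConnector;
import javax.management.remote.JMXConnectorFactory;
import javax.management.remote.JMXServiceURL;

class JmxConnectSketch {
    static JMXConnector connect(String host, int port, String user, String password) throws Exception {
        // Standard RMI form: service:jmx:rmi:///jndi/rmi://<host>:<port>/jmxrmi
        JMXServiceURL url = new JMXServiceURL(
            "service:jmx:rmi:///jndi/rmi://" + host + ":" + port + "/jmxrmi");
        Map<String, Object> env = new HashMap<>();
        if (user != null && password != null) {
            // JMXConnector.CREDENTIALS is the "jmx.remote.credentials" key used above.
            env.put(JMXConnector.CREDENTIALS, new String[] {user, password});
        }
        return JMXConnectorFactory.connect(url, env);
    }
}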
View file
@ -30,20 +30,21 @@ import java.util.Map;
public final class KafkaConstants { public final class KafkaConstants {
private static final String LONG_MAX_STRING = Long.valueOf(Long.MAX_VALUE).toString();
public static final Map<String, String> TOPIC_DEFAULT_CONFIGS = Map.ofEntries( public static final Map<String, String> TOPIC_DEFAULT_CONFIGS = Map.ofEntries(
new AbstractMap.SimpleEntry<>(CLEANUP_POLICY_CONFIG, CLEANUP_POLICY_DELETE), new AbstractMap.SimpleEntry<>(CLEANUP_POLICY_CONFIG, CLEANUP_POLICY_DELETE),
new AbstractMap.SimpleEntry<>(COMPRESSION_TYPE_CONFIG, "producer"), new AbstractMap.SimpleEntry<>(COMPRESSION_TYPE_CONFIG, "producer"),
new AbstractMap.SimpleEntry<>(DELETE_RETENTION_MS_CONFIG, "86400000"), new AbstractMap.SimpleEntry<>(DELETE_RETENTION_MS_CONFIG, "86400000"),
new AbstractMap.SimpleEntry<>(FILE_DELETE_DELAY_MS_CONFIG, "60000"), new AbstractMap.SimpleEntry<>(FILE_DELETE_DELAY_MS_CONFIG, "60000"),
new AbstractMap.SimpleEntry<>(FLUSH_MESSAGES_INTERVAL_CONFIG, "9223372036854775807"), new AbstractMap.SimpleEntry<>(FLUSH_MESSAGES_INTERVAL_CONFIG, LONG_MAX_STRING),
new AbstractMap.SimpleEntry<>(FLUSH_MS_CONFIG, "9223372036854775807"), new AbstractMap.SimpleEntry<>(FLUSH_MS_CONFIG, LONG_MAX_STRING),
new AbstractMap.SimpleEntry<>("follower.replication.throttled.replicas", ""), new AbstractMap.SimpleEntry<>("follower.replication.throttled.replicas", ""),
new AbstractMap.SimpleEntry<>(INDEX_INTERVAL_BYTES_CONFIG, "4096"), new AbstractMap.SimpleEntry<>(INDEX_INTERVAL_BYTES_CONFIG, "4096"),
new AbstractMap.SimpleEntry<>("leader.replication.throttled.replicas", ""), new AbstractMap.SimpleEntry<>("leader.replication.throttled.replicas", ""),
new AbstractMap.SimpleEntry<>(MAX_COMPACTION_LAG_MS_CONFIG, "9223372036854775807"), new AbstractMap.SimpleEntry<>(MAX_COMPACTION_LAG_MS_CONFIG, LONG_MAX_STRING),
new AbstractMap.SimpleEntry<>(MAX_MESSAGE_BYTES_CONFIG, "1000012"), new AbstractMap.SimpleEntry<>(MAX_MESSAGE_BYTES_CONFIG, "1000012"),
new AbstractMap.SimpleEntry<>(MESSAGE_TIMESTAMP_DIFFERENCE_MAX_MS_CONFIG, new AbstractMap.SimpleEntry<>(MESSAGE_TIMESTAMP_DIFFERENCE_MAX_MS_CONFIG, LONG_MAX_STRING),
"9223372036854775807"),
new AbstractMap.SimpleEntry<>(MESSAGE_TIMESTAMP_TYPE_CONFIG, "CreateTime"), new AbstractMap.SimpleEntry<>(MESSAGE_TIMESTAMP_TYPE_CONFIG, "CreateTime"),
new AbstractMap.SimpleEntry<>(MIN_CLEANABLE_DIRTY_RATIO_CONFIG, "0.5"), new AbstractMap.SimpleEntry<>(MIN_CLEANABLE_DIRTY_RATIO_CONFIG, "0.5"),
new AbstractMap.SimpleEntry<>(MIN_COMPACTION_LAG_MS_CONFIG, "0"), new AbstractMap.SimpleEntry<>(MIN_COMPACTION_LAG_MS_CONFIG, "0"),
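Note: replacing the repeated literal "9223372036854775807" with LONG_MAX_STRING removes the magic number; the constant is simply Long.MAX_VALUE rendered as a string, as a quick check shows:

class LongMaxCheck {
    public static void main(String[] args) {
        String longMaxString = Long.valueOf(Long.MAX_VALUE).toString();
        // prints 9223372036854775807 and then true
        System.out.println(longMaxString);
        System.out.println("9223372036854775807".equals(longMaxString));
    }
}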
View file
@ -107,8 +107,8 @@ public abstract class OffsetsSeek {
.collect(Collectors.toMap(Tuple2::getT1, Tuple2::getT2)); .collect(Collectors.toMap(Tuple2::getT1, Tuple2::getT2));
this.beginOffsets = this.endOffsets.keySet().stream() this.beginOffsets = this.endOffsets.keySet().stream()
.map(p -> Tuples.of(p, allBeginningOffsets.get(new TopicPartition(topic, p)))) .map(p -> Tuples.of(p, allBeginningOffsets.get(new TopicPartition(topic, p))))
.collect(Collectors.toMap(Tuple2::getT1, Tuple2::getT2)); .collect(Collectors.toMap(Tuple2::getT1, Tuple2::getT2));
} }
public List<TopicPartition> topicPartitions() { public List<TopicPartition> topicPartitions() {
View file
@ -36,13 +36,13 @@ public class OffsetsSeekBackward extends OffsetsSeek {
protected Map<TopicPartition, Long> offsetsFromPositions(Consumer<Bytes, Bytes> consumer, protected Map<TopicPartition, Long> offsetsFromPositions(Consumer<Bytes, Bytes> consumer,
List<TopicPartition> partitions) { List<TopicPartition> partitions) {
return findOffsetsInt(consumer, consumerPosition.getSeekTo(), partitions); return findOffsetsInt(consumer, consumerPosition.getSeekTo(), partitions);
} }
protected Map<TopicPartition, Long> offsetsFromBeginning(Consumer<Bytes, Bytes> consumer, protected Map<TopicPartition, Long> offsetsFromBeginning(Consumer<Bytes, Bytes> consumer,
List<TopicPartition> partitions) { List<TopicPartition> partitions) {
return findOffsets(consumer, Map.of(), partitions); return findOffsets(consumer, Map.of(), partitions);
} }
@ -51,7 +51,7 @@ public class OffsetsSeekBackward extends OffsetsSeek {
consumerPosition.getSeekTo().entrySet().stream() consumerPosition.getSeekTo().entrySet().stream()
.collect(Collectors.toMap( .collect(Collectors.toMap(
Map.Entry::getKey, Map.Entry::getKey,
e -> e.getValue() Map.Entry::getValue
)); ));
Map<TopicPartition, Long> offsetsForTimestamps = consumer.offsetsForTimes(timestampsToSearch) Map<TopicPartition, Long> offsetsForTimestamps = consumer.offsetsForTimes(timestampsToSearch)
.entrySet().stream() .entrySet().stream()
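Note: swapping the lambda e -> e.getValue() for the method reference Map.Entry::getValue is behaviour-neutral. The same Collectors.toMap shape as a standalone sketch (String/Long stand in for the topic-partition and offset types used here):

import java.util.Map;
import java.util.stream.Collectors;

class ToMapSketch {
    static Map<String, Long> copy(Map<String, Long> source) {
        // Rebuild the map with identical keys and values using method references.
        return source.entrySet().stream()
            .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
    }
}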
View file
@ -19,7 +19,7 @@ public class OffsetsSeekForward extends OffsetsSeek {
} }
protected Map<TopicPartition, Long> offsetsFromPositions(Consumer<Bytes, Bytes> consumer, protected Map<TopicPartition, Long> offsetsFromPositions(Consumer<Bytes, Bytes> consumer,
List<TopicPartition> partitions) { List<TopicPartition> partitions) {
final Map<TopicPartition, Long> offsets = final Map<TopicPartition, Long> offsets =
offsetsFromBeginning(consumer, partitions); offsetsFromBeginning(consumer, partitions);
@ -54,7 +54,7 @@ public class OffsetsSeekForward extends OffsetsSeek {
} }
protected Map<TopicPartition, Long> offsetsFromBeginning(Consumer<Bytes, Bytes> consumer, protected Map<TopicPartition, Long> offsetsFromBeginning(Consumer<Bytes, Bytes> consumer,
List<TopicPartition> partitions) { List<TopicPartition> partitions) {
return consumer.beginningOffsets(partitions); return consumer.beginningOffsets(partitions);
} }
View file
@ -59,7 +59,8 @@ public class AvroJsonSchemaConverter implements JsonSchemaConverter<Schema> {
} }
case ARRAY: case ARRAY:
return createArraySchema(name, schema, definitions); return createArraySchema(name, schema, definitions);
default: throw new RuntimeException("Unknown type"); default:
throw new RuntimeException("Unknown type");
} }
} else { } else {
return createUnionSchema(schema, definitions); return createUnionSchema(schema, definitions);
@ -87,9 +88,9 @@ public class AvroJsonSchemaConverter implements JsonSchemaConverter<Schema> {
if (nullable) { if (nullable) {
return new OneOfFieldSchema( return new OneOfFieldSchema(
List.of( List.of(
new SimpleFieldSchema(new SimpleJsonType(JsonType.Type.NULL)), new SimpleFieldSchema(new SimpleJsonType(JsonType.Type.NULL)),
new ObjectFieldSchema(fields, Collections.emptyList()) new ObjectFieldSchema(fields, Collections.emptyList())
) )
); );
} else { } else {
return new ObjectFieldSchema(fields, Collections.emptyList()); return new ObjectFieldSchema(fields, Collections.emptyList());
@ -138,14 +139,18 @@ public class AvroJsonSchemaConverter implements JsonSchemaConverter<Schema> {
case BYTES: case BYTES:
case STRING: case STRING:
return new SimpleJsonType(JsonType.Type.STRING); return new SimpleJsonType(JsonType.Type.STRING);
case NULL: return new SimpleJsonType(JsonType.Type.NULL); case NULL:
case ARRAY: return new SimpleJsonType(JsonType.Type.ARRAY); return new SimpleJsonType(JsonType.Type.NULL);
case ARRAY:
return new SimpleJsonType(JsonType.Type.ARRAY);
case FIXED: case FIXED:
case FLOAT: case FLOAT:
case DOUBLE: case DOUBLE:
return new SimpleJsonType(JsonType.Type.NUMBER); return new SimpleJsonType(JsonType.Type.NUMBER);
case BOOLEAN: return new SimpleJsonType(JsonType.Type.BOOLEAN); case BOOLEAN:
default: return new SimpleJsonType(JsonType.Type.STRING); return new SimpleJsonType(JsonType.Type.BOOLEAN);
default:
return new SimpleJsonType(JsonType.Type.STRING);
} }
} }
} }
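Note: the AvroJsonSchemaConverter hunks are formatting only (one case label per line); the mapping itself is unchanged: BYTES and STRING map to STRING, NULL to NULL, ARRAY to ARRAY, FIXED/FLOAT/DOUBLE to NUMBER, BOOLEAN to BOOLEAN, and everything else falls back to STRING. A compact sketch of a mapping switch in that style; the enum and return values are illustrative, not the project's types:

enum AvroKind { BYTES, STRING, NULL, ARRAY, FIXED, FLOAT, DOUBLE, BOOLEAN, RECORD }

class TypeMappingSketch {
    static String toJsonType(AvroKind kind) {
        switch (kind) {
            case BYTES:
            case STRING:
                return "string";
            case NULL:
                return "null";
            case ARRAY:
                return "array";
            case FIXED:
            case FLOAT:
            case DOUBLE:
                return "number";
            case BOOLEAN:
                return "boolean";
            default:
                return "string";
        }
    }
}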
View file
@ -8,7 +8,7 @@ public abstract class JsonType {
protected final Type type; protected final Type type;
public JsonType(Type type) { protected JsonType(Type type) {
this.type = type; this.type = type;
} }
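Note: JsonType is abstract, so its constructor can never be called from outside a subclass; declaring it protected instead of public documents that and matches the usual static-analysis recommendation for abstract classes. The pattern in isolation, with invented classes:

abstract class Shape {
    protected final String name;

    // Protected: only subclasses are expected to call this.
    protected Shape(String name) {
        this.name = name;
    }
}

class Circle extends Shape {
    Circle() {
        super("circle");
    }
}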
View file
@ -2,10 +2,7 @@ package com.provectus.kafka.ui.util.jsonschema;
import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.node.ArrayNode;
import com.fasterxml.jackson.databind.node.BooleanNode;
import com.fasterxml.jackson.databind.node.ObjectNode; import com.fasterxml.jackson.databind.node.ObjectNode;
import com.fasterxml.jackson.databind.node.TextNode;
import java.util.List; import java.util.List;
import java.util.Map; import java.util.Map;
import java.util.stream.Collectors; import java.util.stream.Collectors;
View file
@ -41,9 +41,9 @@ public class ProtobufSchemaConverter implements JsonSchemaConverter<Descriptors.
Tuples.of( Tuples.of(
o.getName(), o.getName(),
new OneOfFieldSchema( new OneOfFieldSchema(
o.getFields().stream().map( o.getFields().stream().map(
Descriptors.FieldDescriptor::getName Descriptors.FieldDescriptor::getName
).map(fields::get).collect(Collectors.toList()) ).map(fields::get).collect(Collectors.toList())
) )
) )
).collect(Collectors.toMap( ).collect(Collectors.toMap(
@ -52,8 +52,8 @@ public class ProtobufSchemaConverter implements JsonSchemaConverter<Descriptors.
)); ));
final List<String> allOneOfFields = schema.getOneofs().stream().flatMap(o -> final List<String> allOneOfFields = schema.getOneofs().stream().flatMap(o ->
o.getFields().stream().map(Descriptors.FieldDescriptor::getName) o.getFields().stream().map(Descriptors.FieldDescriptor::getName)
).collect(Collectors.toList()); ).collect(Collectors.toList());
final Map<String, FieldSchema> excludedOneOf = fields.entrySet().stream() final Map<String, FieldSchema> excludedOneOf = fields.entrySet().stream()
.filter(f -> !allOneOfFields.contains(f.getKey())) .filter(f -> !allOneOfFields.contains(f.getKey()))
@ -79,7 +79,7 @@ public class ProtobufSchemaConverter implements JsonSchemaConverter<Descriptors.
} }
private FieldSchema convertField(Descriptors.FieldDescriptor field, private FieldSchema convertField(Descriptors.FieldDescriptor field,
Map<String, FieldSchema> definitions) { Map<String, FieldSchema> definitions) {
final JsonType jsonType = convertType(field); final JsonType jsonType = convertType(field);
FieldSchema fieldSchema; FieldSchema fieldSchema;