Merge branch 'master' of github.com:provectus/kafka-ui into metrics_ph2
Conflicts: kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/AbstractController.java
Commit: 17680e6cd1
104 changed files with 2559 additions and 1126 deletions
.github/workflows/frontend.yaml (4 changes, vendored)

@@ -23,7 +23,7 @@ jobs:
           # Disabling shallow clone is recommended for improving relevancy of reporting
           fetch-depth: 0
           ref: ${{ github.event.pull_request.head.sha }}
-      - uses: pnpm/action-setup@v2.2.4
+      - uses: pnpm/action-setup@v2.4.0
         with:
           version: 7.4.0
       - name: Install node
@@ -49,7 +49,7 @@ jobs:
           cd kafka-ui-react-app/
           pnpm test:CI
       - name: SonarCloud Scan
-        uses: workshur/sonarcloud-github-action@improved_basedir
+        uses: sonarsource/sonarcloud-github-action@master
         with:
           projectBaseDir: ./kafka-ui-react-app
           args: -Dsonar.pullrequest.key=${{ github.event.pull_request.number }} -Dsonar.pullrequest.branch=${{ github.head_ref }} -Dsonar.pullrequest.base=${{ github.base_ref }}

.github/workflows/release.yaml (2 changes, vendored)

@@ -34,7 +34,7 @@ jobs:
           echo "version=${VERSION}" >> $GITHUB_OUTPUT

       - name: Upload files to a GitHub release
-        uses: svenstaro/upload-release-action@2.6.1
+        uses: svenstaro/upload-release-action@2.7.0
         with:
           repo_token: ${{ secrets.GITHUB_TOKEN }}
           file: kafka-ui-api/target/kafka-ui-api-${{ steps.build.outputs.version }}.jar

README.md

@@ -18,6 +18,10 @@
   <a href="https://www.producthunt.com/products/ui-for-apache-kafka/reviews/new">ProductHunt</a>
 </p>

+<p align="center">
+    <img src="https://repobeats.axiom.co/api/embed/2e8a7c2d711af9daddd34f9791143e7554c35d0f.svg" />
+</p>
+
 #### UI for Apache Kafka is a free, open-source web UI to monitor and manage Apache Kafka clusters.

 UI for Apache Kafka is a simple tool that makes your data flows observable, helps find and troubleshoot issues faster and deliver optimal performance. Its lightweight dashboard makes it easy to track key metrics of your Kafka clusters - Brokers, Topics, Partitions, Production, and Consumption.

Dockerfile

@@ -1,7 +1,11 @@
 #FROM azul/zulu-openjdk-alpine:17-jre-headless
 FROM azul/zulu-openjdk-alpine@sha256:a36679ac0d28cb835e2a8c00e1e0d95509c6c51c5081c7782b85edb1f37a771a

-RUN apk add --no-cache gcompat # need to make snappy codec work
+RUN apk add --no-cache \
+    # snappy codec
+    gcompat \
+    # configuring timezones
+    tzdata
 RUN addgroup -S kafkaui && adduser -S kafkaui -G kafkaui

 # creating folder for dynamic config usage (certificates uploads, etc)

pom.xml

@@ -114,6 +114,11 @@
             <artifactId>json</artifactId>
             <version>${org.json.version}</version>
         </dependency>
+        <dependency>
+            <groupId>io.micrometer</groupId>
+            <artifactId>micrometer-registry-prometheus</artifactId>
+            <scope>runtime</scope>
+        </dependency>

         <dependency>
             <groupId>org.springframework.boot</groupId>

ClustersProperties.java

@@ -58,8 +58,6 @@ public class ClustersProperties {
   @Data
   public static class PollingProperties {
     Integer pollTimeoutMs;
-    Integer partitionPollTimeout;
-    Integer noDataEmptyPolls;
     Integer maxPageSize;
     Integer defaultPageSize;
   }
@@ -178,7 +176,13 @@ public class ClustersProperties {
     Integer auditTopicsPartitions;
     Boolean topicAuditEnabled;
     Boolean consoleAuditEnabled;
+    LogLevel level;
     Map<String, String> auditTopicProperties;
+
+    public enum LogLevel {
+      ALL,
+      ALTER_ONLY //default
+    }
   }

   @PostConstruct

AbstractAuthSecurityConfig.java

@@ -13,6 +13,7 @@ abstract class AbstractAuthSecurityConfig {
       "/resources/**",
       "/actuator/health/**",
       "/actuator/info",
+      "/actuator/prometheus",
       "/auth",
       "/login",
       "/logout",

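Taken together, the new `micrometer-registry-prometheus` runtime dependency and the `/actuator/prometheus` whitelist entry make a Prometheus scrape endpoint reachable without authentication. A minimal sketch of publishing a custom metric through the auto-configured registry, assuming standard Spring Boot actuator auto-configuration; the meter name and tag below are illustrative, not taken from this commit:

```java
import io.micrometer.core.instrument.Counter;
import io.micrometer.core.instrument.MeterRegistry;
import org.springframework.stereotype.Component;

@Component
class ExampleMetrics {

  private final Counter pollCounter;

  ExampleMetrics(MeterRegistry registry) {
    // Meters registered here are rendered by the Prometheus registry
    // and served at /actuator/prometheus once the endpoint is exposed.
    this.pollCounter = Counter.builder("example.polls.total")
        .tag("source", "kafka-ui")
        .register(registry);
  }

  void onPoll() {
    pollCounter.increment();
  }
}
```
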
BasicAuthSecurityConfig.java

@@ -6,13 +6,13 @@ import lombok.extern.slf4j.Slf4j;
 import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
 import org.springframework.context.annotation.Bean;
 import org.springframework.context.annotation.Configuration;
+import org.springframework.http.HttpMethod;
 import org.springframework.security.config.annotation.web.reactive.EnableWebFluxSecurity;
-import org.springframework.security.config.web.server.SecurityWebFiltersOrder;
 import org.springframework.security.config.web.server.ServerHttpSecurity;
 import org.springframework.security.web.server.SecurityWebFilterChain;
 import org.springframework.security.web.server.authentication.RedirectServerAuthenticationSuccessHandler;
 import org.springframework.security.web.server.authentication.logout.RedirectServerLogoutSuccessHandler;
-import org.springframework.security.web.server.ui.LogoutPageGeneratingWebFilter;
+import org.springframework.security.web.server.util.matcher.ServerWebExchangeMatchers;

 @Configuration
 @EnableWebFluxSecurity
@@ -33,15 +33,19 @@ public class BasicAuthSecurityConfig extends AbstractAuthSecurityConfig {
     final var logoutSuccessHandler = new RedirectServerLogoutSuccessHandler();
     logoutSuccessHandler.setLogoutSuccessUrl(URI.create(LOGOUT_URL));

-    return http
-        .addFilterAfter(new LogoutPageGeneratingWebFilter(), SecurityWebFiltersOrder.REACTOR_CONTEXT)
-        .csrf().disable()
-        .authorizeExchange()
-        .pathMatchers(AUTH_WHITELIST).permitAll()
-        .anyExchange().authenticated()
-        .and().formLogin().loginPage(LOGIN_URL).authenticationSuccessHandler(authHandler)
-        .and().logout().logoutSuccessHandler(logoutSuccessHandler)
-        .and().build();
+    return http.authorizeExchange(spec -> spec
+            .pathMatchers(AUTH_WHITELIST)
+            .permitAll()
+            .anyExchange()
+            .authenticated()
+        )
+        .formLogin(spec -> spec.loginPage(LOGIN_URL).authenticationSuccessHandler(authHandler))
+        .logout(spec -> spec
+            .logoutSuccessHandler(logoutSuccessHandler)
+            .requiresLogout(ServerWebExchangeMatchers.pathMatchers(HttpMethod.GET, "/logout")))
+        .csrf(ServerHttpSecurity.CsrfSpec::disable)
+        .build();
   }

 }

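This and the following security-config hunks all perform the same migration: the `.and()`-chained `ServerHttpSecurity` DSL (removed in newer Spring Security releases) is replaced by the lambda DSL, where each configurer takes a customizer instead of being opened and closed with `.and()`. A minimal, generic sketch of the target style; the class name here is illustrative, not from the source:

```java
import org.springframework.context.annotation.Bean;
import org.springframework.security.config.Customizer;
import org.springframework.security.config.web.server.ServerHttpSecurity;
import org.springframework.security.web.server.SecurityWebFilterChain;

class ExampleSecurityConfig {

  // Each section is configured through a lambda (or Customizer.withDefaults())
  // rather than a chained .x() ... .and() block.
  @Bean
  SecurityWebFilterChain filterChain(ServerHttpSecurity http) {
    return http
        .authorizeExchange(spec -> spec.anyExchange().authenticated())
        .formLogin(Customizer.withDefaults())
        .csrf(ServerHttpSecurity.CsrfSpec::disable)
        .build();
  }
}
```
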
DisabledAuthSecurityConfig.java

@@ -27,10 +27,12 @@ public class DisabledAuthSecurityConfig extends AbstractAuthSecurityConfig {
       System.exit(1);
     }
     log.warn("Authentication is disabled. Access will be unrestricted.");
-    return http.authorizeExchange()
-        .anyExchange().permitAll()
-        .and()
-        .csrf().disable()
+
+    return http.authorizeExchange(spec -> spec
+            .anyExchange()
+            .permitAll()
+        )
+        .csrf(ServerHttpSecurity.CsrfSpec::disable)
         .build();
   }

LdapSecurityConfig.java

@@ -24,6 +24,7 @@ import org.springframework.security.authentication.AuthenticationManager;
 import org.springframework.security.authentication.ProviderManager;
 import org.springframework.security.authentication.ReactiveAuthenticationManager;
 import org.springframework.security.authentication.ReactiveAuthenticationManagerAdapter;
+import org.springframework.security.config.Customizer;
 import org.springframework.security.config.annotation.web.reactive.EnableWebFluxSecurity;
 import org.springframework.security.config.web.server.ServerHttpSecurity;
 import org.springframework.security.core.GrantedAuthority;
@@ -126,21 +127,15 @@ public class LdapSecurityConfig {
       log.info("Active Directory support for LDAP has been enabled.");
     }

-    return http
-        .authorizeExchange()
-        .pathMatchers(AUTH_WHITELIST)
-        .permitAll()
-        .anyExchange()
-        .authenticated()
-
-        .and()
-        .formLogin()
-
-        .and()
-        .logout()
-
-        .and()
-        .csrf().disable()
+    return http.authorizeExchange(spec -> spec
+            .pathMatchers(AUTH_WHITELIST)
+            .permitAll()
+            .anyExchange()
+            .authenticated()
+        )
+        .formLogin(Customizer.withDefaults())
+        .logout(Customizer.withDefaults())
+        .csrf(ServerHttpSecurity.CsrfSpec::disable)
         .build();
   }

OAuthSecurityConfig.java

@@ -12,10 +12,11 @@ import lombok.extern.log4j.Log4j2;
 import org.jetbrains.annotations.Nullable;
 import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
 import org.springframework.boot.autoconfigure.security.oauth2.client.OAuth2ClientProperties;
-import org.springframework.boot.autoconfigure.security.oauth2.client.OAuth2ClientPropertiesRegistrationAdapter;
+import org.springframework.boot.autoconfigure.security.oauth2.client.OAuth2ClientPropertiesMapper;
 import org.springframework.boot.context.properties.EnableConfigurationProperties;
 import org.springframework.context.annotation.Bean;
 import org.springframework.context.annotation.Configuration;
+import org.springframework.security.config.Customizer;
 import org.springframework.security.config.annotation.method.configuration.EnableReactiveMethodSecurity;
 import org.springframework.security.config.annotation.web.reactive.EnableWebFluxSecurity;
 import org.springframework.security.config.web.server.ServerHttpSecurity;
@@ -49,21 +50,15 @@ public class OAuthSecurityConfig extends AbstractAuthSecurityConfig {
   public SecurityWebFilterChain configure(ServerHttpSecurity http, OAuthLogoutSuccessHandler logoutHandler) {
     log.info("Configuring OAUTH2 authentication.");

-    return http.authorizeExchange()
-        .pathMatchers(AUTH_WHITELIST)
-        .permitAll()
-        .anyExchange()
-        .authenticated()
-
-        .and()
-        .oauth2Login()
-
-        .and()
-        .logout()
-        .logoutSuccessHandler(logoutHandler)
-
-        .and()
-        .csrf().disable()
+    return http.authorizeExchange(spec -> spec
+            .pathMatchers(AUTH_WHITELIST)
+            .permitAll()
+            .anyExchange()
+            .authenticated()
+        )
+        .oauth2Login(Customizer.withDefaults())
+        .logout(spec -> spec.logoutSuccessHandler(logoutHandler))
+        .csrf(ServerHttpSecurity.CsrfSpec::disable)
         .build();
   }

@@ -103,7 +98,10 @@ public class OAuthSecurityConfig extends AbstractAuthSecurityConfig {
   public InMemoryReactiveClientRegistrationRepository clientRegistrationRepository() {
     final OAuth2ClientProperties props = OAuthPropertiesConverter.convertProperties(properties);
     final List<ClientRegistration> registrations =
-        new ArrayList<>(OAuth2ClientPropertiesRegistrationAdapter.getClientRegistrations(props).values());
+        new ArrayList<>(new OAuth2ClientPropertiesMapper(props).asClientRegistrations().values());
+    if (registrations.isEmpty()) {
+      throw new IllegalArgumentException("OAuth2 authentication is enabled but no providers specified.");
+    }
     return new InMemoryReactiveClientRegistrationRepository(registrations);
   }

CognitoCondition.java

@@ -1,13 +1,14 @@
 package com.provectus.kafka.ui.config.auth.condition;

 import com.provectus.kafka.ui.service.rbac.AbstractProviderCondition;
+import org.jetbrains.annotations.NotNull;
 import org.springframework.context.annotation.Condition;
 import org.springframework.context.annotation.ConditionContext;
 import org.springframework.core.type.AnnotatedTypeMetadata;

 public class CognitoCondition extends AbstractProviderCondition implements Condition {
   @Override
-  public boolean matches(final ConditionContext context, final AnnotatedTypeMetadata metadata) {
+  public boolean matches(final ConditionContext context, final @NotNull AnnotatedTypeMetadata metadata) {
     return getRegisteredProvidersTypes(context.getEnvironment()).stream().anyMatch(a -> a.equalsIgnoreCase("cognito"));
   }
 }

AbstractController.java

@@ -2,12 +2,19 @@ package com.provectus.kafka.ui.controller;

 import com.provectus.kafka.ui.exception.ClusterNotFoundException;
 import com.provectus.kafka.ui.model.KafkaCluster;
+import com.provectus.kafka.ui.model.rbac.AccessContext;
 import com.provectus.kafka.ui.service.ClustersStorage;
+import com.provectus.kafka.ui.service.audit.AuditService;
+import com.provectus.kafka.ui.service.rbac.AccessControlService;
 import org.springframework.beans.factory.annotation.Autowired;
+import reactor.core.publisher.Mono;
+import reactor.core.publisher.Signal;

 public abstract class AbstractController {

   protected ClustersStorage clustersStorage;
+  protected AccessControlService accessControlService;
+  protected AuditService auditService;

   protected KafkaCluster getCluster(String name) {
     return clustersStorage.getClusterByName(name)
@@ -15,8 +22,26 @@ public abstract class AbstractController {
             String.format("Cluster with name '%s' not found", name)));
   }

+  protected Mono<Void> validateAccess(AccessContext context) {
+    return accessControlService.validateAccess(context);
+  }
+
+  protected void audit(AccessContext acxt, Signal<?> sig) {
+    auditService.audit(acxt, sig);
+  }
+
   @Autowired
   public void setClustersStorage(ClustersStorage clustersStorage) {
     this.clustersStorage = clustersStorage;
   }

+  @Autowired
+  public void setAccessControlService(AccessControlService accessControlService) {
+    this.accessControlService = accessControlService;
+  }
+
+  @Autowired
+  public void setAuditService(AuditService auditService) {
+    this.auditService = auditService;
+  }
 }

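The controller diffs that follow all apply the same mechanical change: each controller drops its own `AccessControlService`/`AuditService` fields and routes calls through the `validateAccess`/`audit` helpers now inherited from `AbstractController`. A minimal sketch of the resulting endpoint shape; `ExampleController`, `ExampleApi` and `exampleService` are hypothetical names used only to keep the sketch self-contained:

```java
import com.provectus.kafka.ui.model.KafkaCluster;
import com.provectus.kafka.ui.model.rbac.AccessContext;
import lombok.RequiredArgsConstructor;
import org.springframework.http.ResponseEntity;
import org.springframework.web.bind.annotation.RestController;
import org.springframework.web.server.ServerWebExchange;
import reactor.core.publisher.Mono;

@RestController
@RequiredArgsConstructor
public class ExampleController extends AbstractController {

  private final ExampleService exampleService; // hypothetical service

  public Mono<ResponseEntity<Void>> doSomething(String clusterName, ServerWebExchange exchange) {
    AccessContext context = AccessContext.builder()
        .cluster(clusterName)
        .operationName("doSomething")
        .build();

    return validateAccess(context)                     // inherited RBAC check
        .then(exampleService.doSomething(getCluster(clusterName)))
        .doOnEach(sig -> audit(context, sig))          // inherited audit hook
        .thenReturn(ResponseEntity.ok().build());
  }

  interface ExampleService {                           // hypothetical, for a self-contained sketch
    Mono<Void> doSomething(KafkaCluster cluster);
  }
}
```
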
AclsController.java

@@ -2,14 +2,15 @@ package com.provectus.kafka.ui.controller;

 import com.provectus.kafka.ui.api.AclsApi;
 import com.provectus.kafka.ui.mapper.ClusterMapper;
+import com.provectus.kafka.ui.model.CreateConsumerAclDTO;
+import com.provectus.kafka.ui.model.CreateProducerAclDTO;
+import com.provectus.kafka.ui.model.CreateStreamAppAclDTO;
 import com.provectus.kafka.ui.model.KafkaAclDTO;
 import com.provectus.kafka.ui.model.KafkaAclNamePatternTypeDTO;
 import com.provectus.kafka.ui.model.KafkaAclResourceTypeDTO;
 import com.provectus.kafka.ui.model.rbac.AccessContext;
 import com.provectus.kafka.ui.model.rbac.permission.AclAction;
 import com.provectus.kafka.ui.service.acl.AclsService;
-import com.provectus.kafka.ui.service.audit.AuditService;
-import com.provectus.kafka.ui.service.rbac.AccessControlService;
 import java.util.Optional;
 import lombok.RequiredArgsConstructor;
 import org.apache.kafka.common.resource.PatternType;
@@ -26,8 +27,6 @@ import reactor.core.publisher.Mono;
 public class AclsController extends AbstractController implements AclsApi {

   private final AclsService aclsService;
-  private final AccessControlService accessControlService;
-  private final AuditService auditService;

   @Override
   public Mono<ResponseEntity<Void>> createAcl(String clusterName, Mono<KafkaAclDTO> kafkaAclDto,
@@ -38,11 +37,11 @@ public class AclsController extends AbstractController implements AclsApi {
         .operationName("createAcl")
         .build();

-    return accessControlService.validateAccess(context)
+    return validateAccess(context)
         .then(kafkaAclDto)
         .map(ClusterMapper::toAclBinding)
         .flatMap(binding -> aclsService.createAcl(getCluster(clusterName), binding))
-        .doOnEach(sig -> auditService.audit(context, sig))
+        .doOnEach(sig -> audit(context, sig))
         .thenReturn(ResponseEntity.ok().build());
   }

@@ -55,11 +54,11 @@ public class AclsController extends AbstractController implements AclsApi {
         .operationName("deleteAcl")
         .build();

-    return accessControlService.validateAccess(context)
+    return validateAccess(context)
         .then(kafkaAclDto)
         .map(ClusterMapper::toAclBinding)
         .flatMap(binding -> aclsService.deleteAcl(getCluster(clusterName), binding))
-        .doOnEach(sig -> auditService.audit(context, sig))
+        .doOnEach(sig -> audit(context, sig))
         .thenReturn(ResponseEntity.ok().build());
   }

@@ -85,12 +84,12 @@ public class AclsController extends AbstractController implements AclsApi {

     var filter = new ResourcePatternFilter(resourceType, resourceName, namePatternType);

-    return accessControlService.validateAccess(context).then(
+    return validateAccess(context).then(
         Mono.just(
             ResponseEntity.ok(
                 aclsService.listAcls(getCluster(clusterName), filter)
                     .map(ClusterMapper::toKafkaAclDto)))
-    ).doOnEach(sig -> auditService.audit(context, sig));
+    ).doOnEach(sig -> audit(context, sig));
   }

   @Override
@@ -101,11 +100,11 @@ public class AclsController extends AbstractController implements AclsApi {
         .operationName("getAclAsCsv")
         .build();

-    return accessControlService.validateAccess(context).then(
+    return validateAccess(context).then(
         aclsService.getAclAsCsvString(getCluster(clusterName))
             .map(ResponseEntity::ok)
             .flatMap(Mono::just)
-            .doOnEach(sig -> auditService.audit(context, sig))
+            .doOnEach(sig -> audit(context, sig))
     );
   }

@@ -117,10 +116,61 @@ public class AclsController extends AbstractController implements AclsApi {
         .operationName("syncAclsCsv")
         .build();

-    return accessControlService.validateAccess(context)
+    return validateAccess(context)
         .then(csvMono)
         .flatMap(csv -> aclsService.syncAclWithAclCsv(getCluster(clusterName), csv))
-        .doOnEach(sig -> auditService.audit(context, sig))
+        .doOnEach(sig -> audit(context, sig))
         .thenReturn(ResponseEntity.ok().build());
   }

+  @Override
+  public Mono<ResponseEntity<Void>> createConsumerAcl(String clusterName,
+                                                      Mono<CreateConsumerAclDTO> createConsumerAclDto,
+                                                      ServerWebExchange exchange) {
+    AccessContext context = AccessContext.builder()
+        .cluster(clusterName)
+        .aclActions(AclAction.EDIT)
+        .operationName("createConsumerAcl")
+        .build();
+
+    return validateAccess(context)
+        .then(createConsumerAclDto)
+        .flatMap(req -> aclsService.createConsumerAcl(getCluster(clusterName), req))
+        .doOnEach(sig -> audit(context, sig))
+        .thenReturn(ResponseEntity.ok().build());
+  }
+
+  @Override
+  public Mono<ResponseEntity<Void>> createProducerAcl(String clusterName,
+                                                      Mono<CreateProducerAclDTO> createProducerAclDto,
+                                                      ServerWebExchange exchange) {
+    AccessContext context = AccessContext.builder()
+        .cluster(clusterName)
+        .aclActions(AclAction.EDIT)
+        .operationName("createProducerAcl")
+        .build();
+
+    return validateAccess(context)
+        .then(createProducerAclDto)
+        .flatMap(req -> aclsService.createProducerAcl(getCluster(clusterName), req))
+        .doOnEach(sig -> audit(context, sig))
+        .thenReturn(ResponseEntity.ok().build());
+  }
+
+  @Override
+  public Mono<ResponseEntity<Void>> createStreamAppAcl(String clusterName,
+                                                       Mono<CreateStreamAppAclDTO> createStreamAppAclDto,
+                                                       ServerWebExchange exchange) {
+    AccessContext context = AccessContext.builder()
+        .cluster(clusterName)
+        .aclActions(AclAction.EDIT)
+        .operationName("createStreamAppAcl")
+        .build();
+
+    return validateAccess(context)
+        .then(createStreamAppAclDto)
+        .flatMap(req -> aclsService.createStreamAppAcl(getCluster(clusterName), req))
+        .doOnEach(sig -> audit(context, sig))
+        .thenReturn(ResponseEntity.ok().build());
+  }
 }

ApplicationConfigController.java

@@ -15,8 +15,6 @@ import com.provectus.kafka.ui.model.UploadedFileInfoDTO;
 import com.provectus.kafka.ui.model.rbac.AccessContext;
 import com.provectus.kafka.ui.service.ApplicationInfoService;
 import com.provectus.kafka.ui.service.KafkaClusterFactory;
-import com.provectus.kafka.ui.service.audit.AuditService;
-import com.provectus.kafka.ui.service.rbac.AccessControlService;
 import com.provectus.kafka.ui.util.ApplicationRestarter;
 import com.provectus.kafka.ui.util.DynamicConfigOperations;
 import com.provectus.kafka.ui.util.DynamicConfigOperations.PropertiesStructure;
@@ -39,7 +37,7 @@ import reactor.util.function.Tuples;
 @Slf4j
 @RestController
 @RequiredArgsConstructor
-public class ApplicationConfigController implements ApplicationConfigApi {
+public class ApplicationConfigController extends AbstractController implements ApplicationConfigApi {

   private static final PropertiesMapper MAPPER = Mappers.getMapper(PropertiesMapper.class);

@@ -51,12 +49,10 @@ public class ApplicationConfigController implements ApplicationConfigApi {
     ApplicationConfigPropertiesDTO toDto(PropertiesStructure propertiesStructure);
   }

-  private final AccessControlService accessControlService;
   private final DynamicConfigOperations dynamicConfigOperations;
   private final ApplicationRestarter restarter;
   private final KafkaClusterFactory kafkaClusterFactory;
   private final ApplicationInfoService applicationInfoService;
-  private final AuditService auditService;

   @Override
   public Mono<ResponseEntity<ApplicationInfoDTO>> getApplicationInfo(ServerWebExchange exchange) {
@@ -69,12 +65,12 @@ public class ApplicationConfigController implements ApplicationConfigApi {
         .applicationConfigActions(VIEW)
         .operationName("getCurrentConfig")
         .build();
-    return accessControlService.validateAccess(context)
+    return validateAccess(context)
         .then(Mono.fromSupplier(() -> ResponseEntity.ok(
             new ApplicationConfigDTO()
                 .properties(MAPPER.toDto(dynamicConfigOperations.getCurrentProperties()))
         )))
-        .doOnEach(sig -> auditService.audit(context, sig));
+        .doOnEach(sig -> audit(context, sig));
   }

   @Override
@@ -84,14 +80,14 @@ public class ApplicationConfigController implements ApplicationConfigApi {
         .applicationConfigActions(EDIT)
         .operationName("restartWithConfig")
         .build();
-    return accessControlService.validateAccess(context)
+    return validateAccess(context)
         .then(restartRequestDto)
         .<ResponseEntity<Void>>map(dto -> {
           dynamicConfigOperations.persist(MAPPER.fromDto(dto.getConfig().getProperties()));
           restarter.requestRestart();
           return ResponseEntity.ok().build();
         })
-        .doOnEach(sig -> auditService.audit(context, sig));
+        .doOnEach(sig -> audit(context, sig));
   }

   @Override
@@ -101,13 +97,13 @@ public class ApplicationConfigController implements ApplicationConfigApi {
         .applicationConfigActions(EDIT)
         .operationName("uploadConfigRelatedFile")
         .build();
-    return accessControlService.validateAccess(context)
+    return validateAccess(context)
         .then(fileFlux.single())
         .flatMap(file ->
             dynamicConfigOperations.uploadConfigRelatedFile((FilePart) file)
                 .map(path -> new UploadedFileInfoDTO().location(path.toString()))
                 .map(ResponseEntity::ok))
-        .doOnEach(sig -> auditService.audit(context, sig));
+        .doOnEach(sig -> audit(context, sig));
   }

   @Override
@@ -117,7 +113,7 @@ public class ApplicationConfigController implements ApplicationConfigApi {
         .applicationConfigActions(EDIT)
         .operationName("validateConfig")
         .build();
-    return accessControlService.validateAccess(context)
+    return validateAccess(context)
         .then(configDto)
         .flatMap(config -> {
           PropertiesStructure propertiesStructure = MAPPER.fromDto(config.getProperties());
@@ -126,7 +122,7 @@ public class ApplicationConfigController implements ApplicationConfigApi {
               .map(validations -> new ApplicationConfigValidationDTO().clusters(validations));
         })
         .map(ResponseEntity::ok)
-        .doOnEach(sig -> auditService.audit(context, sig));
+        .doOnEach(sig -> audit(context, sig));
   }

   private Mono<Map<String, ClusterConfigValidationDTO>> validateClustersConfig(

AuthController.java

@@ -36,10 +36,10 @@ public class AuthController {
         + "  <meta name=\"description\" content=\"\">\n"
         + "  <meta name=\"author\" content=\"\">\n"
         + "  <title>Please sign in</title>\n"
-        + "  <link href=\"/static/css/bootstrap.min.css\" rel=\"stylesheet\" "
+        + "  <link href=\"" + contextPath + "/static/css/bootstrap.min.css\" rel=\"stylesheet\" "
         + "integrity=\"sha384-/Y6pD6FV/Vv2HJnA6t+vslU6fwYXjCFtcEpHbNJ0lyAFsXTsjBbfaDjzALeQsN6M\" "
         + "crossorigin=\"anonymous\">\n"
-        + "  <link href=\"/static/css/signin.css\" "
+        + "  <link href=\"" + contextPath + "/static/css/signin.css\" "
         + "rel=\"stylesheet\" crossorigin=\"anonymous\"/>\n"
         + "  </head>\n"
         + "  <body>\n"

BrokersController.java

@@ -11,8 +11,6 @@ import com.provectus.kafka.ui.model.BrokersLogdirsDTO;
 import com.provectus.kafka.ui.model.rbac.AccessContext;
 import com.provectus.kafka.ui.model.rbac.permission.ClusterConfigAction;
 import com.provectus.kafka.ui.service.BrokerService;
-import com.provectus.kafka.ui.service.audit.AuditService;
-import com.provectus.kafka.ui.service.rbac.AccessControlService;
 import java.util.List;
 import java.util.Map;
 import javax.annotation.Nullable;
@@ -31,9 +29,6 @@ public class BrokersController extends AbstractController implements BrokersApi
   private final BrokerService brokerService;
   private final ClusterMapper clusterMapper;
-
-  private final AuditService auditService;
-  private final AccessControlService accessControlService;

   @Override
   public Mono<ResponseEntity<Flux<BrokerDTO>>> getBrokers(String clusterName,
                                                           ServerWebExchange exchange) {
@@ -43,9 +38,9 @@ public class BrokersController extends AbstractController implements BrokersApi
         .build();

     var job = brokerService.getBrokers(getCluster(clusterName)).map(clusterMapper::toBrokerDto);
-    return accessControlService.validateAccess(context)
+    return validateAccess(context)
         .thenReturn(ResponseEntity.ok(job))
-        .doOnEach(sig -> auditService.audit(context, sig));
+        .doOnEach(sig -> audit(context, sig));
   }

   @Override
@@ -57,14 +52,14 @@ public class BrokersController extends AbstractController implements BrokersApi
         .operationParams(Map.of("id", id))
         .build();

-    return accessControlService.validateAccess(context)
+    return validateAccess(context)
         .then(
             brokerService.getBrokerMetrics(getCluster(clusterName), id)
                 .map(clusterMapper::toBrokerMetrics)
                 .map(ResponseEntity::ok)
                 .onErrorReturn(ResponseEntity.notFound().build())
         )
-        .doOnEach(sig -> auditService.audit(context, sig));
+        .doOnEach(sig -> audit(context, sig));
   }

   @Override
@@ -80,10 +75,10 @@ public class BrokersController extends AbstractController implements BrokersApi
         .operationParams(Map.of("brokerIds", brokerIds))
         .build();

-    return accessControlService.validateAccess(context)
+    return validateAccess(context)
         .thenReturn(ResponseEntity.ok(
             brokerService.getAllBrokersLogdirs(getCluster(clusterName), brokerIds)))
-        .doOnEach(sig -> auditService.audit(context, sig));
+        .doOnEach(sig -> audit(context, sig));
   }

   @Override
@@ -97,11 +92,11 @@ public class BrokersController extends AbstractController implements BrokersApi
         .operationParams(Map.of("brokerId", id))
         .build();

-    return accessControlService.validateAccess(context).thenReturn(
+    return validateAccess(context).thenReturn(
         ResponseEntity.ok(
             brokerService.getBrokerConfig(getCluster(clusterName), id)
                 .map(clusterMapper::toBrokerConfig))
-    ).doOnEach(sig -> auditService.audit(context, sig));
+    ).doOnEach(sig -> audit(context, sig));
   }

   @Override
@@ -116,11 +111,11 @@ public class BrokersController extends AbstractController implements BrokersApi
         .operationParams(Map.of("brokerId", id))
         .build();

-    return accessControlService.validateAccess(context).then(
+    return validateAccess(context).then(
         brokerLogdir
             .flatMap(bld -> brokerService.updateBrokerLogDir(getCluster(clusterName), id, bld))
             .map(ResponseEntity::ok)
-    ).doOnEach(sig -> auditService.audit(context, sig));
+    ).doOnEach(sig -> audit(context, sig));
   }

   @Override
@@ -136,11 +131,11 @@ public class BrokersController extends AbstractController implements BrokersApi
         .operationParams(Map.of("brokerId", id))
         .build();

-    return accessControlService.validateAccess(context).then(
+    return validateAccess(context).then(
         brokerConfig
             .flatMap(bci -> brokerService.updateBrokerConfigByName(
                 getCluster(clusterName), id, name, bci.getValue()))
             .map(ResponseEntity::ok)
-    ).doOnEach(sig -> auditService.audit(context, sig));
+    ).doOnEach(sig -> audit(context, sig));
   }
 }

ClustersController.java

@@ -6,8 +6,6 @@ import com.provectus.kafka.ui.model.ClusterMetricsDTO;
 import com.provectus.kafka.ui.model.ClusterStatsDTO;
 import com.provectus.kafka.ui.model.rbac.AccessContext;
 import com.provectus.kafka.ui.service.ClusterService;
-import com.provectus.kafka.ui.service.audit.AuditService;
-import com.provectus.kafka.ui.service.rbac.AccessControlService;
 import lombok.RequiredArgsConstructor;
 import lombok.extern.slf4j.Slf4j;
 import org.springframework.http.ResponseEntity;
@@ -21,8 +19,6 @@ import reactor.core.publisher.Mono;
 @Slf4j
 public class ClustersController extends AbstractController implements ClustersApi {
   private final ClusterService clusterService;
-  private final AccessControlService accessControlService;
-  private final AuditService auditService;

   @Override
   public Mono<ResponseEntity<Flux<ClusterDTO>>> getClusters(ServerWebExchange exchange) {
@@ -40,13 +36,13 @@ public class ClustersController extends AbstractController implements ClustersAp
         .operationName("getClusterMetrics")
         .build();

-    return accessControlService.validateAccess(context)
+    return validateAccess(context)
         .then(
             clusterService.getClusterMetrics(getCluster(clusterName))
                 .map(ResponseEntity::ok)
                 .onErrorReturn(ResponseEntity.notFound().build())
         )
-        .doOnEach(sig -> auditService.audit(context, sig));
+        .doOnEach(sig -> audit(context, sig));
   }

   @Override
@@ -57,13 +53,13 @@ public class ClustersController extends AbstractController implements ClustersAp
         .operationName("getClusterStats")
         .build();

-    return accessControlService.validateAccess(context)
+    return validateAccess(context)
         .then(
             clusterService.getClusterStats(getCluster(clusterName))
                 .map(ResponseEntity::ok)
                 .onErrorReturn(ResponseEntity.notFound().build())
         )
-        .doOnEach(sig -> auditService.audit(context, sig));
+        .doOnEach(sig -> audit(context, sig));
   }

   @Override
@@ -75,8 +71,8 @@ public class ClustersController extends AbstractController implements ClustersAp
         .operationName("updateClusterInfo")
         .build();

-    return accessControlService.validateAccess(context)
+    return validateAccess(context)
         .then(clusterService.updateCluster(getCluster(clusterName)).map(ResponseEntity::ok))
-        .doOnEach(sig -> auditService.audit(context, sig));
+        .doOnEach(sig -> audit(context, sig));
   }
 }

ConsumerGroupsController.java

@@ -19,8 +19,6 @@ import com.provectus.kafka.ui.model.rbac.AccessContext;
 import com.provectus.kafka.ui.model.rbac.permission.TopicAction;
 import com.provectus.kafka.ui.service.ConsumerGroupService;
 import com.provectus.kafka.ui.service.OffsetsResetService;
-import com.provectus.kafka.ui.service.audit.AuditService;
-import com.provectus.kafka.ui.service.rbac.AccessControlService;
 import java.util.Map;
 import java.util.Optional;
 import java.util.function.Supplier;
@@ -42,8 +40,6 @@ public class ConsumerGroupsController extends AbstractController implements Cons

   private final ConsumerGroupService consumerGroupService;
   private final OffsetsResetService offsetsResetService;
-  private final AccessControlService accessControlService;
-  private final AuditService auditService;

   @Value("${consumer.groups.page.size:25}")
   private int defaultConsumerGroupsPageSize;
@@ -59,9 +55,9 @@ public class ConsumerGroupsController extends AbstractController implements Cons
         .operationName("deleteConsumerGroup")
         .build();

-    return accessControlService.validateAccess(context)
+    return validateAccess(context)
         .then(consumerGroupService.deleteConsumerGroupById(getCluster(clusterName), id))
-        .doOnEach(sig -> auditService.audit(context, sig))
+        .doOnEach(sig -> audit(context, sig))
         .thenReturn(ResponseEntity.ok().build());
   }

@@ -76,11 +72,11 @@ public class ConsumerGroupsController extends AbstractController implements Cons
         .operationName("getConsumerGroup")
         .build();

-    return accessControlService.validateAccess(context)
+    return validateAccess(context)
         .then(consumerGroupService.getConsumerGroupDetail(getCluster(clusterName), consumerGroupId)
             .map(ConsumerGroupMapper::toDetailsDto)
             .map(ResponseEntity::ok))
-        .doOnEach(sig -> auditService.audit(context, sig));
+        .doOnEach(sig -> audit(context, sig));
   }

   @Override
@@ -104,9 +100,9 @@ public class ConsumerGroupsController extends AbstractController implements Cons
         .map(ResponseEntity::ok)
         .switchIfEmpty(Mono.just(ResponseEntity.notFound().build()));

-    return accessControlService.validateAccess(context)
+    return validateAccess(context)
         .then(job)
-        .doOnEach(sig -> auditService.audit(context, sig));
+        .doOnEach(sig -> audit(context, sig));
   }

   @Override
@@ -125,7 +121,7 @@ public class ConsumerGroupsController extends AbstractController implements Cons
         .operationName("getConsumerGroupsPage")
         .build();

-    return accessControlService.validateAccess(context).then(
+    return validateAccess(context).then(
         consumerGroupService.getConsumerGroupsPage(
             getCluster(clusterName),
             Optional.ofNullable(page).filter(i -> i > 0).orElse(1),
@@ -136,7 +132,7 @@ public class ConsumerGroupsController extends AbstractController implements Cons
         )
             .map(this::convertPage)
             .map(ResponseEntity::ok)
-    ).doOnEach(sig -> auditService.audit(context, sig));
+    ).doOnEach(sig -> audit(context, sig));
   }

   @Override
@@ -191,9 +187,9 @@ public class ConsumerGroupsController extends AbstractController implements Cons
         }
       };

-      return accessControlService.validateAccess(context)
+      return validateAccess(context)
           .then(mono.get())
-          .doOnEach(sig -> auditService.audit(context, sig));
+          .doOnEach(sig -> audit(context, sig));
     }).thenReturn(ResponseEntity.ok().build());
   }

KafkaConnectController.java

@@ -18,8 +18,6 @@ import com.provectus.kafka.ui.model.TaskDTO;
 import com.provectus.kafka.ui.model.rbac.AccessContext;
 import com.provectus.kafka.ui.model.rbac.permission.ConnectAction;
 import com.provectus.kafka.ui.service.KafkaConnectService;
-import com.provectus.kafka.ui.service.audit.AuditService;
-import com.provectus.kafka.ui.service.rbac.AccessControlService;
 import java.util.Comparator;
 import java.util.Map;
 import java.util.Set;
@@ -40,8 +38,6 @@ public class KafkaConnectController extends AbstractController implements KafkaC
       = Set.of(RESTART, RESTART_FAILED_TASKS, RESTART_ALL_TASKS);

   private final KafkaConnectService kafkaConnectService;
-  private final AccessControlService accessControlService;
-  private final AuditService auditService;

   @Override
   public Mono<ResponseEntity<Flux<ConnectDTO>>> getConnects(String clusterName,
@@ -64,9 +60,9 @@ public class KafkaConnectController extends AbstractController implements KafkaC
         .operationName("getConnectors")
         .build();

-    return accessControlService.validateAccess(context)
+    return validateAccess(context)
         .thenReturn(ResponseEntity.ok(kafkaConnectService.getConnectorNames(getCluster(clusterName), connectName)))
-        .doOnEach(sig -> auditService.audit(context, sig));
+        .doOnEach(sig -> audit(context, sig));
   }

   @Override
@@ -81,10 +77,10 @@ public class KafkaConnectController extends AbstractController implements KafkaC
         .operationName("createConnector")
         .build();

-    return accessControlService.validateAccess(context).then(
+    return validateAccess(context).then(
         kafkaConnectService.createConnector(getCluster(clusterName), connectName, connector)
             .map(ResponseEntity::ok)
-    ).doOnEach(sig -> auditService.audit(context, sig));
+    ).doOnEach(sig -> audit(context, sig));
   }

   @Override
@@ -100,10 +96,10 @@ public class KafkaConnectController extends AbstractController implements KafkaC
         .operationName("getConnector")
         .build();

-    return accessControlService.validateAccess(context).then(
+    return validateAccess(context).then(
         kafkaConnectService.getConnector(getCluster(clusterName), connectName, connectorName)
             .map(ResponseEntity::ok)
-    ).doOnEach(sig -> auditService.audit(context, sig));
+    ).doOnEach(sig -> audit(context, sig));
   }

   @Override
@@ -119,10 +115,10 @@ public class KafkaConnectController extends AbstractController implements KafkaC
         .operationParams(Map.of("connectorName", connectName))
         .build();

-    return accessControlService.validateAccess(context).then(
+    return validateAccess(context).then(
         kafkaConnectService.deleteConnector(getCluster(clusterName), connectName, connectorName)
             .map(ResponseEntity::ok)
-    ).doOnEach(sig -> auditService.audit(context, sig));
+    ).doOnEach(sig -> audit(context, sig));
   }


@@ -150,7 +146,7 @@ public class KafkaConnectController extends AbstractController implements KafkaC
         .sort(comparator);

     return Mono.just(ResponseEntity.ok(job))
-        .doOnEach(sig -> auditService.audit(context, sig));
+        .doOnEach(sig -> audit(context, sig));
   }

   @Override
@@ -166,11 +162,11 @@ public class KafkaConnectController extends AbstractController implements KafkaC
         .operationName("getConnectorConfig")
         .build();

-    return accessControlService.validateAccess(context).then(
+    return validateAccess(context).then(
         kafkaConnectService
             .getConnectorConfig(getCluster(clusterName), connectName, connectorName)
             .map(ResponseEntity::ok)
-    ).doOnEach(sig -> auditService.audit(context, sig));
+    ).doOnEach(sig -> audit(context, sig));
   }

   @Override
@@ -187,11 +183,11 @@ public class KafkaConnectController extends AbstractController implements KafkaC
         .operationParams(Map.of("connectorName", connectorName))
         .build();

-    return accessControlService.validateAccess(context).then(
+    return validateAccess(context).then(
         kafkaConnectService
             .setConnectorConfig(getCluster(clusterName), connectName, connectorName, requestBody)
             .map(ResponseEntity::ok))
-        .doOnEach(sig -> auditService.audit(context, sig));
+        .doOnEach(sig -> audit(context, sig));
   }

   @Override
@@ -214,11 +210,11 @@ public class KafkaConnectController extends AbstractController implements KafkaC
         .operationParams(Map.of("connectorName", connectorName))
         .build();

-    return accessControlService.validateAccess(context).then(
+    return validateAccess(context).then(
         kafkaConnectService
             .updateConnectorState(getCluster(clusterName), connectName, connectorName, action)
             .map(ResponseEntity::ok)
-    ).doOnEach(sig -> auditService.audit(context, sig));
+    ).doOnEach(sig -> audit(context, sig));
   }

   @Override
@@ -234,11 +230,11 @@ public class KafkaConnectController extends AbstractController implements KafkaC
         .operationParams(Map.of("connectorName", connectorName))
         .build();

-    return accessControlService.validateAccess(context).thenReturn(
+    return validateAccess(context).thenReturn(
         ResponseEntity
             .ok(kafkaConnectService
                 .getConnectorTasks(getCluster(clusterName), connectName, connectorName))
-    ).doOnEach(sig -> auditService.audit(context, sig));
+    ).doOnEach(sig -> audit(context, sig));
   }

   @Override
@@ -254,11 +250,11 @@ public class KafkaConnectController extends AbstractController implements KafkaC
         .operationParams(Map.of("connectorName", connectorName))
         .build();

-    return accessControlService.validateAccess(context).then(
+    return validateAccess(context).then(
         kafkaConnectService
             .restartConnectorTask(getCluster(clusterName), connectName, connectorName, taskId)
             .map(ResponseEntity::ok)
-    ).doOnEach(sig -> auditService.audit(context, sig));
+    ).doOnEach(sig -> audit(context, sig));
   }

   @Override
@@ -272,11 +268,11 @@ public class KafkaConnectController extends AbstractController implements KafkaC
         .operationName("getConnectorPlugins")
         .build();

-    return accessControlService.validateAccess(context).then(
+    return validateAccess(context).then(
         Mono.just(
             ResponseEntity.ok(
                 kafkaConnectService.getConnectorPlugins(getCluster(clusterName), connectName)))
-    ).doOnEach(sig -> auditService.audit(context, sig));
+    ).doOnEach(sig -> audit(context, sig));
   }

   @Override

KsqlController.java

@@ -9,9 +9,7 @@ import com.provectus.kafka.ui.model.KsqlTableDescriptionDTO;
 import com.provectus.kafka.ui.model.KsqlTableResponseDTO;
 import com.provectus.kafka.ui.model.rbac.AccessContext;
 import com.provectus.kafka.ui.model.rbac.permission.KsqlAction;
-import com.provectus.kafka.ui.service.audit.AuditService;
 import com.provectus.kafka.ui.service.ksql.KsqlServiceV2;
-import com.provectus.kafka.ui.service.rbac.AccessControlService;
 import java.util.List;
 import java.util.Map;
 import java.util.Optional;
@@ -29,8 +27,6 @@ import reactor.core.publisher.Mono;
 public class KsqlController extends AbstractController implements KsqlApi {

   private final KsqlServiceV2 ksqlServiceV2;
-  private final AccessControlService accessControlService;
-  private final AuditService auditService;

   @Override
   public Mono<ResponseEntity<KsqlCommandV2ResponseDTO>> executeKsql(String clusterName,
@@ -44,13 +40,13 @@ public class KsqlController extends AbstractController implements KsqlApi {
               .operationName("executeKsql")
               .operationParams(command)
               .build();
-          return accessControlService.validateAccess(context).thenReturn(
+          return validateAccess(context).thenReturn(
                   new KsqlCommandV2ResponseDTO().pipeId(
                       ksqlServiceV2.registerCommand(
                           getCluster(clusterName),
                           command.getKsql(),
                           Optional.ofNullable(command.getStreamsProperties()).orElse(Map.of()))))
-              .doOnEach(sig -> auditService.audit(context, sig));
+              .doOnEach(sig -> audit(context, sig));
         }
     )
         .map(ResponseEntity::ok);
@@ -66,7 +62,7 @@ public class KsqlController extends AbstractController implements KsqlApi {
         .operationName("openKsqlResponsePipe")
         .build();

-    return accessControlService.validateAccess(context).thenReturn(
+    return validateAccess(context).thenReturn(
         ResponseEntity.ok(ksqlServiceV2.execute(pipeId)
             .map(table -> new KsqlResponseDTO()
                 .table(
@@ -86,9 +82,9 @@ public class KsqlController extends AbstractController implements KsqlApi {
         .operationName("listStreams")
         .build();

-    return accessControlService.validateAccess(context)
+    return validateAccess(context)
         .thenReturn(ResponseEntity.ok(ksqlServiceV2.listStreams(getCluster(clusterName))))
-        .doOnEach(sig -> auditService.audit(context, sig));
+        .doOnEach(sig -> audit(context, sig));
   }

   @Override
@@ -100,8 +96,8 @@ public class KsqlController extends AbstractController implements KsqlApi {
         .operationName("listTables")
         .build();

-    return accessControlService.validateAccess(context)
+    return validateAccess(context)
         .thenReturn(ResponseEntity.ok(ksqlServiceV2.listTables(getCluster(clusterName))))
-        .doOnEach(sig -> auditService.audit(context, sig));
+        .doOnEach(sig -> audit(context, sig));
   }
 }

MessagesController.java

@@ -24,8 +24,6 @@ import com.provectus.kafka.ui.model.rbac.permission.AuditAction;
 import com.provectus.kafka.ui.model.rbac.permission.TopicAction;
 import com.provectus.kafka.ui.service.DeserializationService;
 import com.provectus.kafka.ui.service.MessagesService;
-import com.provectus.kafka.ui.service.audit.AuditService;
-import com.provectus.kafka.ui.service.rbac.AccessControlService;
 import java.util.List;
 import java.util.Map;
 import java.util.Optional;
@@ -49,8 +47,6 @@ public class MessagesController extends AbstractController implements MessagesAp

   private final MessagesService messagesService;
   private final DeserializationService deserializationService;
-  private final AccessControlService accessControlService;
-  private final AuditService auditService;

   @Override
   public Mono<ResponseEntity<Void>> deleteTopicMessages(
@@ -63,13 +59,13 @@ public class MessagesController extends AbstractController implements MessagesAp
         .topicActions(MESSAGES_DELETE)
         .build();

-    return accessControlService.validateAccess(context).<ResponseEntity<Void>>then(
+    return validateAccess(context).<ResponseEntity<Void>>then(
         messagesService.deleteTopicMessages(
             getCluster(clusterName),
             topicName,
             Optional.ofNullable(partitions).orElse(List.of())
         ).thenReturn(ResponseEntity.ok().build())
-    ).doOnEach(sig -> auditService.audit(context, sig));
+    ).doOnEach(sig -> audit(context, sig));
   }

   @Override
@@ -120,9 +116,9 @@ public class MessagesController extends AbstractController implements MessagesAp
     );

     var context = contextBuilder.build();
-    return accessControlService.validateAccess(context)
+    return validateAccess(context)
         .then(job)
-        .doOnEach(sig -> auditService.audit(context, sig));
+        .doOnEach(sig -> audit(context, sig));
   }

   @Override
@@ -137,11 +133,11 @@ public class MessagesController extends AbstractController implements MessagesAp
         .operationName("sendTopicMessages")
         .build();

-    return accessControlService.validateAccess(context).then(
+    return validateAccess(context).then(
         createTopicMessage.flatMap(msg ->
             messagesService.sendMessage(getCluster(clusterName), topicName, msg).then()
         ).map(ResponseEntity::ok)
-    ).doOnEach(sig -> auditService.audit(context, sig));
+    ).doOnEach(sig -> audit(context, sig));
   }

   /**
@@ -192,7 +188,7 @@ public class MessagesController extends AbstractController implements MessagesAp
         ? deserializationService.getSerdesForSerialize(getCluster(clusterName), topicName, VALUE)
         : deserializationService.getSerdesForDeserialize(getCluster(clusterName), topicName, VALUE));

-    return accessControlService.validateAccess(context).then(
+    return validateAccess(context).then(
         Mono.just(dto)
             .subscribeOn(Schedulers.boundedElastic())
             .map(ResponseEntity::ok)

@ -13,8 +13,6 @@ import com.provectus.kafka.ui.model.SchemaSubjectsResponseDTO;
|
|||
import com.provectus.kafka.ui.model.rbac.AccessContext;
|
||||
import com.provectus.kafka.ui.model.rbac.permission.SchemaAction;
|
||||
import com.provectus.kafka.ui.service.SchemaRegistryService;
|
||||
import com.provectus.kafka.ui.service.audit.AuditService;
|
||||
import com.provectus.kafka.ui.service.rbac.AccessControlService;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.stream.Collectors;
|
||||
|
@ -38,8 +36,6 @@ public class SchemasController extends AbstractController implements SchemasApi
|
|||
private final KafkaSrMapper kafkaSrMapper = new KafkaSrMapperImpl();
|
||||
|
||||
private final SchemaRegistryService schemaRegistryService;
|
||||
private final AccessControlService accessControlService;
|
||||
private final AuditService auditService;
|
||||
|
||||
@Override
|
||||
protected KafkaCluster getCluster(String clusterName) {
|
||||
|
@ -61,7 +57,7 @@ public class SchemasController extends AbstractController implements SchemasApi
|
|||
.operationName("checkSchemaCompatibility")
|
||||
.build();
|
||||
|
||||
return accessControlService.validateAccess(context).then(
|
||||
return validateAccess(context).then(
|
||||
newSchemaSubjectMono.flatMap(subjectDTO ->
|
||||
schemaRegistryService.checksSchemaCompatibility(
|
||||
getCluster(clusterName),
|
||||
|
@ -70,7 +66,7 @@ public class SchemasController extends AbstractController implements SchemasApi
|
|||
))
|
||||
.map(kafkaSrMapper::toDto)
|
||||
.map(ResponseEntity::ok)
|
||||
).doOnEach(sig -> auditService.audit(context, sig));
|
||||
).doOnEach(sig -> audit(context, sig));
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -83,7 +79,7 @@ public class SchemasController extends AbstractController implements SchemasApi
|
|||
.operationName("createNewSchema")
|
||||
.build();
|
||||
|
||||
return accessControlService.validateAccess(context).then(
|
||||
return validateAccess(context).then(
|
||||
newSchemaSubjectMono.flatMap(newSubject ->
|
||||
schemaRegistryService.registerNewSchema(
|
||||
getCluster(clusterName),
|
||||
|
@ -92,7 +88,7 @@ public class SchemasController extends AbstractController implements SchemasApi
|
|||
)
|
||||
).map(kafkaSrMapper::toDto)
|
||||
.map(ResponseEntity::ok)
|
||||
).doOnEach(sig -> auditService.audit(context, sig));
|
||||
).doOnEach(sig -> audit(context, sig));
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -105,9 +101,9 @@ public class SchemasController extends AbstractController implements SchemasApi
|
|||
.operationName("deleteLatestSchema")
|
||||
.build();
|
||||
|
||||
return accessControlService.validateAccess(context).then(
|
||||
return validateAccess(context).then(
|
||||
schemaRegistryService.deleteLatestSchemaSubject(getCluster(clusterName), subject)
|
||||
.doOnEach(sig -> auditService.audit(context, sig))
|
||||
.doOnEach(sig -> audit(context, sig))
|
||||
.thenReturn(ResponseEntity.ok().build())
|
||||
);
|
||||
}
|
||||
|
@ -122,9 +118,9 @@ public class SchemasController extends AbstractController implements SchemasApi
|
|||
.operationName("deleteSchema")
|
||||
.build();
|
||||
|
||||
return accessControlService.validateAccess(context).then(
|
||||
return validateAccess(context).then(
|
||||
schemaRegistryService.deleteSchemaSubjectEntirely(getCluster(clusterName), subject)
|
||||
.doOnEach(sig -> auditService.audit(context, sig))
|
||||
.doOnEach(sig -> audit(context, sig))
|
||||
.thenReturn(ResponseEntity.ok().build())
|
||||
);
|
||||
}
|
||||
|
@ -139,9 +135,9 @@ public class SchemasController extends AbstractController implements SchemasApi
|
|||
.operationName("deleteSchemaByVersion")
|
||||
.build();
|
||||
|
||||
return accessControlService.validateAccess(context).then(
|
||||
return validateAccess(context).then(
|
||||
schemaRegistryService.deleteSchemaSubjectByVersion(getCluster(clusterName), subjectName, version)
|
||||
.doOnEach(sig -> auditService.audit(context, sig))
|
||||
.doOnEach(sig -> audit(context, sig))
|
||||
.thenReturn(ResponseEntity.ok().build())
|
||||
);
|
||||
}
|
||||
|
@ -160,9 +156,9 @@ public class SchemasController extends AbstractController implements SchemasApi
|
|||
schemaRegistryService.getAllVersionsBySubject(getCluster(clusterName), subjectName)
|
||||
.map(kafkaSrMapper::toDto);
|
||||
|
||||
return accessControlService.validateAccess(context)
|
||||
return validateAccess(context)
|
||||
.thenReturn(ResponseEntity.ok(schemas))
|
||||
.doOnEach(sig -> auditService.audit(context, sig));
|
||||
.doOnEach(sig -> audit(context, sig));
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -185,11 +181,11 @@ public class SchemasController extends AbstractController implements SchemasApi
|
|||
.operationName("getLatestSchema")
|
||||
.build();
|
||||
|
||||
return accessControlService.validateAccess(context).then(
|
||||
return validateAccess(context).then(
|
||||
schemaRegistryService.getLatestSchemaVersionBySubject(getCluster(clusterName), subject)
|
||||
.map(kafkaSrMapper::toDto)
|
||||
.map(ResponseEntity::ok)
|
||||
).doOnEach(sig -> auditService.audit(context, sig));
|
||||
).doOnEach(sig -> audit(context, sig));
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -203,12 +199,12 @@ public class SchemasController extends AbstractController implements SchemasApi
|
|||
.operationParams(Map.of("subject", subject, "version", version))
|
||||
.build();
|
||||
|
||||
return accessControlService.validateAccess(context).then(
|
||||
return validateAccess(context).then(
|
||||
schemaRegistryService.getSchemaSubjectByVersion(
|
||||
getCluster(clusterName), subject, version)
|
||||
.map(kafkaSrMapper::toDto)
|
||||
.map(ResponseEntity::ok)
|
||||
).doOnEach(sig -> auditService.audit(context, sig));
|
||||
).doOnEach(sig -> audit(context, sig));
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -244,7 +240,7 @@ public class SchemasController extends AbstractController implements SchemasApi
|
|||
.map(subjs -> subjs.stream().map(kafkaSrMapper::toDto).toList())
|
||||
.map(subjs -> new SchemaSubjectsResponseDTO().pageCount(totalPages).schemas(subjs));
|
||||
}).map(ResponseEntity::ok)
|
||||
.doOnEach(sig -> auditService.audit(context, sig));
|
||||
.doOnEach(sig -> audit(context, sig));
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -257,14 +253,14 @@ public class SchemasController extends AbstractController implements SchemasApi
|
|||
.operationName("updateGlobalSchemaCompatibilityLevel")
|
||||
.build();
|
||||
|
||||
return accessControlService.validateAccess(context).then(
|
||||
return validateAccess(context).then(
|
||||
compatibilityLevelMono
|
||||
.flatMap(compatibilityLevelDTO ->
|
||||
schemaRegistryService.updateGlobalSchemaCompatibility(
|
||||
getCluster(clusterName),
|
||||
kafkaSrMapper.fromDto(compatibilityLevelDTO.getCompatibility())
|
||||
))
|
||||
.doOnEach(sig -> auditService.audit(context, sig))
|
||||
.doOnEach(sig -> audit(context, sig))
|
||||
.thenReturn(ResponseEntity.ok().build())
|
||||
);
|
||||
}
|
||||
|
@ -280,7 +276,7 @@ public class SchemasController extends AbstractController implements SchemasApi
|
|||
.operationParams(Map.of("subject", subject))
|
||||
.build();
|
||||
|
||||
return accessControlService.validateAccess(context).then(
|
||||
return validateAccess(context).then(
|
||||
compatibilityLevelMono
|
||||
.flatMap(compatibilityLevelDTO ->
|
||||
schemaRegistryService.updateSchemaCompatibility(
|
||||
|
@ -288,7 +284,7 @@ public class SchemasController extends AbstractController implements SchemasApi
|
|||
subject,
|
||||
kafkaSrMapper.fromDto(compatibilityLevelDTO.getCompatibility())
|
||||
))
|
||||
.doOnEach(sig -> auditService.audit(context, sig))
|
||||
.doOnEach(sig -> audit(context, sig))
|
||||
.thenReturn(ResponseEntity.ok().build())
|
||||
);
|
||||
}
|
||||
|
|
|
@ -27,8 +27,6 @@ import com.provectus.kafka.ui.model.TopicsResponseDTO;
|
|||
import com.provectus.kafka.ui.model.rbac.AccessContext;
|
||||
import com.provectus.kafka.ui.service.TopicsService;
|
||||
import com.provectus.kafka.ui.service.analyze.TopicAnalysisService;
|
||||
import com.provectus.kafka.ui.service.audit.AuditService;
|
||||
import com.provectus.kafka.ui.service.rbac.AccessControlService;
|
||||
import java.util.Comparator;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
@ -53,8 +51,6 @@ public class TopicsController extends AbstractController implements TopicsApi {
|
|||
private final TopicsService topicsService;
|
||||
private final TopicAnalysisService topicAnalysisService;
|
||||
private final ClusterMapper clusterMapper;
|
||||
private final AccessControlService accessControlService;
|
||||
private final AuditService auditService;
|
||||
|
||||
@Override
|
||||
public Mono<ResponseEntity<TopicDTO>> createTopic(
|
||||
|
@ -67,12 +63,12 @@ public class TopicsController extends AbstractController implements TopicsApi {
|
|||
.operationParams(topicCreation)
|
||||
.build();
|
||||
|
||||
return accessControlService.validateAccess(context)
|
||||
return validateAccess(context)
|
||||
.then(topicsService.createTopic(getCluster(clusterName), topicCreation))
|
||||
.map(clusterMapper::toTopic)
|
||||
.map(s -> new ResponseEntity<>(s, HttpStatus.OK))
|
||||
.switchIfEmpty(Mono.just(ResponseEntity.notFound().build()))
|
||||
.doOnEach(sig -> auditService.audit(context, sig));
|
||||
.doOnEach(sig -> audit(context, sig));
|
||||
});
|
||||
}
|
||||
|
||||
|
@ -86,11 +82,11 @@ public class TopicsController extends AbstractController implements TopicsApi {
|
|||
.operationName("recreateTopic")
|
||||
.build();
|
||||
|
||||
return accessControlService.validateAccess(context).then(
|
||||
return validateAccess(context).then(
|
||||
topicsService.recreateTopic(getCluster(clusterName), topicName)
|
||||
.map(clusterMapper::toTopic)
|
||||
.map(s -> new ResponseEntity<>(s, HttpStatus.CREATED))
|
||||
).doOnEach(sig -> auditService.audit(context, sig));
|
||||
).doOnEach(sig -> audit(context, sig));
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -105,11 +101,11 @@ public class TopicsController extends AbstractController implements TopicsApi {
|
|||
.operationParams(Map.of("newTopicName", newTopicName))
|
||||
.build();
|
||||
|
||||
return accessControlService.validateAccess(context)
|
||||
return validateAccess(context)
|
||||
.then(topicsService.cloneTopic(getCluster(clusterName), topicName, newTopicName)
|
||||
.map(clusterMapper::toTopic)
|
||||
.map(s -> new ResponseEntity<>(s, HttpStatus.CREATED))
|
||||
).doOnEach(sig -> auditService.audit(context, sig));
|
||||
).doOnEach(sig -> audit(context, sig));
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -123,11 +119,11 @@ public class TopicsController extends AbstractController implements TopicsApi {
|
|||
.operationName("deleteTopic")
|
||||
.build();
|
||||
|
||||
return accessControlService.validateAccess(context)
|
||||
return validateAccess(context)
|
||||
.then(
|
||||
topicsService.deleteTopic(getCluster(clusterName), topicName)
|
||||
.thenReturn(ResponseEntity.ok().<Void>build())
|
||||
).doOnEach(sig -> auditService.audit(context, sig));
|
||||
).doOnEach(sig -> audit(context, sig));
|
||||
}
|
||||
|
||||
|
||||
|
@ -142,7 +138,7 @@ public class TopicsController extends AbstractController implements TopicsApi {
|
|||
.operationName("getTopicConfigs")
|
||||
.build();
|
||||
|
||||
return accessControlService.validateAccess(context).then(
|
||||
return validateAccess(context).then(
|
||||
topicsService.getTopicConfigs(getCluster(clusterName), topicName)
|
||||
.map(lst -> lst.stream()
|
||||
.map(InternalTopicConfig::from)
|
||||
|
@ -150,7 +146,7 @@ public class TopicsController extends AbstractController implements TopicsApi {
|
|||
.collect(toList()))
|
||||
.map(Flux::fromIterable)
|
||||
.map(ResponseEntity::ok)
|
||||
).doOnEach(sig -> auditService.audit(context, sig));
|
||||
).doOnEach(sig -> audit(context, sig));
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -164,11 +160,11 @@ public class TopicsController extends AbstractController implements TopicsApi {
|
|||
.operationName("getTopicDetails")
|
||||
.build();
|
||||
|
||||
return accessControlService.validateAccess(context).then(
|
||||
return validateAccess(context).then(
|
||||
topicsService.getTopicDetails(getCluster(clusterName), topicName)
|
||||
.map(clusterMapper::toTopicDetails)
|
||||
.map(ResponseEntity::ok)
|
||||
).doOnEach(sig -> auditService.audit(context, sig));
|
||||
).doOnEach(sig -> audit(context, sig));
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -215,7 +211,7 @@ public class TopicsController extends AbstractController implements TopicsApi {
|
|||
.pageCount(totalPages));
|
||||
})
|
||||
.map(ResponseEntity::ok)
|
||||
.doOnEach(sig -> auditService.audit(context, sig));
|
||||
.doOnEach(sig -> audit(context, sig));
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -230,12 +226,12 @@ public class TopicsController extends AbstractController implements TopicsApi {
|
|||
.operationName("updateTopic")
|
||||
.build();
|
||||
|
||||
return accessControlService.validateAccess(context).then(
|
||||
return validateAccess(context).then(
|
||||
topicsService
|
||||
.updateTopic(getCluster(clusterName), topicName, topicUpdate)
|
||||
.map(clusterMapper::toTopic)
|
||||
.map(ResponseEntity::ok)
|
||||
).doOnEach(sig -> auditService.audit(context, sig));
|
||||
).doOnEach(sig -> audit(context, sig));
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -250,11 +246,11 @@ public class TopicsController extends AbstractController implements TopicsApi {
|
|||
.topicActions(VIEW, EDIT)
|
||||
.build();
|
||||
|
||||
return accessControlService.validateAccess(context).then(
|
||||
return validateAccess(context).then(
|
||||
partitionsIncrease.flatMap(partitions ->
|
||||
topicsService.increaseTopicPartitions(getCluster(clusterName), topicName, partitions)
|
||||
).map(ResponseEntity::ok)
|
||||
).doOnEach(sig -> auditService.audit(context, sig));
|
||||
).doOnEach(sig -> audit(context, sig));
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -270,12 +266,12 @@ public class TopicsController extends AbstractController implements TopicsApi {
|
|||
.operationName("changeReplicationFactor")
|
||||
.build();
|
||||
|
||||
return accessControlService.validateAccess(context).then(
|
||||
return validateAccess(context).then(
|
||||
replicationFactorChange
|
||||
.flatMap(rfc ->
|
||||
topicsService.changeReplicationFactor(getCluster(clusterName), topicName, rfc))
|
||||
.map(ResponseEntity::ok)
|
||||
).doOnEach(sig -> auditService.audit(context, sig));
|
||||
).doOnEach(sig -> audit(context, sig));
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -288,9 +284,9 @@ public class TopicsController extends AbstractController implements TopicsApi {
|
|||
.operationName("analyzeTopic")
|
||||
.build();
|
||||
|
||||
return accessControlService.validateAccess(context).then(
|
||||
return validateAccess(context).then(
|
||||
topicAnalysisService.analyze(getCluster(clusterName), topicName)
|
||||
.doOnEach(sig -> auditService.audit(context, sig))
|
||||
.doOnEach(sig -> audit(context, sig))
|
||||
.thenReturn(ResponseEntity.ok().build())
|
||||
);
|
||||
}
|
||||
|
@ -305,9 +301,9 @@ public class TopicsController extends AbstractController implements TopicsApi {
|
|||
.operationName("cancelTopicAnalysis")
|
||||
.build();
|
||||
|
||||
return accessControlService.validateAccess(context)
|
||||
return validateAccess(context)
|
||||
.then(Mono.fromRunnable(() -> topicAnalysisService.cancelAnalysis(getCluster(clusterName), topicName)))
|
||||
.doOnEach(sig -> auditService.audit(context, sig))
|
||||
.doOnEach(sig -> audit(context, sig))
|
||||
.thenReturn(ResponseEntity.ok().build());
|
||||
}
|
||||
|
||||
|
@ -324,11 +320,11 @@ public class TopicsController extends AbstractController implements TopicsApi {
|
|||
.operationName("getTopicAnalysis")
|
||||
.build();
|
||||
|
||||
return accessControlService.validateAccess(context)
|
||||
return validateAccess(context)
|
||||
.thenReturn(topicAnalysisService.getTopicAnalysis(getCluster(clusterName), topicName)
|
||||
.map(ResponseEntity::ok)
|
||||
.orElseGet(() -> ResponseEntity.notFound().build()))
|
||||
.doOnEach(sig -> auditService.audit(context, sig));
|
||||
.doOnEach(sig -> audit(context, sig));
|
||||
}
|
||||
|
||||
private Comparator<InternalTopic> getComparatorForTopic(
|
||||
|
|
|
@ -1,38 +1,23 @@
|
|||
package com.provectus.kafka.ui.emitter;
|
||||
|
||||
import com.provectus.kafka.ui.model.TopicMessageEventDTO;
|
||||
import java.time.Duration;
|
||||
import java.time.Instant;
|
||||
import org.apache.kafka.clients.consumer.Consumer;
|
||||
import org.apache.kafka.clients.consumer.ConsumerRecord;
|
||||
import org.apache.kafka.clients.consumer.ConsumerRecords;
|
||||
import org.apache.kafka.common.utils.Bytes;
|
||||
import reactor.core.publisher.FluxSink;
|
||||
|
||||
public abstract class AbstractEmitter {
|
||||
abstract class AbstractEmitter implements java.util.function.Consumer<FluxSink<TopicMessageEventDTO>> {
|
||||
|
||||
private final MessagesProcessing messagesProcessing;
|
||||
private final PollingThrottler throttler;
|
||||
protected final PollingSettings pollingSettings;
|
||||
private final PollingSettings pollingSettings;
|
||||
|
||||
protected AbstractEmitter(MessagesProcessing messagesProcessing, PollingSettings pollingSettings) {
|
||||
this.messagesProcessing = messagesProcessing;
|
||||
this.pollingSettings = pollingSettings;
|
||||
this.throttler = pollingSettings.getPollingThrottler();
|
||||
}
|
||||
|
||||
protected ConsumerRecords<Bytes, Bytes> poll(
|
||||
FluxSink<TopicMessageEventDTO> sink, Consumer<Bytes, Bytes> consumer) {
|
||||
return poll(sink, consumer, pollingSettings.getPollTimeout());
|
||||
}
|
||||
|
||||
protected ConsumerRecords<Bytes, Bytes> poll(
|
||||
FluxSink<TopicMessageEventDTO> sink, Consumer<Bytes, Bytes> consumer, Duration timeout) {
|
||||
Instant start = Instant.now();
|
||||
ConsumerRecords<Bytes, Bytes> records = consumer.poll(timeout);
|
||||
Instant finish = Instant.now();
|
||||
int polledBytes = sendConsuming(sink, records, Duration.between(start, finish).toMillis());
|
||||
throttler.throttleAfterPoll(polledBytes);
|
||||
protected PolledRecords poll(FluxSink<TopicMessageEventDTO> sink, EnhancedConsumer consumer) {
|
||||
var records = consumer.pollEnhanced(pollingSettings.getPollTimeout());
|
||||
sendConsuming(sink, records);
|
||||
return records;
|
||||
}
|
||||
|
||||
|
@ -40,19 +25,16 @@ public abstract class AbstractEmitter {
|
|||
return messagesProcessing.limitReached();
|
||||
}
|
||||
|
||||
protected void sendMessage(FluxSink<TopicMessageEventDTO> sink,
|
||||
ConsumerRecord<Bytes, Bytes> msg) {
|
||||
messagesProcessing.sendMsg(sink, msg);
|
||||
protected void send(FluxSink<TopicMessageEventDTO> sink, Iterable<ConsumerRecord<Bytes, Bytes>> records) {
|
||||
messagesProcessing.send(sink, records);
|
||||
}
|
||||
|
||||
protected void sendPhase(FluxSink<TopicMessageEventDTO> sink, String name) {
|
||||
messagesProcessing.sendPhase(sink, name);
|
||||
}
|
||||
|
||||
protected int sendConsuming(FluxSink<TopicMessageEventDTO> sink,
|
||||
ConsumerRecords<Bytes, Bytes> records,
|
||||
long elapsed) {
|
||||
return messagesProcessing.sentConsumingInfo(sink, records, elapsed);
|
||||
protected void sendConsuming(FluxSink<TopicMessageEventDTO> sink, PolledRecords records) {
|
||||
messagesProcessing.sentConsumingInfo(sink, records);
|
||||
}
|
||||
|
||||
protected void sendFinishStatsAndCompleteSink(FluxSink<TopicMessageEventDTO> sink) {
|
||||
|
|
|
@ -0,0 +1,60 @@
|
|||
package com.provectus.kafka.ui.emitter;
|
||||
|
||||
import com.provectus.kafka.ui.model.ConsumerPosition;
|
||||
import com.provectus.kafka.ui.model.TopicMessageDTO;
|
||||
import com.provectus.kafka.ui.serdes.ConsumerRecordDeserializer;
|
||||
import java.util.Comparator;
|
||||
import java.util.Map;
|
||||
import java.util.TreeMap;
|
||||
import java.util.function.Predicate;
|
||||
import java.util.function.Supplier;
|
||||
import java.util.stream.Collectors;
|
||||
import org.apache.kafka.common.TopicPartition;
|
||||
|
||||
public class BackwardEmitter extends RangePollingEmitter {
|
||||
|
||||
public BackwardEmitter(Supplier<EnhancedConsumer> consumerSupplier,
|
||||
ConsumerPosition consumerPosition,
|
||||
int messagesPerPage,
|
||||
ConsumerRecordDeserializer deserializer,
|
||||
Predicate<TopicMessageDTO> filter,
|
||||
PollingSettings pollingSettings) {
|
||||
super(
|
||||
consumerSupplier,
|
||||
consumerPosition,
|
||||
messagesPerPage,
|
||||
new MessagesProcessing(
|
||||
deserializer,
|
||||
filter,
|
||||
false,
|
||||
messagesPerPage
|
||||
),
|
||||
pollingSettings
|
||||
);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected TreeMap<TopicPartition, FromToOffset> nextPollingRange(TreeMap<TopicPartition, FromToOffset> prevRange,
|
||||
SeekOperations seekOperations) {
|
||||
TreeMap<TopicPartition, Long> readToOffsets = new TreeMap<>(Comparator.comparingInt(TopicPartition::partition));
|
||||
if (prevRange.isEmpty()) {
|
||||
readToOffsets.putAll(seekOperations.getOffsetsForSeek());
|
||||
} else {
|
||||
readToOffsets.putAll(
|
||||
prevRange.entrySet()
|
||||
.stream()
|
||||
.collect(Collectors.toMap(Map.Entry::getKey, e -> e.getValue().from()))
|
||||
);
|
||||
}
|
||||
|
||||
int msgsToPollPerPartition = (int) Math.ceil((double) messagesPerPage / readToOffsets.size());
|
||||
TreeMap<TopicPartition, FromToOffset> result = new TreeMap<>(Comparator.comparingInt(TopicPartition::partition));
|
||||
readToOffsets.forEach((tp, toOffset) -> {
|
||||
long tpStartOffset = seekOperations.getBeginOffsets().get(tp);
|
||||
if (toOffset > tpStartOffset) {
|
||||
result.put(tp, new FromToOffset(Math.max(tpStartOffset, toOffset - msgsToPollPerPartition), toOffset));
|
||||
}
|
||||
});
|
||||
return result;
|
||||
}
|
||||
}
|
|
@ -1,128 +0,0 @@
|
|||
package com.provectus.kafka.ui.emitter;
|
||||
|
||||
import com.provectus.kafka.ui.model.ConsumerPosition;
|
||||
import com.provectus.kafka.ui.model.TopicMessageEventDTO;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collections;
|
||||
import java.util.Comparator;
|
||||
import java.util.List;
|
||||
import java.util.TreeMap;
|
||||
import java.util.function.Supplier;
|
||||
import lombok.extern.slf4j.Slf4j;
|
||||
import org.apache.kafka.clients.consumer.Consumer;
|
||||
import org.apache.kafka.clients.consumer.ConsumerRecord;
|
||||
import org.apache.kafka.clients.consumer.KafkaConsumer;
|
||||
import org.apache.kafka.common.TopicPartition;
|
||||
import org.apache.kafka.common.errors.InterruptException;
|
||||
import org.apache.kafka.common.utils.Bytes;
|
||||
import reactor.core.publisher.FluxSink;
|
||||
|
||||
@Slf4j
|
||||
public class BackwardRecordEmitter
|
||||
extends AbstractEmitter
|
||||
implements java.util.function.Consumer<FluxSink<TopicMessageEventDTO>> {
|
||||
|
||||
private final Supplier<KafkaConsumer<Bytes, Bytes>> consumerSupplier;
|
||||
private final ConsumerPosition consumerPosition;
|
||||
private final int messagesPerPage;
|
||||
|
||||
public BackwardRecordEmitter(
|
||||
Supplier<KafkaConsumer<Bytes, Bytes>> consumerSupplier,
|
||||
ConsumerPosition consumerPosition,
|
||||
int messagesPerPage,
|
||||
MessagesProcessing messagesProcessing,
|
||||
PollingSettings pollingSettings) {
|
||||
super(messagesProcessing, pollingSettings);
|
||||
this.consumerPosition = consumerPosition;
|
||||
this.messagesPerPage = messagesPerPage;
|
||||
this.consumerSupplier = consumerSupplier;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void accept(FluxSink<TopicMessageEventDTO> sink) {
|
||||
log.debug("Starting backward polling for {}", consumerPosition);
|
||||
try (KafkaConsumer<Bytes, Bytes> consumer = consumerSupplier.get()) {
|
||||
sendPhase(sink, "Created consumer");
|
||||
|
||||
var seekOperations = SeekOperations.create(consumer, consumerPosition);
|
||||
var readUntilOffsets = new TreeMap<TopicPartition, Long>(Comparator.comparingInt(TopicPartition::partition));
|
||||
readUntilOffsets.putAll(seekOperations.getOffsetsForSeek());
|
||||
|
||||
int msgsToPollPerPartition = (int) Math.ceil((double) messagesPerPage / readUntilOffsets.size());
|
||||
log.debug("'Until' offsets for polling: {}", readUntilOffsets);
|
||||
|
||||
while (!sink.isCancelled() && !readUntilOffsets.isEmpty() && !sendLimitReached()) {
|
||||
new TreeMap<>(readUntilOffsets).forEach((tp, readToOffset) -> {
|
||||
if (sink.isCancelled()) {
|
||||
return; //fast return in case of sink cancellation
|
||||
}
|
||||
long beginOffset = seekOperations.getBeginOffsets().get(tp);
|
||||
long readFromOffset = Math.max(beginOffset, readToOffset - msgsToPollPerPartition);
|
||||
|
||||
partitionPollIteration(tp, readFromOffset, readToOffset, consumer, sink)
|
||||
.forEach(r -> sendMessage(sink, r));
|
||||
|
||||
if (beginOffset == readFromOffset) {
|
||||
// we fully read this partition -> removing it from polling iterations
|
||||
readUntilOffsets.remove(tp);
|
||||
} else {
|
||||
// updating 'to' offset for next polling iteration
|
||||
readUntilOffsets.put(tp, readFromOffset);
|
||||
}
|
||||
});
|
||||
if (readUntilOffsets.isEmpty()) {
|
||||
log.debug("begin reached after partitions poll iteration");
|
||||
} else if (sink.isCancelled()) {
|
||||
log.debug("sink is cancelled after partitions poll iteration");
|
||||
}
|
||||
}
|
||||
sendFinishStatsAndCompleteSink(sink);
|
||||
log.debug("Polling finished");
|
||||
} catch (InterruptException kafkaInterruptException) {
|
||||
log.debug("Polling finished due to thread interruption");
|
||||
sink.complete();
|
||||
} catch (Exception e) {
|
||||
log.error("Error occurred while consuming records", e);
|
||||
sink.error(e);
|
||||
}
|
||||
}
|
||||
|
||||
private List<ConsumerRecord<Bytes, Bytes>> partitionPollIteration(
|
||||
TopicPartition tp,
|
||||
long fromOffset,
|
||||
long toOffset,
|
||||
Consumer<Bytes, Bytes> consumer,
|
||||
FluxSink<TopicMessageEventDTO> sink
|
||||
) {
|
||||
consumer.assign(Collections.singleton(tp));
|
||||
consumer.seek(tp, fromOffset);
|
||||
sendPhase(sink, String.format("Polling partition: %s from offset %s", tp, fromOffset));
|
||||
int desiredMsgsToPoll = (int) (toOffset - fromOffset);
|
||||
|
||||
var recordsToSend = new ArrayList<ConsumerRecord<Bytes, Bytes>>();
|
||||
|
||||
EmptyPollsCounter emptyPolls = pollingSettings.createEmptyPollsCounter();
|
||||
while (!sink.isCancelled()
|
||||
&& !sendLimitReached()
|
||||
&& recordsToSend.size() < desiredMsgsToPoll
|
||||
&& !emptyPolls.noDataEmptyPollsReached()) {
|
||||
var polledRecords = poll(sink, consumer, pollingSettings.getPartitionPollTimeout());
|
||||
emptyPolls.count(polledRecords);
|
||||
|
||||
log.debug("{} records polled from {}", polledRecords.count(), tp);
|
||||
|
||||
var filteredRecords = polledRecords.records(tp).stream()
|
||||
.filter(r -> r.offset() < toOffset)
|
||||
.toList();
|
||||
|
||||
if (!polledRecords.isEmpty() && filteredRecords.isEmpty()) {
|
||||
// we already read all messages in target offsets interval
|
||||
break;
|
||||
}
|
||||
recordsToSend.addAll(filteredRecords);
|
||||
}
|
||||
log.debug("{} records to send", recordsToSend.size());
|
||||
Collections.reverse(recordsToSend);
|
||||
return recordsToSend;
|
||||
}
|
||||
}
|
|
@ -2,9 +2,6 @@ package com.provectus.kafka.ui.emitter;
|
|||
|
||||
import com.provectus.kafka.ui.model.TopicMessageConsumingDTO;
|
||||
import com.provectus.kafka.ui.model.TopicMessageEventDTO;
|
||||
import com.provectus.kafka.ui.util.ConsumerRecordsUtil;
|
||||
import org.apache.kafka.clients.consumer.ConsumerRecords;
|
||||
import org.apache.kafka.common.utils.Bytes;
|
||||
import reactor.core.publisher.FluxSink;
|
||||
|
||||
class ConsumingStats {
|
||||
|
@ -12,41 +9,37 @@ class ConsumingStats {
|
|||
private long bytes = 0;
|
||||
private int records = 0;
|
||||
private long elapsed = 0;
|
||||
private int filterApplyErrors = 0;
|
||||
|
||||
/**
|
||||
* returns bytes polled.
|
||||
*/
|
||||
int sendConsumingEvt(FluxSink<TopicMessageEventDTO> sink,
|
||||
ConsumerRecords<Bytes, Bytes> polledRecords,
|
||||
long elapsed,
|
||||
int filterApplyErrors) {
|
||||
int polledBytes = ConsumerRecordsUtil.calculatePolledSize(polledRecords);
|
||||
bytes += polledBytes;
|
||||
this.records += polledRecords.count();
|
||||
this.elapsed += elapsed;
|
||||
void sendConsumingEvt(FluxSink<TopicMessageEventDTO> sink, PolledRecords polledRecords) {
|
||||
bytes += polledRecords.bytes();
|
||||
records += polledRecords.count();
|
||||
elapsed += polledRecords.elapsed().toMillis();
|
||||
sink.next(
|
||||
new TopicMessageEventDTO()
|
||||
.type(TopicMessageEventDTO.TypeEnum.CONSUMING)
|
||||
.consuming(createConsumingStats(sink, filterApplyErrors))
|
||||
.consuming(createConsumingStats())
|
||||
);
|
||||
return polledBytes;
|
||||
}
|
||||
|
||||
void sendFinishEvent(FluxSink<TopicMessageEventDTO> sink, int filterApplyErrors) {
|
||||
void incFilterApplyError() {
|
||||
filterApplyErrors++;
|
||||
}
|
||||
|
||||
void sendFinishEvent(FluxSink<TopicMessageEventDTO> sink) {
|
||||
sink.next(
|
||||
new TopicMessageEventDTO()
|
||||
.type(TopicMessageEventDTO.TypeEnum.DONE)
|
||||
.consuming(createConsumingStats(sink, filterApplyErrors))
|
||||
.consuming(createConsumingStats())
|
||||
);
|
||||
}
|
||||
|
||||
private TopicMessageConsumingDTO createConsumingStats(FluxSink<TopicMessageEventDTO> sink,
|
||||
int filterApplyErrors) {
|
||||
private TopicMessageConsumingDTO createConsumingStats() {
|
||||
return new TopicMessageConsumingDTO()
|
||||
.bytesConsumed(this.bytes)
|
||||
.elapsedMs(this.elapsed)
|
||||
.isCancelled(sink.isCancelled())
|
||||
.bytesConsumed(bytes)
|
||||
.elapsedMs(elapsed)
|
||||
.isCancelled(false)
|
||||
.filterApplyErrors(filterApplyErrors)
|
||||
.messagesConsumed(this.records);
|
||||
.messagesConsumed(records);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1,28 +0,0 @@
|
|||
package com.provectus.kafka.ui.emitter;
|
||||
|
||||
import org.apache.kafka.clients.consumer.ConsumerRecords;
|
||||
|
||||
// In some situations it is hard to say whether records range (between two offsets) was fully polled.
|
||||
// This happens when we have holes in records sequences that is usual case for compact topics or
|
||||
// topics with transactional writes. In such cases if you want to poll all records between offsets X and Y
|
||||
// there is no guarantee that you will ever see record with offset Y.
|
||||
// To workaround this we can assume that after N consecutive empty polls all target messages were read.
|
||||
public class EmptyPollsCounter {
|
||||
|
||||
private final int maxEmptyPolls;
|
||||
|
||||
private int emptyPolls = 0;
|
||||
|
||||
EmptyPollsCounter(int maxEmptyPolls) {
|
||||
this.maxEmptyPolls = maxEmptyPolls;
|
||||
}
|
||||
|
||||
public void count(ConsumerRecords<?, ?> polled) {
|
||||
emptyPolls = polled.isEmpty() ? emptyPolls + 1 : 0;
|
||||
}
|
||||
|
||||
public boolean noDataEmptyPollsReached() {
|
||||
return emptyPolls >= maxEmptyPolls;
|
||||
}
|
||||
|
||||
}
|
|
@ -0,0 +1,82 @@
|
|||
package com.provectus.kafka.ui.emitter;
|
||||
|
||||
import com.google.common.base.Preconditions;
|
||||
import com.google.common.base.Stopwatch;
|
||||
import com.provectus.kafka.ui.util.ApplicationMetrics;
|
||||
import java.time.Duration;
|
||||
import java.util.Collection;
|
||||
import java.util.List;
|
||||
import java.util.Properties;
|
||||
import java.util.Set;
|
||||
import java.util.regex.Pattern;
|
||||
import java.util.stream.Collectors;
|
||||
import lombok.RequiredArgsConstructor;
|
||||
import lombok.experimental.Delegate;
|
||||
import org.apache.kafka.clients.consumer.Consumer;
|
||||
import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;
|
||||
import org.apache.kafka.clients.consumer.ConsumerRecords;
|
||||
import org.apache.kafka.clients.consumer.KafkaConsumer;
|
||||
import org.apache.kafka.common.TopicPartition;
|
||||
import org.apache.kafka.common.serialization.BytesDeserializer;
|
||||
import org.apache.kafka.common.utils.Bytes;
|
||||
|
||||
|
||||
public class EnhancedConsumer extends KafkaConsumer<Bytes, Bytes> {
|
||||
|
||||
private final PollingThrottler throttler;
|
||||
private final ApplicationMetrics metrics;
|
||||
private String pollingTopic;
|
||||
|
||||
public EnhancedConsumer(Properties properties,
|
||||
PollingThrottler throttler,
|
||||
ApplicationMetrics metrics) {
|
||||
super(properties, new BytesDeserializer(), new BytesDeserializer());
|
||||
this.throttler = throttler;
|
||||
this.metrics = metrics;
|
||||
metrics.activeConsumers().incrementAndGet();
|
||||
}
|
||||
|
||||
public PolledRecords pollEnhanced(Duration dur) {
|
||||
var stopwatch = Stopwatch.createStarted();
|
||||
ConsumerRecords<Bytes, Bytes> polled = poll(dur);
|
||||
PolledRecords polledEnhanced = PolledRecords.create(polled, stopwatch.elapsed());
|
||||
var throttled = throttler.throttleAfterPoll(polledEnhanced.bytes());
|
||||
metrics.meterPolledRecords(pollingTopic, polledEnhanced, throttled);
|
||||
return polledEnhanced;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void assign(Collection<TopicPartition> partitions) {
|
||||
super.assign(partitions);
|
||||
Set<String> assignedTopics = partitions.stream().map(TopicPartition::topic).collect(Collectors.toSet());
|
||||
Preconditions.checkState(assignedTopics.size() == 1);
|
||||
this.pollingTopic = assignedTopics.iterator().next();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void subscribe(Pattern pattern) {
|
||||
throw new UnsupportedOperationException();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void subscribe(Collection<String> topics) {
|
||||
throw new UnsupportedOperationException();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void subscribe(Pattern pattern, ConsumerRebalanceListener listener) {
|
||||
throw new UnsupportedOperationException();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void subscribe(Collection<String> topics, ConsumerRebalanceListener listener) {
|
||||
throw new UnsupportedOperationException();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void close(Duration timeout) {
|
||||
metrics.activeConsumers().decrementAndGet();
|
||||
super.close(timeout);
|
||||
}
|
||||
|
||||
}
|
|
@ -0,0 +1,61 @@
|
|||
package com.provectus.kafka.ui.emitter;
|
||||
|
||||
import com.provectus.kafka.ui.model.ConsumerPosition;
|
||||
import com.provectus.kafka.ui.model.TopicMessageDTO;
|
||||
import com.provectus.kafka.ui.serdes.ConsumerRecordDeserializer;
|
||||
import java.util.Comparator;
|
||||
import java.util.Map;
|
||||
import java.util.TreeMap;
|
||||
import java.util.function.Predicate;
|
||||
import java.util.function.Supplier;
|
||||
import java.util.stream.Collectors;
|
||||
import org.apache.kafka.common.TopicPartition;
|
||||
|
||||
public class ForwardEmitter extends RangePollingEmitter {
|
||||
|
||||
public ForwardEmitter(Supplier<EnhancedConsumer> consumerSupplier,
|
||||
ConsumerPosition consumerPosition,
|
||||
int messagesPerPage,
|
||||
ConsumerRecordDeserializer deserializer,
|
||||
Predicate<TopicMessageDTO> filter,
|
||||
PollingSettings pollingSettings) {
|
||||
super(
|
||||
consumerSupplier,
|
||||
consumerPosition,
|
||||
messagesPerPage,
|
||||
new MessagesProcessing(
|
||||
deserializer,
|
||||
filter,
|
||||
true,
|
||||
messagesPerPage
|
||||
),
|
||||
pollingSettings
|
||||
);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected TreeMap<TopicPartition, FromToOffset> nextPollingRange(TreeMap<TopicPartition, FromToOffset> prevRange,
|
||||
SeekOperations seekOperations) {
|
||||
TreeMap<TopicPartition, Long> readFromOffsets = new TreeMap<>(Comparator.comparingInt(TopicPartition::partition));
|
||||
if (prevRange.isEmpty()) {
|
||||
readFromOffsets.putAll(seekOperations.getOffsetsForSeek());
|
||||
} else {
|
||||
readFromOffsets.putAll(
|
||||
prevRange.entrySet()
|
||||
.stream()
|
||||
.collect(Collectors.toMap(Map.Entry::getKey, e -> e.getValue().to()))
|
||||
);
|
||||
}
|
||||
|
||||
int msgsToPollPerPartition = (int) Math.ceil((double) messagesPerPage / readFromOffsets.size());
|
||||
TreeMap<TopicPartition, FromToOffset> result = new TreeMap<>(Comparator.comparingInt(TopicPartition::partition));
|
||||
readFromOffsets.forEach((tp, fromOffset) -> {
|
||||
long tpEndOffset = seekOperations.getEndOffsets().get(tp);
|
||||
if (fromOffset < tpEndOffset) {
|
||||
result.put(tp, new FromToOffset(fromOffset, Math.min(tpEndOffset, fromOffset + msgsToPollPerPartition)));
|
||||
}
|
||||
});
|
||||
return result;
|
||||
}
|
||||
|
||||
}
|
|
@ -1,66 +0,0 @@
|
|||
package com.provectus.kafka.ui.emitter;
|
||||
|
||||
import com.provectus.kafka.ui.model.ConsumerPosition;
|
||||
import com.provectus.kafka.ui.model.TopicMessageEventDTO;
|
||||
import java.util.function.Supplier;
|
||||
import lombok.extern.slf4j.Slf4j;
|
||||
import org.apache.kafka.clients.consumer.ConsumerRecord;
|
||||
import org.apache.kafka.clients.consumer.ConsumerRecords;
|
||||
import org.apache.kafka.clients.consumer.KafkaConsumer;
|
||||
import org.apache.kafka.common.errors.InterruptException;
|
||||
import org.apache.kafka.common.utils.Bytes;
|
||||
import reactor.core.publisher.FluxSink;
|
||||
|
||||
@Slf4j
|
||||
public class ForwardRecordEmitter
|
||||
extends AbstractEmitter
|
||||
implements java.util.function.Consumer<FluxSink<TopicMessageEventDTO>> {
|
||||
|
||||
private final Supplier<KafkaConsumer<Bytes, Bytes>> consumerSupplier;
|
||||
private final ConsumerPosition position;
|
||||
|
||||
public ForwardRecordEmitter(
|
||||
Supplier<KafkaConsumer<Bytes, Bytes>> consumerSupplier,
|
||||
ConsumerPosition position,
|
||||
MessagesProcessing messagesProcessing,
|
||||
PollingSettings pollingSettings) {
|
||||
super(messagesProcessing, pollingSettings);
|
||||
this.position = position;
|
||||
this.consumerSupplier = consumerSupplier;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void accept(FluxSink<TopicMessageEventDTO> sink) {
|
||||
log.debug("Starting forward polling for {}", position);
|
||||
try (KafkaConsumer<Bytes, Bytes> consumer = consumerSupplier.get()) {
|
||||
sendPhase(sink, "Assigning partitions");
|
||||
var seekOperations = SeekOperations.create(consumer, position);
|
||||
seekOperations.assignAndSeekNonEmptyPartitions();
|
||||
|
||||
EmptyPollsCounter emptyPolls = pollingSettings.createEmptyPollsCounter();
|
||||
while (!sink.isCancelled()
|
||||
&& !sendLimitReached()
|
||||
&& !seekOperations.assignedPartitionsFullyPolled()
|
||||
&& !emptyPolls.noDataEmptyPollsReached()) {
|
||||
|
||||
sendPhase(sink, "Polling");
|
||||
ConsumerRecords<Bytes, Bytes> records = poll(sink, consumer);
|
||||
emptyPolls.count(records);
|
||||
|
||||
log.debug("{} records polled", records.count());
|
||||
|
||||
for (ConsumerRecord<Bytes, Bytes> msg : records) {
|
||||
sendMessage(sink, msg);
|
||||
}
|
||||
}
|
||||
sendFinishStatsAndCompleteSink(sink);
|
||||
log.debug("Polling finished");
|
||||
} catch (InterruptException kafkaInterruptException) {
|
||||
log.debug("Polling finished due to thread interruption");
|
||||
sink.complete();
|
||||
} catch (Exception e) {
|
||||
log.error("Error occurred while consuming records", e);
|
||||
sink.error(e);
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,71 +1,75 @@
|
|||
package com.provectus.kafka.ui.emitter;
|
||||
|
||||
import static java.util.stream.Collectors.collectingAndThen;
|
||||
import static java.util.stream.Collectors.groupingBy;
|
||||
import static java.util.stream.Collectors.toList;
|
||||
|
||||
import com.google.common.annotations.VisibleForTesting;
|
||||
import com.google.common.collect.Iterables;
|
||||
import com.google.common.collect.Streams;
|
||||
import com.provectus.kafka.ui.model.TopicMessageDTO;
|
||||
import com.provectus.kafka.ui.model.TopicMessageEventDTO;
|
||||
import com.provectus.kafka.ui.model.TopicMessagePhaseDTO;
|
||||
import com.provectus.kafka.ui.serdes.ConsumerRecordDeserializer;
|
||||
import java.util.Comparator;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.TreeMap;
|
||||
import java.util.function.Predicate;
|
||||
import javax.annotation.Nullable;
|
||||
import lombok.RequiredArgsConstructor;
|
||||
import lombok.extern.slf4j.Slf4j;
|
||||
import org.apache.kafka.clients.consumer.ConsumerRecord;
|
||||
import org.apache.kafka.clients.consumer.ConsumerRecords;
|
||||
import org.apache.kafka.common.utils.Bytes;
|
||||
import reactor.core.publisher.FluxSink;
|
||||
|
||||
@Slf4j
|
||||
public class MessagesProcessing {
|
||||
@RequiredArgsConstructor
|
||||
class MessagesProcessing {
|
||||
|
||||
private final ConsumingStats consumingStats = new ConsumingStats();
|
||||
private long sentMessages = 0;
|
||||
private int filterApplyErrors = 0;
|
||||
|
||||
private final ConsumerRecordDeserializer deserializer;
|
||||
private final Predicate<TopicMessageDTO> filter;
|
||||
private final boolean ascendingSortBeforeSend;
|
||||
private final @Nullable Integer limit;
|
||||
|
||||
public MessagesProcessing(ConsumerRecordDeserializer deserializer,
|
||||
Predicate<TopicMessageDTO> filter,
|
||||
@Nullable Integer limit) {
|
||||
this.deserializer = deserializer;
|
||||
this.filter = filter;
|
||||
this.limit = limit;
|
||||
}
|
||||
|
||||
boolean limitReached() {
|
||||
return limit != null && sentMessages >= limit;
|
||||
}
|
||||
|
||||
void sendMsg(FluxSink<TopicMessageEventDTO> sink, ConsumerRecord<Bytes, Bytes> rec) {
|
||||
if (!sink.isCancelled() && !limitReached()) {
|
||||
TopicMessageDTO topicMessage = deserializer.deserialize(rec);
|
||||
try {
|
||||
if (filter.test(topicMessage)) {
|
||||
sink.next(
|
||||
new TopicMessageEventDTO()
|
||||
.type(TopicMessageEventDTO.TypeEnum.MESSAGE)
|
||||
.message(topicMessage)
|
||||
);
|
||||
sentMessages++;
|
||||
}
|
||||
} catch (Exception e) {
|
||||
filterApplyErrors++;
|
||||
log.trace("Error applying filter for message {}", topicMessage);
|
||||
}
|
||||
}
|
||||
void send(FluxSink<TopicMessageEventDTO> sink, Iterable<ConsumerRecord<Bytes, Bytes>> polled) {
|
||||
sortForSending(polled, ascendingSortBeforeSend)
|
||||
.forEach(rec -> {
|
||||
if (!limitReached() && !sink.isCancelled()) {
|
||||
TopicMessageDTO topicMessage = deserializer.deserialize(rec);
|
||||
try {
|
||||
if (filter.test(topicMessage)) {
|
||||
sink.next(
|
||||
new TopicMessageEventDTO()
|
||||
.type(TopicMessageEventDTO.TypeEnum.MESSAGE)
|
||||
.message(topicMessage)
|
||||
);
|
||||
sentMessages++;
|
||||
}
|
||||
} catch (Exception e) {
|
||||
consumingStats.incFilterApplyError();
|
||||
log.trace("Error applying filter for message {}", topicMessage);
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
int sentConsumingInfo(FluxSink<TopicMessageEventDTO> sink,
|
||||
ConsumerRecords<Bytes, Bytes> polledRecords,
|
||||
long elapsed) {
|
||||
void sentConsumingInfo(FluxSink<TopicMessageEventDTO> sink, PolledRecords polledRecords) {
|
||||
if (!sink.isCancelled()) {
|
||||
return consumingStats.sendConsumingEvt(sink, polledRecords, elapsed, filterApplyErrors);
|
||||
consumingStats.sendConsumingEvt(sink, polledRecords);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
void sendFinishEvent(FluxSink<TopicMessageEventDTO> sink) {
|
||||
if (!sink.isCancelled()) {
|
||||
consumingStats.sendFinishEvent(sink, filterApplyErrors);
|
||||
consumingStats.sendFinishEvent(sink);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -79,4 +83,30 @@ public class MessagesProcessing {
|
|||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Sorting by timestamps, BUT requesting that records within same partitions should be ordered by offsets.
|
||||
*/
|
||||
@VisibleForTesting
|
||||
static Iterable<ConsumerRecord<Bytes, Bytes>> sortForSending(Iterable<ConsumerRecord<Bytes, Bytes>> records,
|
||||
boolean asc) {
|
||||
Comparator<ConsumerRecord> offsetComparator = asc
|
||||
? Comparator.comparingLong(ConsumerRecord::offset)
|
||||
: Comparator.<ConsumerRecord>comparingLong(ConsumerRecord::offset).reversed();
|
||||
|
||||
// partition -> sorted by offsets records
|
||||
Map<Integer, List<ConsumerRecord<Bytes, Bytes>>> perPartition = Streams.stream(records)
|
||||
.collect(
|
||||
groupingBy(
|
||||
ConsumerRecord::partition,
|
||||
TreeMap::new,
|
||||
collectingAndThen(toList(), lst -> lst.stream().sorted(offsetComparator).toList())));
|
||||
|
||||
Comparator<ConsumerRecord> tsComparator = asc
|
||||
? Comparator.comparing(ConsumerRecord::timestamp)
|
||||
: Comparator.<ConsumerRecord>comparingLong(ConsumerRecord::timestamp).reversed();
|
||||
|
||||
// merge-sorting records from partitions one by one using timestamp comparator
|
||||
return Iterables.mergeSorted(perPartition.values(), tsComparator);
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@ -8,12 +8,13 @@ import java.util.Set;
|
|||
import java.util.stream.Collectors;
|
||||
import lombok.Getter;
|
||||
import lombok.extern.slf4j.Slf4j;
|
||||
import org.apache.commons.lang3.mutable.MutableLong;
|
||||
import org.apache.kafka.clients.consumer.Consumer;
|
||||
import org.apache.kafka.common.TopicPartition;
|
||||
|
||||
@Slf4j
|
||||
@Getter
|
||||
public class OffsetsInfo {
|
||||
class OffsetsInfo {
|
||||
|
||||
private final Consumer<?, ?> consumer;
|
||||
|
||||
|
@ -23,7 +24,7 @@ public class OffsetsInfo {
|
|||
private final Set<TopicPartition> nonEmptyPartitions = new HashSet<>();
|
||||
private final Set<TopicPartition> emptyPartitions = new HashSet<>();
|
||||
|
||||
public OffsetsInfo(Consumer<?, ?> consumer, String topic) {
|
||||
OffsetsInfo(Consumer<?, ?> consumer, String topic) {
|
||||
this(consumer,
|
||||
consumer.partitionsFor(topic).stream()
|
||||
.map(pi -> new TopicPartition(topic, pi.partition()))
|
||||
|
@ -31,8 +32,7 @@ public class OffsetsInfo {
|
|||
);
|
||||
}
|
||||
|
||||
public OffsetsInfo(Consumer<?, ?> consumer,
|
||||
Collection<TopicPartition> targetPartitions) {
|
||||
OffsetsInfo(Consumer<?, ?> consumer, Collection<TopicPartition> targetPartitions) {
|
||||
this.consumer = consumer;
|
||||
this.beginOffsets = consumer.beginningOffsets(targetPartitions);
|
||||
this.endOffsets = consumer.endOffsets(targetPartitions);
|
||||
|
@ -46,8 +46,8 @@ public class OffsetsInfo {
|
|||
});
|
||||
}
|
||||
|
||||
public boolean assignedPartitionsFullyPolled() {
|
||||
for (var tp: consumer.assignment()) {
|
||||
boolean assignedPartitionsFullyPolled() {
|
||||
for (var tp : consumer.assignment()) {
|
||||
Preconditions.checkArgument(endOffsets.containsKey(tp));
|
||||
if (endOffsets.get(tp) > consumer.position(tp)) {
|
||||
return false;
|
||||
|
@ -56,4 +56,10 @@ public class OffsetsInfo {
|
|||
return true;
|
||||
}
|
||||
|
||||
long summaryOffsetsRange() {
|
||||
MutableLong cnt = new MutableLong();
|
||||
nonEmptyPartitions.forEach(tp -> cnt.add(endOffsets.get(tp) - beginOffsets.get(tp)));
|
||||
return cnt.getValue();
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@ -0,0 +1,48 @@
|
|||
package com.provectus.kafka.ui.emitter;
|
||||
|
||||
import java.time.Duration;
|
||||
import java.util.Iterator;
|
||||
import java.util.List;
|
||||
import org.apache.kafka.clients.consumer.ConsumerRecord;
|
||||
import org.apache.kafka.clients.consumer.ConsumerRecords;
|
||||
import org.apache.kafka.common.TopicPartition;
|
||||
import org.apache.kafka.common.header.Header;
|
||||
import org.apache.kafka.common.utils.Bytes;
|
||||
|
||||
public record PolledRecords(int count,
|
||||
int bytes,
|
||||
Duration elapsed,
|
||||
ConsumerRecords<Bytes, Bytes> records) implements Iterable<ConsumerRecord<Bytes, Bytes>> {
|
||||
|
||||
static PolledRecords create(ConsumerRecords<Bytes, Bytes> polled, Duration pollDuration) {
|
||||
return new PolledRecords(
|
||||
polled.count(),
|
||||
calculatePolledRecSize(polled),
|
||||
pollDuration,
|
||||
polled
|
||||
);
|
||||
}
|
||||
|
||||
public List<ConsumerRecord<Bytes, Bytes>> records(TopicPartition tp) {
|
||||
return records.records(tp);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Iterator<ConsumerRecord<Bytes, Bytes>> iterator() {
|
||||
return records.iterator();
|
||||
}
|
||||
|
||||
private static int calculatePolledRecSize(Iterable<ConsumerRecord<Bytes, Bytes>> recs) {
|
||||
int polledBytes = 0;
|
||||
for (ConsumerRecord<Bytes, Bytes> rec : recs) {
|
||||
for (Header header : rec.headers()) {
|
||||
polledBytes +=
|
||||
(header.key() != null ? header.key().getBytes().length : 0)
|
||||
+ (header.value() != null ? header.value().length : 0);
|
||||
}
|
||||
polledBytes += rec.key() == null ? 0 : rec.serializedKeySize();
|
||||
polledBytes += rec.value() == null ? 0 : rec.serializedValueSize();
|
||||
}
|
||||
return polledBytes;
|
||||
}
|
||||
}
|
|
@ -8,13 +8,8 @@ import java.util.function.Supplier;
|
|||
public class PollingSettings {
|
||||
|
||||
private static final Duration DEFAULT_POLL_TIMEOUT = Duration.ofMillis(1_000);
|
||||
private static final Duration DEFAULT_PARTITION_POLL_TIMEOUT = Duration.ofMillis(200);
|
||||
private static final int DEFAULT_NO_DATA_EMPTY_POLLS = 3;
|
||||
|
||||
private final Duration pollTimeout;
|
||||
private final Duration partitionPollTimeout;
|
||||
private final int notDataEmptyPolls; //see EmptyPollsCounter docs
|
||||
|
||||
private final Supplier<PollingThrottler> throttlerSupplier;
|
||||
|
||||
public static PollingSettings create(ClustersProperties.Cluster cluster,
|
||||
|
@ -26,18 +21,8 @@ public class PollingSettings {
|
|||
? Duration.ofMillis(pollingProps.getPollTimeoutMs())
|
||||
: DEFAULT_POLL_TIMEOUT;
|
||||
|
||||
var partitionPollTimeout = pollingProps.getPartitionPollTimeout() != null
|
||||
? Duration.ofMillis(pollingProps.getPartitionPollTimeout())
|
||||
: Duration.ofMillis(pollTimeout.toMillis() / 5);
|
||||
|
||||
int noDataEmptyPolls = pollingProps.getNoDataEmptyPolls() != null
|
||||
? pollingProps.getNoDataEmptyPolls()
|
||||
: DEFAULT_NO_DATA_EMPTY_POLLS;
|
||||
|
||||
return new PollingSettings(
|
||||
pollTimeout,
|
||||
partitionPollTimeout,
|
||||
noDataEmptyPolls,
|
||||
PollingThrottler.throttlerSupplier(cluster)
|
||||
);
|
||||
}
|
||||
|
@ -45,34 +30,20 @@ public class PollingSettings {
|
|||
public static PollingSettings createDefault() {
|
||||
return new PollingSettings(
|
||||
DEFAULT_POLL_TIMEOUT,
|
||||
DEFAULT_PARTITION_POLL_TIMEOUT,
|
||||
DEFAULT_NO_DATA_EMPTY_POLLS,
|
||||
PollingThrottler::noop
|
||||
);
|
||||
}
|
||||
|
||||
private PollingSettings(Duration pollTimeout,
|
||||
Duration partitionPollTimeout,
|
||||
int notDataEmptyPolls,
|
||||
Supplier<PollingThrottler> throttlerSupplier) {
|
||||
this.pollTimeout = pollTimeout;
|
||||
this.partitionPollTimeout = partitionPollTimeout;
|
||||
this.notDataEmptyPolls = notDataEmptyPolls;
|
||||
this.throttlerSupplier = throttlerSupplier;
|
||||
}
|
||||
|
||||
public EmptyPollsCounter createEmptyPollsCounter() {
|
||||
return new EmptyPollsCounter(notDataEmptyPolls);
|
||||
}
|
||||
|
||||
public Duration getPollTimeout() {
|
||||
return pollTimeout;
|
||||
}
|
||||
|
||||
public Duration getPartitionPollTimeout() {
|
||||
return partitionPollTimeout;
|
||||
}
|
||||
|
||||
public PollingThrottler getPollingThrottler() {
|
||||
return throttlerSupplier.get();
|
||||
}
|
||||
|
|
|
@ -3,11 +3,8 @@ package com.provectus.kafka.ui.emitter;
|
|||
import com.google.common.annotations.VisibleForTesting;
|
||||
import com.google.common.util.concurrent.RateLimiter;
|
||||
import com.provectus.kafka.ui.config.ClustersProperties;
|
||||
import com.provectus.kafka.ui.util.ConsumerRecordsUtil;
|
||||
import java.util.function.Supplier;
|
||||
import lombok.extern.slf4j.Slf4j;
|
||||
import org.apache.kafka.clients.consumer.ConsumerRecords;
|
||||
import org.apache.kafka.common.utils.Bytes;
|
||||
|
||||
@Slf4j
|
||||
public class PollingThrottler {
|
||||
|
@ -36,18 +33,17 @@ public class PollingThrottler {
|
|||
return new PollingThrottler("noop", RateLimiter.create(Long.MAX_VALUE));
|
||||
}
|
||||
|
||||
public void throttleAfterPoll(int polledBytes) {
|
||||
//returns true if polling was throttled
|
||||
public boolean throttleAfterPoll(int polledBytes) {
|
||||
if (polledBytes > 0) {
|
||||
double sleptSeconds = rateLimiter.acquire(polledBytes);
|
||||
if (!throttled && sleptSeconds > 0.0) {
|
||||
throttled = true;
|
||||
log.debug("Polling throttling enabled for cluster {} at rate {} bytes/sec", clusterName, rateLimiter.getRate());
|
||||
return true;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public void throttleAfterPoll(ConsumerRecords<Bytes, Bytes> polled) {
|
||||
throttleAfterPoll(ConsumerRecordsUtil.calculatePolledSize(polled));
|
||||
return false;
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@ -0,0 +1,98 @@
|
|||
package com.provectus.kafka.ui.emitter;
|
||||
|
||||
import com.provectus.kafka.ui.model.ConsumerPosition;
|
||||
import com.provectus.kafka.ui.model.TopicMessageEventDTO;
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
import java.util.TreeMap;
|
||||
import java.util.function.Supplier;
|
||||
import lombok.extern.slf4j.Slf4j;
|
||||
import org.apache.kafka.clients.consumer.ConsumerRecord;
|
||||
import org.apache.kafka.common.TopicPartition;
|
||||
import org.apache.kafka.common.errors.InterruptException;
|
||||
import org.apache.kafka.common.utils.Bytes;
|
||||
import reactor.core.publisher.FluxSink;
|
||||
|
||||
@Slf4j
|
||||
abstract class RangePollingEmitter extends AbstractEmitter {
|
||||
|
||||
private final Supplier<EnhancedConsumer> consumerSupplier;
|
||||
protected final ConsumerPosition consumerPosition;
|
||||
protected final int messagesPerPage;
|
||||
|
||||
protected RangePollingEmitter(Supplier<EnhancedConsumer> consumerSupplier,
|
||||
ConsumerPosition consumerPosition,
|
||||
int messagesPerPage,
|
||||
MessagesProcessing messagesProcessing,
|
||||
PollingSettings pollingSettings) {
|
||||
super(messagesProcessing, pollingSettings);
|
||||
this.consumerPosition = consumerPosition;
|
||||
this.messagesPerPage = messagesPerPage;
|
||||
this.consumerSupplier = consumerSupplier;
|
||||
}
|
||||
|
||||
protected record FromToOffset(/*inclusive*/ long from, /*exclusive*/ long to) {
|
||||
}
|
||||
|
||||
//should return empty map if polling should be stopped
|
||||
protected abstract TreeMap<TopicPartition, FromToOffset> nextPollingRange(
|
||||
TreeMap<TopicPartition, FromToOffset> prevRange, //empty on start
|
||||
SeekOperations seekOperations
|
||||
);
|
||||
|
||||
@Override
|
||||
public void accept(FluxSink<TopicMessageEventDTO> sink) {
|
||||
log.debug("Starting polling for {}", consumerPosition);
|
||||
try (EnhancedConsumer consumer = consumerSupplier.get()) {
|
||||
sendPhase(sink, "Consumer created");
|
||||
var seekOperations = SeekOperations.create(consumer, consumerPosition);
|
||||
TreeMap<TopicPartition, FromToOffset> pollRange = nextPollingRange(new TreeMap<>(), seekOperations);
|
||||
log.debug("Starting from offsets {}", pollRange);
|
||||
|
||||
while (!sink.isCancelled() && !pollRange.isEmpty() && !sendLimitReached()) {
|
||||
var polled = poll(consumer, sink, pollRange);
|
||||
send(sink, polled);
|
||||
pollRange = nextPollingRange(pollRange, seekOperations);
|
||||
}
|
||||
if (sink.isCancelled()) {
|
||||
log.debug("Polling finished due to sink cancellation");
|
||||
}
|
||||
sendFinishStatsAndCompleteSink(sink);
|
||||
log.debug("Polling finished");
|
||||
} catch (InterruptException kafkaInterruptException) {
|
||||
log.debug("Polling finished due to thread interruption");
|
||||
sink.complete();
|
||||
} catch (Exception e) {
|
||||
log.error("Error occurred while consuming records", e);
|
||||
sink.error(e);
|
||||
}
|
||||
}
|
||||
|
||||
private List<ConsumerRecord<Bytes, Bytes>> poll(EnhancedConsumer consumer,
|
||||
FluxSink<TopicMessageEventDTO> sink,
|
||||
TreeMap<TopicPartition, FromToOffset> range) {
|
||||
log.trace("Polling range {}", range);
|
||||
sendPhase(sink,
|
||||
"Polling partitions: %s".formatted(range.keySet().stream().map(TopicPartition::partition).sorted().toList()));
|
||||
|
||||
consumer.assign(range.keySet());
|
||||
range.forEach((tp, fromTo) -> consumer.seek(tp, fromTo.from));
|
||||
|
||||
List<ConsumerRecord<Bytes, Bytes>> result = new ArrayList<>();
|
||||
while (!sink.isCancelled() && consumer.paused().size() < range.size()) {
|
||||
var polledRecords = poll(sink, consumer);
|
||||
range.forEach((tp, fromTo) -> {
|
||||
polledRecords.records(tp).stream()
|
||||
.filter(r -> r.offset() < fromTo.to)
|
||||
.forEach(result::add);
|
||||
|
||||
//next position is out of target range -> pausing partition
|
||||
if (consumer.position(tp) >= fromTo.to) {
|
||||
consumer.pause(List.of(tp));
|
||||
}
|
||||
});
|
||||
}
|
||||
consumer.resume(consumer.paused());
|
||||
return result;
|
||||
}
|
||||
}
|
|
@ -10,17 +10,18 @@ import java.util.stream.Collectors;
|
|||
import javax.annotation.Nullable;
|
||||
import lombok.AccessLevel;
|
||||
import lombok.RequiredArgsConstructor;
|
||||
import org.apache.commons.lang3.mutable.MutableLong;
|
||||
import org.apache.kafka.clients.consumer.Consumer;
|
||||
import org.apache.kafka.common.TopicPartition;
|
||||
|
||||
@RequiredArgsConstructor(access = AccessLevel.PACKAGE)
|
||||
class SeekOperations {
|
||||
public class SeekOperations {
|
||||
|
||||
private final Consumer<?, ?> consumer;
|
||||
private final OffsetsInfo offsetsInfo;
|
||||
private final Map<TopicPartition, Long> offsetsForSeek; //only contains non-empty partitions!
|
||||
|
||||
static SeekOperations create(Consumer<?, ?> consumer, ConsumerPosition consumerPosition) {
|
||||
public static SeekOperations create(Consumer<?, ?> consumer, ConsumerPosition consumerPosition) {
|
||||
OffsetsInfo offsetsInfo;
|
||||
if (consumerPosition.getSeekTo() == null) {
|
||||
offsetsInfo = new OffsetsInfo(consumer, consumerPosition.getTopic());
|
||||
|
@ -34,25 +35,37 @@ class SeekOperations {
|
|||
);
|
||||
}
|
||||
|
||||
void assignAndSeekNonEmptyPartitions() {
|
||||
public void assignAndSeekNonEmptyPartitions() {
|
||||
consumer.assign(offsetsForSeek.keySet());
|
||||
offsetsForSeek.forEach(consumer::seek);
|
||||
}
|
||||
|
||||
Map<TopicPartition, Long> getBeginOffsets() {
|
||||
public Map<TopicPartition, Long> getBeginOffsets() {
|
||||
return offsetsInfo.getBeginOffsets();
|
||||
}
|
||||
|
||||
Map<TopicPartition, Long> getEndOffsets() {
|
||||
public Map<TopicPartition, Long> getEndOffsets() {
|
||||
return offsetsInfo.getEndOffsets();
|
||||
}
|
||||
|
||||
boolean assignedPartitionsFullyPolled() {
|
||||
public boolean assignedPartitionsFullyPolled() {
|
||||
return offsetsInfo.assignedPartitionsFullyPolled();
|
||||
}
|
||||
|
||||
// sum of (end - start) offsets for all partitions
|
||||
public long summaryOffsetsRange() {
|
||||
return offsetsInfo.summaryOffsetsRange();
|
||||
}
|
||||
|
||||
// sum of differences between initial consumer seek and current consumer position (across all partitions)
|
||||
public long offsetsProcessedFromSeek() {
|
||||
MutableLong count = new MutableLong();
|
||||
offsetsForSeek.forEach((tp, initialOffset) -> count.add(consumer.position(tp) - initialOffset));
|
||||
return count.getValue();
|
||||
}
|
||||
|
||||
// Get offsets to seek to. NOTE: offsets do not contain empty partitions offsets
|
||||
Map<TopicPartition, Long> getOffsetsForSeek() {
|
||||
public Map<TopicPartition, Long> getOffsetsForSeek() {
|
||||
return offsetsForSeek;
|
||||
}
|
||||
|
||||
|
@ -61,19 +74,19 @@ class SeekOperations {
|
|||
*/
|
||||
@VisibleForTesting
|
||||
static Map<TopicPartition, Long> getOffsetsForSeek(Consumer<?, ?> consumer,
|
||||
OffsetsInfo offsetsInfo,
|
||||
SeekTypeDTO seekType,
|
||||
@Nullable Map<TopicPartition, Long> seekTo) {
|
||||
OffsetsInfo offsetsInfo,
|
||||
SeekTypeDTO seekType,
|
||||
@Nullable Map<TopicPartition, Long> seekTo) {
|
||||
switch (seekType) {
|
||||
case LATEST:
|
||||
return consumer.endOffsets(offsetsInfo.getNonEmptyPartitions());
|
||||
case BEGINNING:
|
||||
return consumer.beginningOffsets(offsetsInfo.getNonEmptyPartitions());
|
||||
case OFFSET:
|
||||
Preconditions.checkNotNull(offsetsInfo);
|
||||
Preconditions.checkNotNull(seekTo);
|
||||
return fixOffsets(offsetsInfo, seekTo);
|
||||
case TIMESTAMP:
|
||||
Preconditions.checkNotNull(offsetsInfo);
|
||||
Preconditions.checkNotNull(seekTo);
|
||||
return offsetsForTimestamp(consumer, offsetsInfo, seekTo);
|
||||
default:
|
||||
throw new IllegalStateException();
|
||||
|
@ -100,7 +113,7 @@ class SeekOperations {
|
|||
}
|
||||
|
||||
private static Map<TopicPartition, Long> offsetsForTimestamp(Consumer<?, ?> consumer, OffsetsInfo offsetsInfo,
|
||||
Map<TopicPartition, Long> timestamps) {
|
||||
Map<TopicPartition, Long> timestamps) {
|
||||
timestamps = new HashMap<>(timestamps);
|
||||
timestamps.keySet().retainAll(offsetsInfo.getNonEmptyPartitions());
|
||||
|
||||
|
|
|
@ -1,27 +1,28 @@
|
|||
package com.provectus.kafka.ui.emitter;
|
||||
|
||||
import com.provectus.kafka.ui.model.ConsumerPosition;
|
||||
import com.provectus.kafka.ui.model.TopicMessageDTO;
|
||||
import com.provectus.kafka.ui.model.TopicMessageEventDTO;
|
||||
import com.provectus.kafka.ui.serdes.ConsumerRecordDeserializer;
|
||||
import java.util.HashMap;
|
||||
import java.util.function.Predicate;
|
||||
import java.util.function.Supplier;
|
||||
import lombok.extern.slf4j.Slf4j;
|
||||
import org.apache.kafka.clients.consumer.KafkaConsumer;
|
||||
import org.apache.kafka.common.errors.InterruptException;
|
||||
import org.apache.kafka.common.utils.Bytes;
|
||||
import reactor.core.publisher.FluxSink;
|
||||
|
||||
@Slf4j
|
||||
public class TailingEmitter extends AbstractEmitter
|
||||
implements java.util.function.Consumer<FluxSink<TopicMessageEventDTO>> {
|
||||
public class TailingEmitter extends AbstractEmitter {
|
||||
|
||||
private final Supplier<KafkaConsumer<Bytes, Bytes>> consumerSupplier;
|
||||
private final Supplier<EnhancedConsumer> consumerSupplier;
|
||||
private final ConsumerPosition consumerPosition;
|
||||
|
||||
public TailingEmitter(Supplier<KafkaConsumer<Bytes, Bytes>> consumerSupplier,
|
||||
public TailingEmitter(Supplier<EnhancedConsumer> consumerSupplier,
|
||||
ConsumerPosition consumerPosition,
|
||||
MessagesProcessing messagesProcessing,
|
||||
ConsumerRecordDeserializer deserializer,
|
||||
Predicate<TopicMessageDTO> filter,
|
||||
PollingSettings pollingSettings) {
|
||||
super(messagesProcessing, pollingSettings);
|
||||
super(new MessagesProcessing(deserializer, filter, false, null), pollingSettings);
|
||||
this.consumerSupplier = consumerSupplier;
|
||||
this.consumerPosition = consumerPosition;
|
||||
}
|
||||
|
@ -29,12 +30,12 @@ public class TailingEmitter extends AbstractEmitter
|
|||
@Override
|
||||
public void accept(FluxSink<TopicMessageEventDTO> sink) {
|
||||
log.debug("Starting tailing polling for {}", consumerPosition);
|
||||
try (KafkaConsumer<Bytes, Bytes> consumer = consumerSupplier.get()) {
|
||||
try (EnhancedConsumer consumer = consumerSupplier.get()) {
|
||||
assignAndSeek(consumer);
|
||||
while (!sink.isCancelled()) {
|
||||
sendPhase(sink, "Polling");
|
||||
var polled = poll(sink, consumer);
|
||||
polled.forEach(r -> sendMessage(sink, r));
|
||||
send(sink, polled);
|
||||
}
|
||||
sink.complete();
|
||||
log.debug("Tailing finished");
|
||||
|
@ -47,7 +48,7 @@ public class TailingEmitter extends AbstractEmitter
|
|||
}
|
||||
}
|
||||
|
||||
private void assignAndSeek(KafkaConsumer<Bytes, Bytes> consumer) {
|
||||
private void assignAndSeek(EnhancedConsumer consumer) {
|
||||
var seekOperations = SeekOperations.create(consumer, consumerPosition);
|
||||
var seekOffsets = new HashMap<>(seekOperations.getEndOffsets()); // defaulting offsets to topic end
|
||||
seekOffsets.putAll(seekOperations.getOffsetsForSeek()); // this will only set non-empty partitions
|
||||
|
|
|
@ -34,7 +34,7 @@ public interface KafkaConnectMapper {
com.provectus.kafka.ui.connect.model.ConnectorPluginConfigValidationResponse
connectorPluginConfigValidationResponse);
default FullConnectorInfoDTO fullConnectorInfoFromTuple(InternalConnectInfo connectInfo) {
default FullConnectorInfoDTO fullConnectorInfo(InternalConnectInfo connectInfo) {
ConnectorDTO connector = connectInfo.getConnector();
List<TaskDTO> tasks = connectInfo.getTasks();
int failedTasksCount = (int) tasks.stream()

@ -16,6 +16,7 @@ import com.provectus.kafka.ui.model.rbac.permission.KsqlAction;
import com.provectus.kafka.ui.model.rbac.permission.SchemaAction;
import com.provectus.kafka.ui.model.rbac.permission.TopicAction;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.regex.Pattern;
import javax.annotation.Nullable;

@ -73,6 +74,10 @@ public class Permission {
}
private List<String> getAllActionValues() {
if (resource == null) {
return Collections.emptyList();
}
return switch (this.resource) {
case APPLICATIONCONFIG -> Arrays.stream(ApplicationConfigAction.values()).map(Enum::toString).toList();
case CLUSTERCONFIG -> Arrays.stream(ClusterConfigAction.values()).map(Enum::toString).toList();
@ -1,15 +1,25 @@
package com.provectus.kafka.ui.model.rbac.permission;
import java.util.Set;
import org.apache.commons.lang3.EnumUtils;
import org.jetbrains.annotations.Nullable;
public enum AclAction implements PermissibleAction {
VIEW,
EDIT;
EDIT
;
public static final Set<AclAction> ALTER_ACTIONS = Set.of(EDIT);
@Nullable
public static AclAction fromString(String name) {
return EnumUtils.getEnum(AclAction.class, name);
}
@Override
public boolean isAlter() {
return ALTER_ACTIONS.contains(this);
}
}

@ -1,5 +1,6 @@
package com.provectus.kafka.ui.model.rbac.permission;
import java.util.Set;
import org.apache.commons.lang3.EnumUtils;
import org.jetbrains.annotations.Nullable;

@ -10,9 +11,15 @@ public enum ApplicationConfigAction implements PermissibleAction {
;
public static final Set<ApplicationConfigAction> ALTER_ACTIONS = Set.of(EDIT);
@Nullable
public static ApplicationConfigAction fromString(String name) {
return EnumUtils.getEnum(ApplicationConfigAction.class, name);
}
@Override
public boolean isAlter() {
return ALTER_ACTIONS.contains(this);
}
}

@ -1,14 +1,24 @@
package com.provectus.kafka.ui.model.rbac.permission;
import java.util.Set;
import org.apache.commons.lang3.EnumUtils;
import org.jetbrains.annotations.Nullable;
public enum AuditAction implements PermissibleAction {
VIEW;
VIEW
;
private static final Set<AuditAction> ALTER_ACTIONS = Set.of();
@Nullable
public static AuditAction fromString(String name) {
return EnumUtils.getEnum(AuditAction.class, name);
}
@Override
public boolean isAlter() {
return ALTER_ACTIONS.contains(this);
}
}

@ -1,5 +1,6 @@
package com.provectus.kafka.ui.model.rbac.permission;
import java.util.Set;
import org.apache.commons.lang3.EnumUtils;
import org.jetbrains.annotations.Nullable;

@ -10,9 +11,15 @@ public enum ClusterConfigAction implements PermissibleAction {
;
public static final Set<ClusterConfigAction> ALTER_ACTIONS = Set.of(EDIT);
@Nullable
public static ClusterConfigAction fromString(String name) {
return EnumUtils.getEnum(ClusterConfigAction.class, name);
}
@Override
public boolean isAlter() {
return ALTER_ACTIONS.contains(this);
}
}
@ -1,5 +1,6 @@
package com.provectus.kafka.ui.model.rbac.permission;
import java.util.Set;
import org.apache.commons.lang3.EnumUtils;
import org.jetbrains.annotations.Nullable;

@ -12,9 +13,15 @@ public enum ConnectAction implements PermissibleAction {
;
public static final Set<ConnectAction> ALTER_ACTIONS = Set.of(CREATE, EDIT, RESTART);
@Nullable
public static ConnectAction fromString(String name) {
return EnumUtils.getEnum(ConnectAction.class, name);
}
@Override
public boolean isAlter() {
return ALTER_ACTIONS.contains(this);
}
}

@ -1,5 +1,6 @@
package com.provectus.kafka.ui.model.rbac.permission;
import java.util.Set;
import org.apache.commons.lang3.EnumUtils;
import org.jetbrains.annotations.Nullable;

@ -7,14 +8,19 @@ public enum ConsumerGroupAction implements PermissibleAction {
VIEW,
DELETE,
RESET_OFFSETS
;
public static final Set<ConsumerGroupAction> ALTER_ACTIONS = Set.of(DELETE, RESET_OFFSETS);
@Nullable
public static ConsumerGroupAction fromString(String name) {
return EnumUtils.getEnum(ConsumerGroupAction.class, name);
}
@Override
public boolean isAlter() {
return ALTER_ACTIONS.contains(this);
}
}

@ -1,15 +1,24 @@
package com.provectus.kafka.ui.model.rbac.permission;
import java.util.Set;
import org.apache.commons.lang3.EnumUtils;
import org.jetbrains.annotations.Nullable;
public enum KsqlAction implements PermissibleAction {
EXECUTE;
EXECUTE
;
public static final Set<KsqlAction> ALTER_ACTIONS = Set.of(EXECUTE);
@Nullable
public static KsqlAction fromString(String name) {
return EnumUtils.getEnum(KsqlAction.class, name);
}
@Override
public boolean isAlter() {
return ALTER_ACTIONS.contains(this);
}
}
@ -5,4 +5,9 @@ public sealed interface PermissibleAction permits
ConsumerGroupAction, SchemaAction,
ConnectAction, ClusterConfigAction,
KsqlAction, TopicAction, AuditAction {
String name();
boolean isAlter();
}

@ -1,5 +1,6 @@
package com.provectus.kafka.ui.model.rbac.permission;
import java.util.Set;
import org.apache.commons.lang3.EnumUtils;
import org.jetbrains.annotations.Nullable;

@ -13,9 +14,15 @@ public enum SchemaAction implements PermissibleAction {
;
public static final Set<SchemaAction> ALTER_ACTIONS = Set.of(CREATE, DELETE, EDIT, MODIFY_GLOBAL_COMPATIBILITY);
@Nullable
public static SchemaAction fromString(String name) {
return EnumUtils.getEnum(SchemaAction.class, name);
}
@Override
public boolean isAlter() {
return ALTER_ACTIONS.contains(this);
}
}

@ -1,5 +1,6 @@
package com.provectus.kafka.ui.model.rbac.permission;
import java.util.Set;
import org.apache.commons.lang3.EnumUtils;
import org.jetbrains.annotations.Nullable;

@ -9,16 +10,21 @@ public enum TopicAction implements PermissibleAction {
CREATE,
EDIT,
DELETE,
MESSAGES_READ,
MESSAGES_PRODUCE,
MESSAGES_DELETE,
;
public static final Set<TopicAction> ALTER_ACTIONS = Set.of(CREATE, EDIT, DELETE, MESSAGES_PRODUCE, MESSAGES_DELETE);
@Nullable
public static TopicAction fromString(String name) {
return EnumUtils.getEnum(TopicAction.class, name);
}
@Override
public boolean isAlter() {
return ALTER_ACTIONS.contains(this);
}
}
@ -1,6 +1,7 @@
package com.provectus.kafka.ui.serdes;
import com.provectus.kafka.ui.model.TopicMessageDTO;
import com.provectus.kafka.ui.model.TopicMessageDTO.TimestampTypeEnum;
import com.provectus.kafka.ui.serde.api.Serde;
import java.time.Instant;
import java.time.OffsetDateTime;

@ -8,6 +9,7 @@ import java.time.ZoneId;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;
import java.util.function.UnaryOperator;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.ConsumerRecord;

@ -32,6 +34,8 @@ public class ConsumerRecordDeserializer {
private final Serde.Deserializer fallbackKeyDeserializer;
private final Serde.Deserializer fallbackValueDeserializer;
private final UnaryOperator<TopicMessageDTO> masker;
public TopicMessageDTO deserialize(ConsumerRecord<Bytes, Bytes> rec) {
var message = new TopicMessageDTO();
fillKey(message, rec);

@ -47,20 +51,15 @@ public class ConsumerRecordDeserializer {
message.setValueSize(getValueSize(rec));
message.setHeadersSize(getHeadersSize(rec));
return message;
return masker.apply(message);
}
private static TopicMessageDTO.TimestampTypeEnum mapToTimestampType(TimestampType timestampType) {
switch (timestampType) {
case CREATE_TIME:
return TopicMessageDTO.TimestampTypeEnum.CREATE_TIME;
case LOG_APPEND_TIME:
return TopicMessageDTO.TimestampTypeEnum.LOG_APPEND_TIME;
case NO_TIMESTAMP_TYPE:
return TopicMessageDTO.TimestampTypeEnum.NO_TIMESTAMP_TYPE;
default:
throw new IllegalArgumentException("Unknown timestampType: " + timestampType);
}
private static TimestampTypeEnum mapToTimestampType(TimestampType timestampType) {
return switch (timestampType) {
case CREATE_TIME -> TimestampTypeEnum.CREATE_TIME;
case LOG_APPEND_TIME -> TimestampTypeEnum.LOG_APPEND_TIME;
case NO_TIMESTAMP_TYPE -> TimestampTypeEnum.NO_TIMESTAMP_TYPE;
};
}
private void fillHeaders(TopicMessageDTO message, ConsumerRecord<Bytes, Bytes> rec) {

@ -12,9 +12,11 @@ import com.provectus.kafka.ui.serde.api.Serde;
import com.provectus.kafka.ui.serdes.builtin.AvroEmbeddedSerde;
import com.provectus.kafka.ui.serdes.builtin.Base64Serde;
import com.provectus.kafka.ui.serdes.builtin.ConsumerOffsetsSerde;
import com.provectus.kafka.ui.serdes.builtin.HexSerde;
import com.provectus.kafka.ui.serdes.builtin.Int32Serde;
import com.provectus.kafka.ui.serdes.builtin.Int64Serde;
import com.provectus.kafka.ui.serdes.builtin.ProtobufFileSerde;
import com.provectus.kafka.ui.serdes.builtin.ProtobufRawSerde;
import com.provectus.kafka.ui.serdes.builtin.StringSerde;
import com.provectus.kafka.ui.serdes.builtin.UInt32Serde;
import com.provectus.kafka.ui.serdes.builtin.UInt64Serde;

@ -47,7 +49,9 @@ public class SerdesInitializer {
.put(UInt64Serde.name(), UInt64Serde.class)
.put(AvroEmbeddedSerde.name(), AvroEmbeddedSerde.class)
.put(Base64Serde.name(), Base64Serde.class)
.put(HexSerde.name(), HexSerde.class)
.put(UuidBinarySerde.name(), UuidBinarySerde.class)
.put(ProtobufRawSerde.name(), ProtobufRawSerde.class)
.build(),
new CustomSerdeLoader()
);

@ -19,12 +19,6 @@ public class AvroEmbeddedSerde implements BuiltInSerde {
return "Avro (Embedded)";
}
@Override
public void configure(PropertyResolver serdeProperties,
PropertyResolver kafkaClusterProperties,
PropertyResolver globalProperties) {
}
@Override
public Optional<String> getDescription() {
return Optional.empty();
@ -1,8 +1,6 @@
package com.provectus.kafka.ui.serdes.builtin;
import com.provectus.kafka.ui.serde.api.DeserializeResult;
import com.provectus.kafka.ui.serde.api.PropertyResolver;
import com.provectus.kafka.ui.serde.api.RecordHeaders;
import com.provectus.kafka.ui.serde.api.SchemaDescription;
import com.provectus.kafka.ui.serdes.BuiltInSerde;
import java.util.Base64;

@ -16,12 +14,6 @@ public class Base64Serde implements BuiltInSerde {
return "Base64";
}
@Override
public void configure(PropertyResolver serdeProperties,
PropertyResolver kafkaClusterProperties,
PropertyResolver globalProperties) {
}
@Override
public Optional<String> getDescription() {
return Optional.empty();

@ -44,31 +36,25 @@ public class Base64Serde implements BuiltInSerde {
@Override
public Serializer serializer(String topic, Target type) {
return new Serializer() {
@Override
public byte[] serialize(String input) {
input = input.trim();
// it is actually a hack to provide ability to sent empty array as a key/value
if (input.length() == 0) {
return new byte[]{};
}
return Base64.getDecoder().decode(input);
var decoder = Base64.getDecoder();
return inputString -> {
inputString = inputString.trim();
// it is actually a hack to provide ability to sent empty array as a key/value
if (inputString.length() == 0) {
return new byte[] {};
}
return decoder.decode(inputString);
};
}
@Override
public Deserializer deserializer(String topic, Target type) {
var encoder = Base64.getEncoder();
return new Deserializer() {
@Override
public DeserializeResult deserialize(RecordHeaders headers, byte[] data) {
return new DeserializeResult(
return (headers, data) ->
new DeserializeResult(
encoder.encodeToString(data),
DeserializeResult.Type.STRING,
Map.of()
);
}
};
}
}

@ -2,6 +2,7 @@ package com.provectus.kafka.ui.serdes.builtin;
import com.fasterxml.jackson.core.JsonGenerator;
import com.fasterxml.jackson.databind.JsonSerializer;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.SerializerProvider;
import com.fasterxml.jackson.databind.json.JsonMapper;
import com.fasterxml.jackson.databind.module.SimpleModule;
@ -0,0 +1,89 @@
package com.provectus.kafka.ui.serdes.builtin;
import com.provectus.kafka.ui.serde.api.DeserializeResult;
import com.provectus.kafka.ui.serde.api.PropertyResolver;
import com.provectus.kafka.ui.serde.api.SchemaDescription;
import com.provectus.kafka.ui.serdes.BuiltInSerde;
import java.util.HexFormat;
import java.util.Map;
import java.util.Optional;
public class HexSerde implements BuiltInSerde {
private HexFormat deserializeHexFormat;
public static String name() {
return "Hex";
}
@Override
public void autoConfigure(PropertyResolver kafkaClusterProperties, PropertyResolver globalProperties) {
configure(" ", true);
}
@Override
public void configure(PropertyResolver serdeProperties,
PropertyResolver kafkaClusterProperties,
PropertyResolver globalProperties) {
String delim = serdeProperties.getProperty("delimiter", String.class).orElse(" ");
boolean uppercase = serdeProperties.getProperty("uppercase", Boolean.class).orElse(true);
configure(delim, uppercase);
}
private void configure(String delim, boolean uppercase) {
deserializeHexFormat = HexFormat.ofDelimiter(delim);
if (uppercase) {
deserializeHexFormat = deserializeHexFormat.withUpperCase();
}
}
@Override
public Optional<String> getDescription() {
return Optional.empty();
}
@Override
public Optional<SchemaDescription> getSchema(String topic, Target type) {
return Optional.empty();
}
@Override
public boolean canDeserialize(String topic, Target type) {
return true;
}
@Override
public boolean canSerialize(String topic, Target type) {
return true;
}
@Override
public Serializer serializer(String topic, Target type) {
return input -> {
input = input.trim();
// it is a hack to provide ability to sent empty array as a key/value
if (input.length() == 0) {
return new byte[] {};
}
return HexFormat.of().parseHex(prepareInputForParse(input));
};
}
// removing most-common delimiters and prefixes
private static String prepareInputForParse(String input) {
return input
.replaceAll(" ", "")
.replaceAll("#", "")
.replaceAll(":", "");
}
@Override
public Deserializer deserializer(String topic, Target type) {
return (headers, data) ->
new DeserializeResult(
deserializeHexFormat.formatHex(data),
DeserializeResult.Type.STRING,
Map.of()
);
}
}
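For reference (not part of this commit): a minimal standalone sketch of the java.util.HexFormat behaviour the new Hex serde builds on, assuming the defaults shown above (space delimiter, upper case).

import java.util.Arrays;
import java.util.HexFormat;

public class HexFormatSketch {
  public static void main(String[] args) {
    byte[] payload = {0x0F, (byte) 0xA2, 0x7C};
    // deserialization side: delimited, upper-case output, matching the serde defaults above
    HexFormat display = HexFormat.ofDelimiter(" ").withUpperCase();
    System.out.println(display.formatHex(payload)); // prints "0F A2 7C"
    // serialization side: delimiters and prefixes are stripped before parsing
    byte[] parsed = HexFormat.of().parseHex("0FA27C");
    System.out.println(Arrays.equals(parsed, payload)); // true
  }
}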
@ -55,15 +55,11 @@ public class Int64Serde implements BuiltInSerde {
@Override
public Deserializer deserializer(String topic, Target type) {
return new Deserializer() {
@Override
public DeserializeResult deserialize(RecordHeaders headers, byte[] data) {
return new DeserializeResult(
return (headers, data) ->
new DeserializeResult(
String.valueOf(Longs.fromByteArray(data)),
DeserializeResult.Type.JSON,
Map.of()
);
}
};
}
}
@ -0,0 +1,59 @@
package com.provectus.kafka.ui.serdes.builtin;
import com.google.protobuf.UnknownFieldSet;
import com.provectus.kafka.ui.exception.ValidationException;
import com.provectus.kafka.ui.serde.api.DeserializeResult;
import com.provectus.kafka.ui.serde.api.RecordHeaders;
import com.provectus.kafka.ui.serde.api.SchemaDescription;
import com.provectus.kafka.ui.serdes.BuiltInSerde;
import java.util.Map;
import java.util.Optional;
import lombok.SneakyThrows;
public class ProtobufRawSerde implements BuiltInSerde {
public static String name() {
return "ProtobufDecodeRaw";
}
@Override
public Optional<String> getDescription() {
return Optional.empty();
}
@Override
public Optional<SchemaDescription> getSchema(String topic, Target type) {
return Optional.empty();
}
@Override
public boolean canSerialize(String topic, Target type) {
return false;
}
@Override
public boolean canDeserialize(String topic, Target type) {
return true;
}
@Override
public Serializer serializer(String topic, Target type) {
throw new UnsupportedOperationException();
}
@Override
public Deserializer deserializer(String topic, Target type) {
return new Deserializer() {
@SneakyThrows
@Override
public DeserializeResult deserialize(RecordHeaders headers, byte[] data) {
try {
UnknownFieldSet unknownFields = UnknownFieldSet.parseFrom(data);
return new DeserializeResult(unknownFields.toString(), DeserializeResult.Type.STRING, Map.of());
} catch (Exception e) {
throw new ValidationException(e.getMessage());
}
}
};
}
}
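For reference (not part of this commit): a minimal sketch of what UnknownFieldSet.parseFrom gives the new ProtobufDecodeRaw serde, decoding field numbers and raw values with no schema. The bytes below are a hand-encoded example message (field #1 set to varint 150).

import com.google.protobuf.UnknownFieldSet;

public class ProtobufRawSketch {
  public static void main(String[] args) throws Exception {
    byte[] data = {0x08, (byte) 0x96, 0x01}; // tag for field 1 (varint) followed by the value 150
    UnknownFieldSet fields = UnknownFieldSet.parseFrom(data);
    System.out.print(fields); // prints "1: 150", usable even when no descriptor is available
  }
}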
@ -1,10 +1,8 @@
package com.provectus.kafka.ui.serdes.builtin;
import com.google.common.primitives.Longs;
import com.google.common.primitives.UnsignedInteger;
import com.google.common.primitives.UnsignedLong;
import com.provectus.kafka.ui.serde.api.DeserializeResult;
import com.provectus.kafka.ui.serde.api.RecordHeaders;
import com.provectus.kafka.ui.serde.api.SchemaDescription;
import com.provectus.kafka.ui.serdes.BuiltInSerde;
import java.util.Map;

@ -32,7 +30,7 @@ public class UInt64Serde implements BuiltInSerde {
+ " \"minimum\" : 0, "
+ " \"maximum\" : %s "
+ "}",
UnsignedInteger.MAX_VALUE
UnsignedLong.MAX_VALUE
),
Map.of()
)

@ -56,15 +54,11 @@ public class UInt64Serde implements BuiltInSerde {
@Override
public Deserializer deserializer(String topic, Target type) {
return new Deserializer() {
@Override
public DeserializeResult deserialize(RecordHeaders headers, byte[] data) {
return new DeserializeResult(
return (headers, data) ->
new DeserializeResult(
UnsignedLong.fromLongBits(Longs.fromByteArray(data)).toString(),
DeserializeResult.Type.JSON,
Map.of()
);
}
};
}
}
@ -50,41 +50,35 @@ public class UuidBinarySerde implements BuiltInSerde {
@Override
public Serializer serializer(String topic, Target type) {
return new Serializer() {
@Override
public byte[] serialize(String input) {
UUID uuid = UUID.fromString(input);
ByteBuffer bb = ByteBuffer.wrap(new byte[16]);
if (mostSignificantBitsFirst) {
bb.putLong(uuid.getMostSignificantBits());
bb.putLong(uuid.getLeastSignificantBits());
} else {
bb.putLong(uuid.getLeastSignificantBits());
bb.putLong(uuid.getMostSignificantBits());
}
return bb.array();
return input -> {
UUID uuid = UUID.fromString(input);
ByteBuffer bb = ByteBuffer.wrap(new byte[16]);
if (mostSignificantBitsFirst) {
bb.putLong(uuid.getMostSignificantBits());
bb.putLong(uuid.getLeastSignificantBits());
} else {
bb.putLong(uuid.getLeastSignificantBits());
bb.putLong(uuid.getMostSignificantBits());
}
return bb.array();
};
}
@Override
public Deserializer deserializer(String topic, Target type) {
return new Deserializer() {
@Override
public DeserializeResult deserialize(RecordHeaders headers, byte[] data) {
if (data.length != 16) {
throw new ValidationException("UUID data should be 16 bytes, but it is " + data.length);
}
ByteBuffer bb = ByteBuffer.wrap(data);
long msb = bb.getLong();
long lsb = bb.getLong();
UUID uuid = mostSignificantBitsFirst ? new UUID(msb, lsb) : new UUID(lsb, msb);
return new DeserializeResult(
uuid.toString(),
DeserializeResult.Type.STRING,
Map.of()
);
return (headers, data) -> {
if (data.length != 16) {
throw new ValidationException("UUID data should be 16 bytes, but it is " + data.length);
}
ByteBuffer bb = ByteBuffer.wrap(data);
long msb = bb.getLong();
long lsb = bb.getLong();
UUID uuid = mostSignificantBitsFirst ? new UUID(msb, lsb) : new UUID(lsb, msb);
return new DeserializeResult(
uuid.toString(),
DeserializeResult.Type.STRING,
Map.of()
);
};
}
}
@ -2,12 +2,14 @@ package com.provectus.kafka.ui.service;
import com.google.common.collect.Streams;
import com.google.common.collect.Table;
import com.provectus.kafka.ui.emitter.EnhancedConsumer;
import com.provectus.kafka.ui.model.ConsumerGroupOrderingDTO;
import com.provectus.kafka.ui.model.InternalConsumerGroup;
import com.provectus.kafka.ui.model.InternalTopicConsumerGroup;
import com.provectus.kafka.ui.model.KafkaCluster;
import com.provectus.kafka.ui.model.SortOrderDTO;
import com.provectus.kafka.ui.service.rbac.AccessControlService;
import com.provectus.kafka.ui.util.ApplicationMetrics;
import com.provectus.kafka.ui.util.SslPropertiesUtil;
import java.util.ArrayList;
import java.util.Collection;

@ -26,11 +28,8 @@ import org.apache.kafka.clients.admin.ConsumerGroupDescription;
import org.apache.kafka.clients.admin.ConsumerGroupListing;
import org.apache.kafka.clients.admin.OffsetSpec;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.ConsumerGroupState;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.BytesDeserializer;
import org.apache.kafka.common.utils.Bytes;
import org.springframework.stereotype.Service;
import reactor.core.publisher.Mono;

@ -248,25 +247,27 @@ public class ConsumerGroupService {
.flatMap(adminClient -> adminClient.deleteConsumerGroups(List.of(groupId)));
}
public KafkaConsumer<Bytes, Bytes> createConsumer(KafkaCluster cluster) {
public EnhancedConsumer createConsumer(KafkaCluster cluster) {
return createConsumer(cluster, Map.of());
}
public KafkaConsumer<Bytes, Bytes> createConsumer(KafkaCluster cluster,
Map<String, Object> properties) {
public EnhancedConsumer createConsumer(KafkaCluster cluster,
Map<String, Object> properties) {
Properties props = new Properties();
SslPropertiesUtil.addKafkaSslProperties(cluster.getOriginalProperties().getSsl(), props);
props.putAll(cluster.getProperties());
props.put(ConsumerConfig.CLIENT_ID_CONFIG, "kafka-ui-consumer-" + System.currentTimeMillis());
props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, cluster.getBootstrapServers());
props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, BytesDeserializer.class);
props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, BytesDeserializer.class);
props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
props.put(ConsumerConfig.ALLOW_AUTO_CREATE_TOPICS_CONFIG, "false");
props.putAll(properties);
return new KafkaConsumer<>(props);
return new EnhancedConsumer(
props,
cluster.getPollingSettings().getPollingThrottler(),
ApplicationMetrics.forCluster(cluster)
);
}
}
@ -102,7 +102,8 @@ public class DeserializationService implements Closeable {
valueSerde.deserializer(topic, Serde.Target.VALUE),
fallbackSerde.getName(),
fallbackSerde.deserializer(topic, Serde.Target.KEY),
fallbackSerde.deserializer(topic, Serde.Target.VALUE)
fallbackSerde.deserializer(topic, Serde.Target.VALUE),
cluster.getMasking().getMaskerForTopic(topic)
);
}
@ -28,7 +28,6 @@ import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.function.Predicate;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import javax.annotation.Nullable;
import lombok.RequiredArgsConstructor;

@ -39,7 +38,6 @@ import org.springframework.stereotype.Service;
import org.springframework.web.reactive.function.client.WebClientResponseException;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;
import reactor.util.function.Tuples;
@Service
@Slf4j

@ -61,39 +59,22 @@ public class KafkaConnectService {
public Flux<FullConnectorInfoDTO> getAllConnectors(final KafkaCluster cluster,
@Nullable final String search) {
return getConnects(cluster)
.flatMap(connect -> getConnectorNames(cluster, connect.getName()).map(cn -> Tuples.of(connect.getName(), cn)))
.flatMap(pair -> getConnector(cluster, pair.getT1(), pair.getT2()))
.flatMap(connector ->
getConnectorConfig(cluster, connector.getConnect(), connector.getName())
.map(config -> InternalConnectInfo.builder()
.connector(connector)
.config(config)
.build()
)
)
.flatMap(connectInfo -> {
ConnectorDTO connector = connectInfo.getConnector();
return getConnectorTasks(cluster, connector.getConnect(), connector.getName())
.collectList()
.map(tasks -> InternalConnectInfo.builder()
.connector(connector)
.config(connectInfo.getConfig())
.tasks(tasks)
.build()
);
})
.flatMap(connectInfo -> {
ConnectorDTO connector = connectInfo.getConnector();
return getConnectorTopics(cluster, connector.getConnect(), connector.getName())
.map(ct -> InternalConnectInfo.builder()
.connector(connector)
.config(connectInfo.getConfig())
.tasks(connectInfo.getTasks())
.topics(ct.getTopics())
.build()
);
})
.map(kafkaConnectMapper::fullConnectorInfoFromTuple)
.flatMap(connect ->
getConnectorNamesWithErrorsSuppress(cluster, connect.getName())
.flatMap(connectorName ->
Mono.zip(
getConnector(cluster, connect.getName(), connectorName),
getConnectorConfig(cluster, connect.getName(), connectorName),
getConnectorTasks(cluster, connect.getName(), connectorName).collectList(),
getConnectorTopics(cluster, connect.getName(), connectorName)
).map(tuple ->
InternalConnectInfo.builder()
.connector(tuple.getT1())
.config(tuple.getT2())
.tasks(tuple.getT3())
.topics(tuple.getT4().getTopics())
.build())))
.map(kafkaConnectMapper::fullConnectorInfo)
.filter(matchesSearchTerm(search));
}

@ -132,6 +113,11 @@ public class KafkaConnectService {
.flatMapMany(Flux::fromIterable);
}
// returns empty flux if there was an error communicating with Connect
public Flux<String> getConnectorNamesWithErrorsSuppress(KafkaCluster cluster, String connectName) {
return getConnectorNames(cluster, connectName).onErrorComplete();
}
@SneakyThrows
private List<String> parseConnectorsNamesStringToList(String json) {
return objectMapper.readValue(json, new TypeReference<>() {
@ -2,10 +2,9 @@ package com.provectus.kafka.ui.service;
import com.google.common.util.concurrent.RateLimiter;
import com.provectus.kafka.ui.config.ClustersProperties;
import com.provectus.kafka.ui.emitter.BackwardRecordEmitter;
import com.provectus.kafka.ui.emitter.ForwardRecordEmitter;
import com.provectus.kafka.ui.emitter.BackwardEmitter;
import com.provectus.kafka.ui.emitter.ForwardEmitter;
import com.provectus.kafka.ui.emitter.MessageFilters;
import com.provectus.kafka.ui.emitter.MessagesProcessing;
import com.provectus.kafka.ui.emitter.TailingEmitter;
import com.provectus.kafka.ui.exception.TopicNotFoundException;
import com.provectus.kafka.ui.exception.ValidationException;

@ -18,7 +17,6 @@ import com.provectus.kafka.ui.model.SmartFilterTestExecutionDTO;
import com.provectus.kafka.ui.model.SmartFilterTestExecutionResultDTO;
import com.provectus.kafka.ui.model.TopicMessageDTO;
import com.provectus.kafka.ui.model.TopicMessageEventDTO;
import com.provectus.kafka.ui.serde.api.Serde;
import com.provectus.kafka.ui.serdes.ProducerRecordCreator;
import com.provectus.kafka.ui.util.SslPropertiesUtil;
import java.time.Instant;

@ -45,7 +43,6 @@ import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.ByteArraySerializer;
import org.springframework.stereotype.Service;
import reactor.core.publisher.Flux;
import reactor.core.publisher.FluxSink;
import reactor.core.publisher.Mono;
import reactor.core.scheduler.Schedulers;

@ -236,54 +233,24 @@ public class MessagesService {
@Nullable String keySerde,
@Nullable String valueSerde) {
java.util.function.Consumer<? super FluxSink<TopicMessageEventDTO>> emitter;
var processing = new MessagesProcessing(
deserializationService.deserializerFor(cluster, topic, keySerde, valueSerde),
getMsgFilter(query, filterQueryType),
seekDirection == SeekDirectionDTO.TAILING ? null : limit
);
if (seekDirection.equals(SeekDirectionDTO.FORWARD)) {
emitter = new ForwardRecordEmitter(
var deserializer = deserializationService.deserializerFor(cluster, topic, keySerde, valueSerde);
var filter = getMsgFilter(query, filterQueryType);
var emitter = switch (seekDirection) {
case FORWARD -> new ForwardEmitter(
() -> consumerGroupService.createConsumer(cluster),
consumerPosition,
processing,
cluster.getPollingSettings()
consumerPosition, limit, deserializer, filter, cluster.getPollingSettings()
);
} else if (seekDirection.equals(SeekDirectionDTO.BACKWARD)) {
emitter = new BackwardRecordEmitter(
case BACKWARD -> new BackwardEmitter(
() -> consumerGroupService.createConsumer(cluster),
consumerPosition,
limit,
processing,
cluster.getPollingSettings()
consumerPosition, limit, deserializer, filter, cluster.getPollingSettings()
);
} else {
emitter = new TailingEmitter(
case TAILING -> new TailingEmitter(
() -> consumerGroupService.createConsumer(cluster),
consumerPosition,
processing,
cluster.getPollingSettings()
consumerPosition, deserializer, filter, cluster.getPollingSettings()
);
}
return Flux.create(emitter)
.map(getDataMasker(cluster, topic))
.map(throttleUiPublish(seekDirection));
}
private UnaryOperator<TopicMessageEventDTO> getDataMasker(KafkaCluster cluster, String topicName) {
var keyMasker = cluster.getMasking().getMaskingFunction(topicName, Serde.Target.KEY);
var valMasker = cluster.getMasking().getMaskingFunction(topicName, Serde.Target.VALUE);
return evt -> {
if (evt.getType() != TopicMessageEventDTO.TypeEnum.MESSAGE) {
return evt;
}
return evt.message(
evt.getMessage()
.key(keyMasker.apply(evt.getMessage().getKey()))
.content(valMasker.apply(evt.getMessage().getContent())));
};
return Flux.create(emitter)
.map(throttleUiPublish(seekDirection));
}
private Predicate<TopicMessageDTO> getMsgFilter(String query,
@ -15,6 +15,8 @@ import com.provectus.kafka.ui.exception.ValidationException;
import com.provectus.kafka.ui.util.KafkaVersion;
import com.provectus.kafka.ui.util.annotation.KafkaClientInternalsDependant;
import java.io.Closeable;
import java.time.Duration;
import java.time.temporal.ChronoUnit;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;

@ -129,38 +131,41 @@ public class ReactiveAdminClient implements Closeable {
Set<SupportedFeature> features,
boolean topicDeletionIsAllowed) {
private static Mono<ConfigRelatedInfo> extract(AdminClient ac, int controllerId) {
return loadBrokersConfig(ac, List.of(controllerId))
.map(map -> map.isEmpty() ? List.<ConfigEntry>of() : map.get(controllerId))
.flatMap(configs -> {
String version = "1.0-UNKNOWN";
boolean topicDeletionEnabled = true;
for (ConfigEntry entry : configs) {
if (entry.name().contains("inter.broker.protocol.version")) {
version = entry.value();
}
if (entry.name().equals("delete.topic.enable")) {
topicDeletionEnabled = Boolean.parseBoolean(entry.value());
}
}
var builder = ConfigRelatedInfo.builder()
.version(version)
.topicDeletionIsAllowed(topicDeletionEnabled);
return SupportedFeature.forVersion(ac, version)
.map(features -> builder.features(features).build());
});
static final Duration UPDATE_DURATION = Duration.of(1, ChronoUnit.HOURS);
private static Mono<ConfigRelatedInfo> extract(AdminClient ac) {
return ReactiveAdminClient.describeClusterImpl(ac, Set.of())
.flatMap(desc -> {
// choosing node from which we will get configs (starting with controller)
var targetNodeId = Optional.ofNullable(desc.controller)
.map(Node::id)
.orElse(desc.getNodes().iterator().next().id());
return loadBrokersConfig(ac, List.of(targetNodeId))
.map(map -> map.isEmpty() ? List.<ConfigEntry>of() : map.get(targetNodeId))
.flatMap(configs -> {
String version = "1.0-UNKNOWN";
boolean topicDeletionEnabled = true;
for (ConfigEntry entry : configs) {
if (entry.name().contains("inter.broker.protocol.version")) {
version = entry.value();
}
if (entry.name().equals("delete.topic.enable")) {
topicDeletionEnabled = Boolean.parseBoolean(entry.value());
}
}
final String finalVersion = version;
final boolean finalTopicDeletionEnabled = topicDeletionEnabled;
return SupportedFeature.forVersion(ac, version)
.map(features -> new ConfigRelatedInfo(finalVersion, features, finalTopicDeletionEnabled));
});
})
.cache(UPDATE_DURATION);
}
}
public static Mono<ReactiveAdminClient> create(AdminClient adminClient) {
return describeClusterImpl(adminClient, Set.of())
// choosing node from which we will get configs (starting with controller)
.flatMap(descr -> descr.controller != null
? Mono.just(descr.controller)
: Mono.justOrEmpty(descr.nodes.stream().findFirst())
)
.flatMap(node -> ConfigRelatedInfo.extract(adminClient, node.id()))
.map(info -> new ReactiveAdminClient(adminClient, info));
Mono<ConfigRelatedInfo> configRelatedInfoMono = ConfigRelatedInfo.extract(adminClient);
return configRelatedInfoMono.map(info -> new ReactiveAdminClient(adminClient, configRelatedInfoMono, info));
}

@ -170,7 +175,7 @@ public class ReactiveAdminClient implements Closeable {
.doOnError(th -> !(th instanceof SecurityDisabledException)
&& !(th instanceof InvalidRequestException)
&& !(th instanceof UnsupportedVersionException),
th -> log.warn("Error checking if security enabled", th))
th -> log.debug("Error checking if security enabled", th))
.onErrorReturn(false);
}

@ -202,6 +207,8 @@ public class ReactiveAdminClient implements Closeable {
@Getter(AccessLevel.PACKAGE) // visible for testing
private final AdminClient client;
private final Mono<ConfigRelatedInfo> configRelatedInfoMono;
private volatile ConfigRelatedInfo configRelatedInfo;
public Set<SupportedFeature> getClusterFeatures() {

@ -228,7 +235,7 @@ public class ReactiveAdminClient implements Closeable {
if (controller == null) {
return Mono.empty();
}
return ConfigRelatedInfo.extract(client, controller.id())
return configRelatedInfoMono
.doOnNext(info -> this.configRelatedInfo = info)
.then();
}
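For context on the .cache(UPDATE_DURATION) call above (not part of this commit): a minimal Reactor sketch of how Mono.cache(ttl) memoizes the first emitted value, which is what lets configRelatedInfoMono be re-subscribed cheaply until the TTL expires.

import java.time.Duration;
import reactor.core.publisher.Mono;

public class MonoCacheSketch {
  public static void main(String[] args) {
    Mono<Long> cached = Mono.fromSupplier(System::nanoTime)
        .cache(Duration.ofHours(1)); // first subscriber triggers the supplier; later ones reuse the cached value

    Long first = cached.block();
    Long second = cached.block();
    System.out.println(first.equals(second)); // true: the second subscription was served from the cache
  }
}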
@ -125,7 +125,7 @@ public class SchemaRegistryService {
.onErrorMap(WebClientResponseException.Conflict.class,
th -> new SchemaCompatibilityException())
.onErrorMap(WebClientResponseException.UnprocessableEntity.class,
th -> new ValidationException("Invalid schema"))
th -> new ValidationException("Invalid schema. Error from registry: " + th.getResponseBodyAsString()))
.then(getLatestSchemaVersionBySubject(cluster, subject));
}
@ -1,16 +1,44 @@
package com.provectus.kafka.ui.service.acl;
import static org.apache.kafka.common.acl.AclOperation.ALL;
import static org.apache.kafka.common.acl.AclOperation.CREATE;
import static org.apache.kafka.common.acl.AclOperation.DESCRIBE;
import static org.apache.kafka.common.acl.AclOperation.IDEMPOTENT_WRITE;
import static org.apache.kafka.common.acl.AclOperation.READ;
import static org.apache.kafka.common.acl.AclOperation.WRITE;
import static org.apache.kafka.common.acl.AclPermissionType.ALLOW;
import static org.apache.kafka.common.resource.PatternType.LITERAL;
import static org.apache.kafka.common.resource.PatternType.PREFIXED;
import static org.apache.kafka.common.resource.ResourceType.CLUSTER;
import static org.apache.kafka.common.resource.ResourceType.GROUP;
import static org.apache.kafka.common.resource.ResourceType.TOPIC;
import static org.apache.kafka.common.resource.ResourceType.TRANSACTIONAL_ID;
import com.google.common.collect.Sets;
import com.provectus.kafka.ui.model.CreateConsumerAclDTO;
import com.provectus.kafka.ui.model.CreateProducerAclDTO;
import com.provectus.kafka.ui.model.CreateStreamAppAclDTO;
import com.provectus.kafka.ui.model.KafkaCluster;
import com.provectus.kafka.ui.service.AdminClientService;
import com.provectus.kafka.ui.service.ReactiveAdminClient;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Comparator;
import java.util.List;
import java.util.Optional;
import java.util.Set;
import javax.annotation.Nullable;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.common.acl.AccessControlEntry;
import org.apache.kafka.common.acl.AclBinding;
import org.apache.kafka.common.acl.AclOperation;
import org.apache.kafka.common.resource.Resource;
import org.apache.kafka.common.resource.ResourcePattern;
import org.apache.kafka.common.resource.ResourcePatternFilter;
import org.apache.kafka.common.resource.ResourceType;
import org.springframework.stereotype.Service;
import org.springframework.util.CollectionUtils;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;

@ -22,11 +50,14 @@ public class AclsService {
private final AdminClientService adminClientService;
public Mono<Void> createAcl(KafkaCluster cluster, AclBinding aclBinding) {
var aclString = AclCsv.createAclString(aclBinding);
log.info("CREATING ACL: [{}]", aclString);
return adminClientService.get(cluster)
.flatMap(ac -> ac.createAcls(List.of(aclBinding)))
.doOnSuccess(v -> log.info("ACL CREATED: [{}]", aclString));
.flatMap(ac -> createAclsWithLogging(ac, List.of(aclBinding)));
}
private Mono<Void> createAclsWithLogging(ReactiveAdminClient ac, Collection<AclBinding> bindings) {
bindings.forEach(b -> log.info("CREATING ACL: [{}]", AclCsv.createAclString(b)));
return ac.createAcls(bindings)
.doOnSuccess(v -> bindings.forEach(b -> log.info("ACL CREATED: [{}]", AclCsv.createAclString(b))));
}
public Mono<Void> deleteAcl(KafkaCluster cluster, AclBinding aclBinding) {

@ -92,4 +123,150 @@ public class AclsService {
}
}
// creates allow binding for resources by prefix or specific names list
private List<AclBinding> createAllowBindings(ResourceType resourceType,
List<AclOperation> opsToAllow,
String principal,
String host,
@Nullable String resourcePrefix,
@Nullable Collection<String> resourceNames) {
List<AclBinding> bindings = new ArrayList<>();
if (resourcePrefix != null) {
for (var op : opsToAllow) {
bindings.add(
new AclBinding(
new ResourcePattern(resourceType, resourcePrefix, PREFIXED),
new AccessControlEntry(principal, host, op, ALLOW)));
}
}
if (!CollectionUtils.isEmpty(resourceNames)) {
resourceNames.stream()
.distinct()
.forEach(resource ->
opsToAllow.forEach(op ->
bindings.add(
new AclBinding(
new ResourcePattern(resourceType, resource, LITERAL),
new AccessControlEntry(principal, host, op, ALLOW)))));
}
return bindings;
}
public Mono<Void> createConsumerAcl(KafkaCluster cluster, CreateConsumerAclDTO request) {
return adminClientService.get(cluster)
.flatMap(ac -> createAclsWithLogging(ac, createConsumerBindings(request)))
.then();
}
//Read, Describe on topics, Read on consumerGroups
private List<AclBinding> createConsumerBindings(CreateConsumerAclDTO request) {
List<AclBinding> bindings = new ArrayList<>();
bindings.addAll(
createAllowBindings(TOPIC,
List.of(READ, DESCRIBE),
request.getPrincipal(),
request.getHost(),
request.getTopicsPrefix(),
request.getTopics()));
bindings.addAll(
createAllowBindings(
GROUP,
List.of(READ),
request.getPrincipal(),
request.getHost(),
request.getConsumerGroupsPrefix(),
request.getConsumerGroups()));
return bindings;
}
public Mono<Void> createProducerAcl(KafkaCluster cluster, CreateProducerAclDTO request) {
return adminClientService.get(cluster)
.flatMap(ac -> createAclsWithLogging(ac, createProducerBindings(request)))
.then();
}
//Write, Describe, Create permission on topics, Write, Describe on transactionalIds
//IDEMPOTENT_WRITE on cluster if idempotent is enabled
private List<AclBinding> createProducerBindings(CreateProducerAclDTO request) {
List<AclBinding> bindings = new ArrayList<>();
bindings.addAll(
createAllowBindings(
TOPIC,
List.of(WRITE, DESCRIBE, CREATE),
request.getPrincipal(),
request.getHost(),
request.getTopicsPrefix(),
request.getTopics()));
bindings.addAll(
createAllowBindings(
TRANSACTIONAL_ID,
List.of(WRITE, DESCRIBE),
request.getPrincipal(),
request.getHost(),
request.getTransactionsIdPrefix(),
Optional.ofNullable(request.getTransactionalId()).map(List::of).orElse(null)));
if (Boolean.TRUE.equals(request.getIdempotent())) {
bindings.addAll(
createAllowBindings(
CLUSTER,
List.of(IDEMPOTENT_WRITE),
request.getPrincipal(),
request.getHost(),
null,
List.of(Resource.CLUSTER_NAME))); // cluster name is a const string in ACL api
}
return bindings;
}
public Mono<Void> createStreamAppAcl(KafkaCluster cluster, CreateStreamAppAclDTO request) {
return adminClientService.get(cluster)
.flatMap(ac -> createAclsWithLogging(ac, createStreamAppBindings(request)))
.then();
}
// Read on input topics, Write on output topics
// ALL on applicationId-prefixed Groups and Topics
private List<AclBinding> createStreamAppBindings(CreateStreamAppAclDTO request) {
List<AclBinding> bindings = new ArrayList<>();
bindings.addAll(
createAllowBindings(
TOPIC,
List.of(READ),
request.getPrincipal(),
request.getHost(),
null,
request.getInputTopics()));
bindings.addAll(
createAllowBindings(
TOPIC,
List.of(WRITE),
request.getPrincipal(),
request.getHost(),
null,
request.getOutputTopics()));
bindings.addAll(
createAllowBindings(
GROUP,
List.of(ALL),
request.getPrincipal(),
request.getHost(),
request.getApplicationId(),
null));
bindings.addAll(
createAllowBindings(
TOPIC,
List.of(ALL),
request.getPrincipal(),
request.getHost(),
request.getApplicationId(),
null));
return bindings;
}
}
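For reference (not part of this commit): a minimal sketch of the kafka-clients ACL types used by createAllowBindings above, building one prefixed "allow READ on topics starting with orders-" binding by hand. The principal and prefix are made-up example values.

import org.apache.kafka.common.acl.AccessControlEntry;
import org.apache.kafka.common.acl.AclBinding;
import org.apache.kafka.common.acl.AclOperation;
import org.apache.kafka.common.acl.AclPermissionType;
import org.apache.kafka.common.resource.PatternType;
import org.apache.kafka.common.resource.ResourcePattern;
import org.apache.kafka.common.resource.ResourceType;

public class AclBindingSketch {
  public static void main(String[] args) {
    AclBinding binding = new AclBinding(
        new ResourcePattern(ResourceType.TOPIC, "orders-", PatternType.PREFIXED),
        new AccessControlEntry("User:analytics", "*", AclOperation.READ, AclPermissionType.ALLOW));
    System.out.println(binding); // the same shape createAllowBindings produces for the prefixed case
  }
}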
@ -92,14 +92,12 @@ class AnalysisTasksStore {
.result(completedState);
}
@Value
@Builder(toBuilder = true)
private static class RunningAnalysis {
Instant startedAt;
double completenessPercent;
long msgsScanned;
long bytesScanned;
Closeable task;
private record RunningAnalysis(Instant startedAt,
double completenessPercent,
long msgsScanned,
long bytesScanned,
Closeable task) {
TopicAnalysisProgressDTO toDto() {
return new TopicAnalysisProgressDTO()
@ -1,10 +1,11 @@
package com.provectus.kafka.ui.service.analyze;
import com.provectus.kafka.ui.emitter.EmptyPollsCounter;
import com.provectus.kafka.ui.emitter.OffsetsInfo;
import com.provectus.kafka.ui.emitter.PollingSettings;
import com.provectus.kafka.ui.emitter.PollingThrottler;
import static com.provectus.kafka.ui.model.SeekTypeDTO.BEGINNING;
import com.provectus.kafka.ui.emitter.EnhancedConsumer;
import com.provectus.kafka.ui.emitter.SeekOperations;
import com.provectus.kafka.ui.exception.TopicAnalysisException;
import com.provectus.kafka.ui.model.ConsumerPosition;
import com.provectus.kafka.ui.model.KafkaCluster;
import com.provectus.kafka.ui.model.TopicAnalysisDTO;
import com.provectus.kafka.ui.service.ConsumerGroupService;

@ -15,18 +16,14 @@ import java.time.Instant;
import java.util.HashMap;
import java.util.Map;
import java.util.Optional;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.errors.InterruptException;
import org.apache.kafka.common.errors.WakeupException;
import org.apache.kafka.common.utils.Bytes;
import org.springframework.stereotype.Component;
import reactor.core.publisher.Mono;
import reactor.core.scheduler.Scheduler;
import reactor.core.scheduler.Schedulers;

@ -35,6 +32,14 @@ import reactor.core.scheduler.Schedulers;
@RequiredArgsConstructor
public class TopicAnalysisService {
private static final Scheduler SCHEDULER = Schedulers.newBoundedElastic(
Schedulers.DEFAULT_BOUNDED_ELASTIC_SIZE,
Schedulers.DEFAULT_BOUNDED_ELASTIC_QUEUESIZE,
"topic-analysis-tasks",
10, //ttl for idle threads (in sec)
true //daemon
);
private final AnalysisTasksStore analysisTasksStore = new AnalysisTasksStore();
private final TopicsService topicsService;

@ -42,30 +47,18 @@ public class TopicAnalysisService {
public Mono<Void> analyze(KafkaCluster cluster, String topicName) {
return topicsService.getTopicDetails(cluster, topicName)
.doOnNext(topic ->
startAnalysis(
cluster,
topicName,
topic.getPartitionCount(),
topic.getPartitions().values()
.stream()
.mapToLong(p -> p.getOffsetMax() - p.getOffsetMin())
.sum()
)
).then();
.doOnNext(topic -> startAnalysis(cluster, topicName))
.then();
}
private synchronized void startAnalysis(KafkaCluster cluster,
String topic,
int partitionsCnt,
long approxNumberOfMsgs) {
private synchronized void startAnalysis(KafkaCluster cluster, String topic) {
var topicId = new TopicIdentity(cluster, topic);
if (analysisTasksStore.isAnalysisInProgress(topicId)) {
throw new TopicAnalysisException("Topic is already analyzing");
}
var task = new AnalysisTask(cluster, topicId, partitionsCnt, approxNumberOfMsgs, cluster.getPollingSettings());
var task = new AnalysisTask(cluster, topicId);
analysisTasksStore.registerNewTask(topicId, task);
Schedulers.boundedElastic().schedule(task);
SCHEDULER.schedule(task);
}
public void cancelAnalysis(KafkaCluster cluster, String topicName) {

@ -81,21 +74,14 @@ public class TopicAnalysisService {
private final Instant startedAt = Instant.now();
private final TopicIdentity topicId;
private final int partitionsCnt;
private final long approxNumberOfMsgs;
private final EmptyPollsCounter emptyPollsCounter;
private final PollingThrottler throttler;
private final TopicAnalysisStats totalStats = new TopicAnalysisStats();
private final Map<Integer, TopicAnalysisStats> partitionStats = new HashMap<>();
private final KafkaConsumer<Bytes, Bytes> consumer;
private final EnhancedConsumer consumer;
AnalysisTask(KafkaCluster cluster, TopicIdentity topicId, int partitionsCnt,
long approxNumberOfMsgs, PollingSettings pollingSettings) {
AnalysisTask(KafkaCluster cluster, TopicIdentity topicId) {
this.topicId = topicId;
this.approxNumberOfMsgs = approxNumberOfMsgs;
this.partitionsCnt = partitionsCnt;
this.consumer = consumerGroupService.createConsumer(
cluster,
// to improve polling throughput

@ -104,8 +90,6 @@ public class TopicAnalysisService {
ConsumerConfig.MAX_POLL_RECORDS_CONFIG, "100000"
)
);
this.throttler = pollingSettings.getPollingThrottler();
this.emptyPollsCounter = pollingSettings.createEmptyPollsCounter();
}
@Override

@ -117,24 +101,20 @@ public class TopicAnalysisService {
public void run() {
try {
log.info("Starting {} topic analysis", topicId);
var topicPartitions = IntStream.range(0, partitionsCnt)
.peek(i -> partitionStats.put(i, new TopicAnalysisStats()))
.mapToObj(i -> new TopicPartition(topicId.topicName, i))
.collect(Collectors.toList());
consumer.partitionsFor(topicId.topicName)
.forEach(tp -> partitionStats.put(tp.partition(), new TopicAnalysisStats()));
consumer.assign(topicPartitions);
consumer.seekToBeginning(topicPartitions);
var seekOperations = SeekOperations.create(consumer, new ConsumerPosition(BEGINNING, topicId.topicName, null));
long summaryOffsetsRange = seekOperations.summaryOffsetsRange();
seekOperations.assignAndSeekNonEmptyPartitions();
var offsetsInfo = new OffsetsInfo(consumer, topicId.topicName);
while (!offsetsInfo.assignedPartitionsFullyPolled() && !emptyPollsCounter.noDataEmptyPollsReached()) {
var polled = consumer.poll(Duration.ofSeconds(3));
throttler.throttleAfterPoll(polled);
emptyPollsCounter.count(polled);
while (!seekOperations.assignedPartitionsFullyPolled()) {
var polled = consumer.pollEnhanced(Duration.ofSeconds(3));
polled.forEach(r -> {
totalStats.apply(r);
partitionStats.get(r.partition()).apply(r);
});
updateProgress();
updateProgress(seekOperations.offsetsProcessedFromSeek(), summaryOffsetsRange);
}
analysisTasksStore.setAnalysisResult(topicId, startedAt, totalStats, partitionStats);
log.info("{} topic analysis finished", topicId);

@ -150,13 +130,13 @@ public class TopicAnalysisService {
}
}
private void updateProgress() {
if (totalStats.totalMsgs > 0 && approxNumberOfMsgs != 0) {
private void updateProgress(long processedOffsets, long summaryOffsetsRange) {
if (processedOffsets > 0 && summaryOffsetsRange != 0) {
analysisTasksStore.updateProgress(
topicId,
totalStats.totalMsgs,
totalStats.keysSize.sum + totalStats.valuesSize.sum,
Math.min(100.0, (((double) totalStats.totalMsgs) / approxNumberOfMsgs) * 100)
Math.min(100.0, (((double) processedOffsets) / summaryOffsetsRange) * 100)
);
}
}
@@ -6,6 +6,7 @@ import com.provectus.kafka.ui.exception.CustomBaseException;
  import com.provectus.kafka.ui.exception.ValidationException;
  import com.provectus.kafka.ui.model.rbac.AccessContext;
  import com.provectus.kafka.ui.model.rbac.Resource;
+ import com.provectus.kafka.ui.model.rbac.permission.PermissibleAction;
  import java.util.ArrayList;
  import java.util.LinkedHashMap;
  import java.util.List;

@@ -33,16 +34,20 @@ record AuditRecord(String timestamp,
    return MAPPER.writeValueAsString(this);
  }

- record AuditResource(String accessType, Resource type, @Nullable Object id) {
+ record AuditResource(String accessType, boolean alter, Resource type, @Nullable Object id) {
+
+   private static AuditResource create(PermissibleAction action, Resource type, @Nullable Object id) {
+     return new AuditResource(action.name(), action.isAlter(), type, id);
+   }

    static List<AuditResource> getAccessedResources(AccessContext ctx) {
      List<AuditResource> resources = new ArrayList<>();
      ctx.getClusterConfigActions()
-         .forEach(a -> resources.add(new AuditResource(a.name(), Resource.CLUSTERCONFIG, null)));
+         .forEach(a -> resources.add(create(a, Resource.CLUSTERCONFIG, null)));
      ctx.getTopicActions()
-         .forEach(a -> resources.add(new AuditResource(a.name(), Resource.TOPIC, nameId(ctx.getTopic()))));
+         .forEach(a -> resources.add(create(a, Resource.TOPIC, nameId(ctx.getTopic()))));
      ctx.getConsumerGroupActions()
-         .forEach(a -> resources.add(new AuditResource(a.name(), Resource.CONSUMER, nameId(ctx.getConsumerGroup()))));
+         .forEach(a -> resources.add(create(a, Resource.CONSUMER, nameId(ctx.getConsumerGroup()))));
      ctx.getConnectActions()
          .forEach(a -> {
            Map<String, String> resourceId = new LinkedHashMap<>();

@@ -50,16 +55,16 @@ record AuditRecord(String timestamp,
            if (ctx.getConnector() != null) {
              resourceId.put("connector", ctx.getConnector());
            }
-           resources.add(new AuditResource(a.name(), Resource.CONNECT, resourceId));
+           resources.add(create(a, Resource.CONNECT, resourceId));
          });
      ctx.getSchemaActions()
-         .forEach(a -> resources.add(new AuditResource(a.name(), Resource.SCHEMA, nameId(ctx.getSchema()))));
+         .forEach(a -> resources.add(create(a, Resource.SCHEMA, nameId(ctx.getSchema()))));
      ctx.getKsqlActions()
-         .forEach(a -> resources.add(new AuditResource(a.name(), Resource.KSQL, null)));
+         .forEach(a -> resources.add(create(a, Resource.KSQL, null)));
      ctx.getAclActions()
-         .forEach(a -> resources.add(new AuditResource(a.name(), Resource.ACL, null)));
+         .forEach(a -> resources.add(create(a, Resource.ACL, null)));
      ctx.getAuditAction()
-         .forEach(a -> resources.add(new AuditResource(a.name(), Resource.AUDIT, null)));
+         .forEach(a -> resources.add(create(a, Resource.AUDIT, null)));
      return resources;
    }

@ -1,11 +1,11 @@
|
|||
package com.provectus.kafka.ui.service.audit;
|
||||
|
||||
import static com.provectus.kafka.ui.config.ClustersProperties.AuditProperties.LogLevel.ALTER_ONLY;
|
||||
import static com.provectus.kafka.ui.service.MessagesService.createProducer;
|
||||
|
||||
import com.google.common.annotations.VisibleForTesting;
|
||||
import com.provectus.kafka.ui.config.ClustersProperties;
|
||||
import com.provectus.kafka.ui.config.auth.AuthenticatedUser;
|
||||
import com.provectus.kafka.ui.config.auth.RbacUser;
|
||||
import com.provectus.kafka.ui.model.KafkaCluster;
|
||||
import com.provectus.kafka.ui.model.rbac.AccessContext;
|
||||
import com.provectus.kafka.ui.service.AdminClientService;
|
||||
|
@ -13,12 +13,14 @@ import com.provectus.kafka.ui.service.ClustersStorage;
|
|||
import com.provectus.kafka.ui.service.ReactiveAdminClient;
|
||||
import java.io.Closeable;
|
||||
import java.io.IOException;
|
||||
import java.time.Duration;
|
||||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
import java.util.Optional;
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
import java.util.function.Supplier;
|
||||
import java.util.stream.Collectors;
|
||||
import javax.annotation.Nullable;
|
||||
import lombok.extern.slf4j.Slf4j;
|
||||
import org.apache.kafka.clients.producer.KafkaProducer;
|
||||
|
@ -26,7 +28,9 @@ import org.apache.kafka.clients.producer.ProducerConfig;
|
|||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
import org.springframework.beans.factory.annotation.Autowired;
|
||||
import org.springframework.security.core.GrantedAuthority;
|
||||
import org.springframework.security.core.context.SecurityContext;
|
||||
import org.springframework.security.core.userdetails.UserDetails;
|
||||
import org.springframework.stereotype.Service;
|
||||
import reactor.core.publisher.Mono;
|
||||
import reactor.core.publisher.Signal;
|
||||
|
@ -37,6 +41,7 @@ import reactor.core.publisher.Signal;
|
|||
public class AuditService implements Closeable {
|
||||
|
||||
private static final Mono<AuthenticatedUser> NO_AUTH_USER = Mono.just(new AuthenticatedUser("Unknown", Set.of()));
|
||||
private static final Duration BLOCK_TIMEOUT = Duration.ofSeconds(5);
|
||||
|
||||
private static final String DEFAULT_AUDIT_TOPIC_NAME = "__kui-audit-log";
|
||||
private static final int DEFAULT_AUDIT_TOPIC_PARTITIONS = 1;
|
||||
|
@ -56,14 +61,8 @@ public class AuditService implements Closeable {
|
|||
public AuditService(AdminClientService adminClientService, ClustersStorage clustersStorage) {
|
||||
Map<String, AuditWriter> auditWriters = new HashMap<>();
|
||||
for (var cluster : clustersStorage.getKafkaClusters()) {
|
||||
ReactiveAdminClient adminClient;
|
||||
try {
|
||||
adminClient = adminClientService.get(cluster).block();
|
||||
} catch (Exception e) {
|
||||
printAuditInitError(cluster, "Error connect to cluster", e);
|
||||
continue;
|
||||
}
|
||||
createAuditWriter(cluster, adminClient, () -> createProducer(cluster, AUDIT_PRODUCER_CONFIG))
|
||||
Supplier<ReactiveAdminClient> adminClientSupplier = () -> adminClientService.get(cluster).block(BLOCK_TIMEOUT);
|
||||
createAuditWriter(cluster, adminClientSupplier, () -> createProducer(cluster, AUDIT_PRODUCER_CONFIG))
|
||||
.ifPresent(writer -> auditWriters.put(cluster.getName(), writer));
|
||||
}
|
||||
this.auditWriters = auditWriters;
|
||||
|
@ -76,7 +75,7 @@ public class AuditService implements Closeable {
|
|||
|
||||
@VisibleForTesting
|
||||
static Optional<AuditWriter> createAuditWriter(KafkaCluster cluster,
|
||||
ReactiveAdminClient ac,
|
||||
Supplier<ReactiveAdminClient> acSupplier,
|
||||
Supplier<KafkaProducer<byte[], byte[]>> producerFactory) {
|
||||
var auditProps = cluster.getOriginalProperties().getAudit();
|
||||
if (auditProps == null) {
|
||||
|
@ -84,35 +83,59 @@ public class AuditService implements Closeable {
|
|||
}
|
||||
boolean topicAudit = Optional.ofNullable(auditProps.getTopicAuditEnabled()).orElse(false);
|
||||
boolean consoleAudit = Optional.ofNullable(auditProps.getConsoleAuditEnabled()).orElse(false);
|
||||
boolean alterLogOnly = Optional.ofNullable(auditProps.getLevel()).map(lvl -> lvl == ALTER_ONLY).orElse(true);
|
||||
if (!topicAudit && !consoleAudit) {
|
||||
return Optional.empty();
|
||||
}
|
||||
String auditTopicName = Optional.ofNullable(auditProps.getTopic()).orElse(DEFAULT_AUDIT_TOPIC_NAME);
|
||||
@Nullable KafkaProducer<byte[], byte[]> producer = null;
|
||||
if (topicAudit && createTopicIfNeeded(cluster, ac, auditTopicName, auditProps)) {
|
||||
producer = producerFactory.get();
|
||||
if (!topicAudit) {
|
||||
log.info("Audit initialization finished for cluster '{}' (console only)", cluster.getName());
|
||||
return Optional.of(consoleOnlyWriter(cluster, alterLogOnly));
|
||||
}
|
||||
log.info("Audit service initialized for cluster '{}'", cluster.getName());
|
||||
String auditTopicName = Optional.ofNullable(auditProps.getTopic()).orElse(DEFAULT_AUDIT_TOPIC_NAME);
|
||||
boolean topicAuditCanBeDone = createTopicIfNeeded(cluster, acSupplier, auditTopicName, auditProps);
|
||||
if (!topicAuditCanBeDone) {
|
||||
if (consoleAudit) {
|
||||
log.info(
|
||||
"Audit initialization finished for cluster '{}' (console only, topic audit init failed)",
|
||||
cluster.getName()
|
||||
);
|
||||
return Optional.of(consoleOnlyWriter(cluster, alterLogOnly));
|
||||
}
|
||||
return Optional.empty();
|
||||
}
|
||||
log.info("Audit initialization finished for cluster '{}'", cluster.getName());
|
||||
return Optional.of(
|
||||
new AuditWriter(
|
||||
cluster.getName(),
|
||||
alterLogOnly,
|
||||
auditTopicName,
|
||||
producer,
|
||||
producerFactory.get(),
|
||||
consoleAudit ? AUDIT_LOGGER : null
|
||||
)
|
||||
);
|
||||
}
|
||||
|
||||
private static AuditWriter consoleOnlyWriter(KafkaCluster cluster, boolean alterLogOnly) {
|
||||
return new AuditWriter(cluster.getName(), alterLogOnly, null, null, AUDIT_LOGGER);
|
||||
}
|
||||
|
||||
/**
|
||||
* return true if topic created/existing and producing can be enabled.
|
||||
*/
|
||||
private static boolean createTopicIfNeeded(KafkaCluster cluster,
|
||||
ReactiveAdminClient ac,
|
||||
Supplier<ReactiveAdminClient> acSupplier,
|
||||
String auditTopicName,
|
||||
ClustersProperties.AuditProperties auditProps) {
|
||||
ReactiveAdminClient ac;
|
||||
try {
|
||||
ac = acSupplier.get();
|
||||
} catch (Exception e) {
|
||||
printAuditInitError(cluster, "Error while connecting to the cluster", e);
|
||||
return false;
|
||||
}
|
||||
boolean topicExists;
|
||||
try {
|
||||
topicExists = ac.listTopics(true).block().contains(auditTopicName);
|
||||
topicExists = ac.listTopics(true).block(BLOCK_TIMEOUT).contains(auditTopicName);
|
||||
} catch (Exception e) {
|
||||
printAuditInitError(cluster, "Error checking audit topic existence", e);
|
||||
return false;
|
||||
|
@ -130,7 +153,7 @@ public class AuditService implements Closeable {
|
|||
.ifPresent(topicConfig::putAll);
|
||||
|
||||
log.info("Creating audit topic '{}' for cluster '{}'", auditTopicName, cluster.getName());
|
||||
ac.createTopic(auditTopicName, topicPartitions, null, topicConfig).block();
|
||||
ac.createTopic(auditTopicName, topicPartitions, null, topicConfig).block(BLOCK_TIMEOUT);
|
||||
log.info("Audit topic created for cluster '{}'", cluster.getName());
|
||||
return true;
|
||||
} catch (Exception e) {
|
||||
|
@ -142,7 +165,7 @@ public class AuditService implements Closeable {
|
|||
private static void printAuditInitError(KafkaCluster cluster, String errorMsg, Exception cause) {
|
||||
log.error("-----------------------------------------------------------------");
|
||||
log.error(
|
||||
"Error initializing Audit Service for cluster '{}'. Audit will be disabled. See error below: ",
|
||||
"Error initializing Audit for cluster '{}'. Audit will be disabled. See error below: ",
|
||||
cluster.getName()
|
||||
);
|
||||
log.error("{}", errorMsg, cause);
|
||||
|
@ -174,8 +197,11 @@ public class AuditService implements Closeable {
|
|||
if (sig.getContextView().hasKey(key)) {
|
||||
return sig.getContextView().<Mono<SecurityContext>>get(key)
|
||||
.map(context -> context.getAuthentication().getPrincipal())
|
||||
.cast(RbacUser.class)
|
||||
.map(user -> new AuthenticatedUser(user.name(), user.groups()))
|
||||
.cast(UserDetails.class)
|
||||
.map(user -> {
|
||||
var roles = user.getAuthorities().stream().map(GrantedAuthority::getAuthority).collect(Collectors.toSet());
|
||||
return new AuthenticatedUser(user.getUsername(), roles);
|
||||
})
|
||||
.switchIfEmpty(NO_AUTH_USER);
|
||||
} else {
|
||||
return NO_AUTH_USER;
|
||||
|
|
|
@@ -18,7 +18,8 @@ import org.slf4j.Logger;

  @Slf4j
  record AuditWriter(String clusterName,
-                    String targetTopic,
+                    boolean logAlterOperationsOnly,
+                    @Nullable String targetTopic,
                     @Nullable KafkaProducer<byte[], byte[]> producer,
                     @Nullable Logger consoleLogger) implements Closeable {

@@ -39,11 +40,15 @@ record AuditWriter(String clusterName,
  }

  private void write(AuditRecord rec) {
+   if (logAlterOperationsOnly && rec.resources().stream().noneMatch(AuditResource::alter)) {
+     //we should only log alter operations, but this is read-only op
+     return;
+   }
    String json = rec.toJson();
    if (consoleLogger != null) {
      consoleLogger.info(json);
    }
-   if (producer != null) {
+   if (targetTopic != null && producer != null) {
      producer.send(
          new ProducerRecord<>(targetTopic, null, json.getBytes(UTF_8)),
          (metadata, ex) -> {

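Note (illustration, not part of the change): the new logAlterOperationsOnly flag and the alter field on AuditResource combine into a simple filter, so read-only operations can be skipped. A self-contained sketch of that idea, using a simplified resource record:

    import java.util.List;

    // Standalone sketch of the alter-only audit filtering introduced above:
    // a record is written only if at least one accessed resource is an "alter" operation.
    public class AlterOnlyFilterSketch {
      record ResourceAccess(String accessType, boolean alter) {}

      static boolean shouldWrite(boolean logAlterOperationsOnly, List<ResourceAccess> resources) {
        return !logAlterOperationsOnly || resources.stream().anyMatch(ResourceAccess::alter);
      }

      public static void main(String[] args) {
        var readOnly = List.of(new ResourceAccess("VIEW", false));
        var mutating = List.of(new ResourceAccess("EDIT", true));
        System.out.println(shouldWrite(true, readOnly));  // false -> skipped
        System.out.println(shouldWrite(true, mutating));  // true  -> logged
      }
    }
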
@@ -25,7 +25,7 @@ class ConnectorsExporter {

  Flux<DataEntityList> export(KafkaCluster cluster) {
    return kafkaConnectService.getConnects(cluster)
-       .flatMap(connect -> kafkaConnectService.getConnectorNames(cluster, connect.getName())
+       .flatMap(connect -> kafkaConnectService.getConnectorNamesWithErrorsSuppress(cluster, connect.getName())
        .flatMap(connectorName -> kafkaConnectService.getConnector(cluster, connect.getName(), connectorName))
        .flatMap(connectorDTO ->
            kafkaConnectService.getConnectorTopics(cluster, connect.getName(), connectorDTO.getName())

@@ -1,7 +1,5 @@
  package com.provectus.kafka.ui.service.masking;

- import static java.util.stream.Collectors.toList;
-
  import com.fasterxml.jackson.core.JsonProcessingException;
  import com.fasterxml.jackson.databind.JsonNode;
  import com.fasterxml.jackson.databind.json.JsonMapper;

@@ -9,6 +7,7 @@ import com.fasterxml.jackson.databind.node.ContainerNode;
  import com.google.common.annotations.VisibleForTesting;
  import com.google.common.base.Preconditions;
  import com.provectus.kafka.ui.config.ClustersProperties;
+ import com.provectus.kafka.ui.model.TopicMessageDTO;
  import com.provectus.kafka.ui.serde.api.Serde;
  import com.provectus.kafka.ui.service.masking.policies.MaskingPolicy;
  import java.util.List;

@@ -54,7 +53,8 @@ public class DataMasking {
          Optional.ofNullable(property.getTopicValuesPattern()).map(Pattern::compile).orElse(null),
          MaskingPolicy.create(property)
      );
-   }).collect(toList()));
+   }).toList()
+   );
  }

  @VisibleForTesting

@@ -62,8 +62,17 @@ public class DataMasking {
    this.masks = masks;
  }

- public UnaryOperator<String> getMaskingFunction(String topic, Serde.Target target) {
-   var targetMasks = masks.stream().filter(m -> m.shouldBeApplied(topic, target)).collect(toList());
+ public UnaryOperator<TopicMessageDTO> getMaskerForTopic(String topic) {
+   var keyMasker = getMaskingFunction(topic, Serde.Target.KEY);
+   var valMasker = getMaskingFunction(topic, Serde.Target.VALUE);
+   return msg -> msg
+       .key(keyMasker.apply(msg.getKey()))
+       .content(valMasker.apply(msg.getContent()));
+ }
+
+ @VisibleForTesting
+ UnaryOperator<String> getMaskingFunction(String topic, Serde.Target target) {
+   var targetMasks = masks.stream().filter(m -> m.shouldBeApplied(topic, target)).toList();
    if (targetMasks.isEmpty()) {
      return UnaryOperator.identity();
    }

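Note (illustration, not part of the change): getMaskerForTopic composes the key and value masking functions into one operator over the whole message. A self-contained sketch of the same composition, using a simplified message type instead of TopicMessageDTO:

    import java.util.function.UnaryOperator;

    // Standalone sketch of the masker composition shown above: key and value masking
    // functions are combined into one operator applied to a (simplified) message object.
    public class MaskerCompositionSketch {
      record Msg(String key, String content) {}

      static UnaryOperator<Msg> maskerFor(UnaryOperator<String> keyMasker, UnaryOperator<String> valMasker) {
        return msg -> new Msg(keyMasker.apply(msg.key()), valMasker.apply(msg.content()));
      }

      public static void main(String[] args) {
        var masker = maskerFor(k -> k, v -> "***");  // hypothetical REPLACE-style value mask
        System.out.println(masker.apply(new Msg("id-1", "secret")));  // Msg[key=id-1, content=***]
      }
    }
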
@ -51,6 +51,8 @@ import reactor.core.publisher.Mono;
|
|||
@Slf4j
|
||||
public class AccessControlService {
|
||||
|
||||
private static final String ACCESS_DENIED = "Access denied";
|
||||
|
||||
@Nullable
|
||||
private final InMemoryReactiveClientRegistrationRepository clientRegistrationRepository;
|
||||
private final RoleBasedAccessControlProperties properties;
|
||||
|
@ -97,6 +99,17 @@ public class AccessControlService {
|
|||
return Mono.empty();
|
||||
}
|
||||
|
||||
if (CollectionUtils.isNotEmpty(context.getApplicationConfigActions())) {
|
||||
return getUser()
|
||||
.doOnNext(user -> {
|
||||
boolean accessGranted = isApplicationConfigAccessible(context, user);
|
||||
|
||||
if (!accessGranted) {
|
||||
throw new AccessDeniedException(ACCESS_DENIED);
|
||||
}
|
||||
}).then();
|
||||
}
|
||||
|
||||
return getUser()
|
||||
.doOnNext(user -> {
|
||||
boolean accessGranted =
|
||||
|
@ -113,7 +126,7 @@ public class AccessControlService {
|
|||
&& isAuditAccessible(context, user);
|
||||
|
||||
if (!accessGranted) {
|
||||
throw new AccessDeniedException("Access denied");
|
||||
throw new AccessDeniedException(ACCESS_DENIED);
|
||||
}
|
||||
})
|
||||
.then();
|
||||
|
|
|
@ -5,6 +5,8 @@ import static com.provectus.kafka.ui.model.rbac.provider.Provider.Name.GITHUB;
|
|||
import com.provectus.kafka.ui.model.rbac.Role;
|
||||
import com.provectus.kafka.ui.model.rbac.provider.Provider;
|
||||
import com.provectus.kafka.ui.service.rbac.AccessControlService;
|
||||
import java.util.Collection;
|
||||
import java.util.Collections;
|
||||
import java.util.HashSet;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
@ -26,6 +28,8 @@ public class GithubAuthorityExtractor implements ProviderAuthorityExtractor {
|
|||
private static final String ORGANIZATION_ATTRIBUTE_NAME = "organizations_url";
|
||||
private static final String USERNAME_ATTRIBUTE_NAME = "login";
|
||||
private static final String ORGANIZATION_NAME = "login";
|
||||
private static final String ORGANIZATION = "organization";
|
||||
private static final String TEAM_NAME = "slug";
|
||||
private static final String GITHUB_ACCEPT_HEADER = "application/vnd.github+json";
|
||||
private static final String DUMMY = "dummy";
|
||||
// The number of results (max 100) per page of list organizations for authenticated user.
|
||||
|
@ -46,7 +50,7 @@ public class GithubAuthorityExtractor implements ProviderAuthorityExtractor {
|
|||
throw new RuntimeException();
|
||||
}
|
||||
|
||||
Set<String> groupsByUsername = new HashSet<>();
|
||||
Set<String> rolesByUsername = new HashSet<>();
|
||||
String username = principal.getAttribute(USERNAME_ATTRIBUTE_NAME);
|
||||
if (username == null) {
|
||||
log.debug("Github username param is not present");
|
||||
|
@ -59,13 +63,7 @@ public class GithubAuthorityExtractor implements ProviderAuthorityExtractor {
|
|||
.filter(s -> s.getType().equals("user"))
|
||||
.anyMatch(s -> s.getValue().equals(username)))
|
||||
.map(Role::getName)
|
||||
.forEach(groupsByUsername::add);
|
||||
}
|
||||
|
||||
String organization = principal.getAttribute(ORGANIZATION_ATTRIBUTE_NAME);
|
||||
if (organization == null) {
|
||||
log.debug("Github organization param is not present");
|
||||
return Mono.just(groupsByUsername);
|
||||
.forEach(rolesByUsername::add);
|
||||
}
|
||||
|
||||
OAuth2UserRequest req = (OAuth2UserRequest) additionalParams.get("request");
|
||||
|
@ -80,8 +78,24 @@ public class GithubAuthorityExtractor implements ProviderAuthorityExtractor {
|
|||
.getUserInfoEndpoint()
|
||||
.getUri();
|
||||
}
|
||||
var webClient = WebClient.create(infoEndpoint);
|
||||
|
||||
WebClient webClient = WebClient.create(infoEndpoint);
|
||||
Mono<Set<String>> rolesByOrganization = getOrganizationRoles(principal, additionalParams, acs, webClient);
|
||||
Mono<Set<String>> rolesByTeams = getTeamRoles(webClient, additionalParams, acs);
|
||||
|
||||
return Mono.zip(rolesByOrganization, rolesByTeams)
|
||||
.map((t) -> Stream.of(t.getT1(), t.getT2(), rolesByUsername)
|
||||
.flatMap(Collection::stream)
|
||||
.collect(Collectors.toSet()));
|
||||
}
|
||||
|
||||
private Mono<Set<String>> getOrganizationRoles(DefaultOAuth2User principal, Map<String, Object> additionalParams,
|
||||
AccessControlService acs, WebClient webClient) {
|
||||
String organization = principal.getAttribute(ORGANIZATION_ATTRIBUTE_NAME);
|
||||
if (organization == null) {
|
||||
log.debug("Github organization param is not present");
|
||||
return Mono.just(Collections.emptySet());
|
||||
}
|
||||
|
||||
final Mono<List<Map<String, Object>>> userOrganizations = webClient
|
||||
.get()
|
||||
|
@ -99,22 +113,76 @@ public class GithubAuthorityExtractor implements ProviderAuthorityExtractor {
|
|||
//@formatter:on
|
||||
|
||||
return userOrganizations
|
||||
.map(orgsMap -> {
|
||||
var groupsByOrg = acs.getRoles()
|
||||
.stream()
|
||||
.filter(role -> role.getSubjects()
|
||||
.stream()
|
||||
.filter(s -> s.getProvider().equals(Provider.OAUTH_GITHUB))
|
||||
.filter(s -> s.getType().equals("organization"))
|
||||
.anyMatch(subject -> orgsMap.stream()
|
||||
.map(org -> org.get(ORGANIZATION_NAME).toString())
|
||||
.distinct()
|
||||
.anyMatch(orgName -> orgName.equalsIgnoreCase(subject.getValue()))
|
||||
))
|
||||
.map(Role::getName);
|
||||
.map(orgsMap -> acs.getRoles()
|
||||
.stream()
|
||||
.filter(role -> role.getSubjects()
|
||||
.stream()
|
||||
.filter(s -> s.getProvider().equals(Provider.OAUTH_GITHUB))
|
||||
.filter(s -> s.getType().equals(ORGANIZATION))
|
||||
.anyMatch(subject -> orgsMap.stream()
|
||||
.map(org -> org.get(ORGANIZATION_NAME).toString())
|
||||
.anyMatch(orgName -> orgName.equalsIgnoreCase(subject.getValue()))
|
||||
))
|
||||
.map(Role::getName)
|
||||
.collect(Collectors.toSet()));
|
||||
}
|
||||
|
||||
return Stream.concat(groupsByOrg, groupsByUsername.stream()).collect(Collectors.toSet());
|
||||
});
|
||||
@SuppressWarnings("unchecked")
|
||||
private Mono<Set<String>> getTeamRoles(WebClient webClient, Map<String, Object> additionalParams,
|
||||
AccessControlService acs) {
|
||||
|
||||
var requestedTeams = acs.getRoles()
|
||||
.stream()
|
||||
.filter(r -> r.getSubjects()
|
||||
.stream()
|
||||
.filter(s -> s.getProvider().equals(Provider.OAUTH_GITHUB))
|
||||
.anyMatch(s -> s.getType().equals("team")))
|
||||
.collect(Collectors.toSet());
|
||||
|
||||
if (requestedTeams.isEmpty()) {
|
||||
log.debug("No roles with github teams found, skipping");
|
||||
return Mono.just(Collections.emptySet());
|
||||
}
|
||||
|
||||
final Mono<List<Map<String, Object>>> rawTeams = webClient
|
||||
.get()
|
||||
.uri(uriBuilder -> uriBuilder.path("/teams")
|
||||
.queryParam("per_page", ORGANIZATIONS_PER_PAGE)
|
||||
.build())
|
||||
.headers(headers -> {
|
||||
headers.set(HttpHeaders.ACCEPT, GITHUB_ACCEPT_HEADER);
|
||||
OAuth2UserRequest request = (OAuth2UserRequest) additionalParams.get("request");
|
||||
headers.setBearerAuth(request.getAccessToken().getTokenValue());
|
||||
})
|
||||
.retrieve()
|
||||
//@formatter:off
|
||||
.bodyToMono(new ParameterizedTypeReference<>() {});
|
||||
//@formatter:on
|
||||
|
||||
final Mono<List<String>> mappedTeams = rawTeams
|
||||
.map(teams -> teams.stream()
|
||||
.map(teamInfo -> {
|
||||
var name = teamInfo.get(TEAM_NAME);
|
||||
var orgInfo = (Map<String, Object>) teamInfo.get(ORGANIZATION);
|
||||
var orgName = orgInfo.get(ORGANIZATION_NAME);
|
||||
return orgName + "/" + name;
|
||||
})
|
||||
.map(Object::toString)
|
||||
.collect(Collectors.toList())
|
||||
);
|
||||
|
||||
return mappedTeams
|
||||
.map(teams -> acs.getRoles()
|
||||
.stream()
|
||||
.filter(role -> role.getSubjects()
|
||||
.stream()
|
||||
.filter(s -> s.getProvider().equals(Provider.OAUTH_GITHUB))
|
||||
.filter(s -> s.getType().equals("team"))
|
||||
.anyMatch(subject -> teams.stream()
|
||||
.anyMatch(teamName -> teamName.equalsIgnoreCase(subject.getValue()))
|
||||
))
|
||||
.map(Role::getName)
|
||||
.collect(Collectors.toSet()));
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@ -0,0 +1,82 @@
|
|||
package com.provectus.kafka.ui.util;
|
||||
|
||||
import static lombok.AccessLevel.PRIVATE;
|
||||
|
||||
import com.google.common.annotations.VisibleForTesting;
|
||||
import com.provectus.kafka.ui.emitter.PolledRecords;
|
||||
import com.provectus.kafka.ui.model.KafkaCluster;
|
||||
import io.micrometer.core.instrument.Counter;
|
||||
import io.micrometer.core.instrument.DistributionSummary;
|
||||
import io.micrometer.core.instrument.Gauge;
|
||||
import io.micrometer.core.instrument.MeterRegistry;
|
||||
import io.micrometer.core.instrument.Metrics;
|
||||
import io.micrometer.core.instrument.Timer;
|
||||
import io.micrometer.core.instrument.simple.SimpleMeterRegistry;
|
||||
import java.util.concurrent.atomic.AtomicInteger;
|
||||
import lombok.RequiredArgsConstructor;
|
||||
|
||||
@RequiredArgsConstructor(access = PRIVATE)
|
||||
public class ApplicationMetrics {
|
||||
|
||||
private final String clusterName;
|
||||
private final MeterRegistry registry;
|
||||
|
||||
public static ApplicationMetrics forCluster(KafkaCluster cluster) {
|
||||
return new ApplicationMetrics(cluster.getName(), Metrics.globalRegistry);
|
||||
}
|
||||
|
||||
@VisibleForTesting
|
||||
public static ApplicationMetrics noop() {
|
||||
return new ApplicationMetrics("noop", new SimpleMeterRegistry());
|
||||
}
|
||||
|
||||
public void meterPolledRecords(String topic, PolledRecords polled, boolean throttled) {
|
||||
pollTimer(topic).record(polled.elapsed());
|
||||
polledRecords(topic).increment(polled.count());
|
||||
polledBytes(topic).record(polled.bytes());
|
||||
if (throttled) {
|
||||
pollThrottlingActivations().increment();
|
||||
}
|
||||
}
|
||||
|
||||
private Counter polledRecords(String topic) {
|
||||
return Counter.builder("topic_records_polled")
|
||||
.description("Number of records polled from topic")
|
||||
.tag("cluster", clusterName)
|
||||
.tag("topic", topic)
|
||||
.register(registry);
|
||||
}
|
||||
|
||||
private DistributionSummary polledBytes(String topic) {
|
||||
return DistributionSummary.builder("topic_polled_bytes")
|
||||
.description("Bytes polled from kafka topic")
|
||||
.tag("cluster", clusterName)
|
||||
.tag("topic", topic)
|
||||
.register(registry);
|
||||
}
|
||||
|
||||
private Timer pollTimer(String topic) {
|
||||
return Timer.builder("topic_poll_time")
|
||||
.description("Time spend in polling for topic")
|
||||
.tag("cluster", clusterName)
|
||||
.tag("topic", topic)
|
||||
.register(registry);
|
||||
}
|
||||
|
||||
private Counter pollThrottlingActivations() {
|
||||
return Counter.builder("poll_throttling_activations")
|
||||
.description("Number of poll throttling activations")
|
||||
.tag("cluster", clusterName)
|
||||
.register(registry);
|
||||
}
|
||||
|
||||
public AtomicInteger activeConsumers() {
|
||||
var count = new AtomicInteger();
|
||||
Gauge.builder("active_consumers", () -> count)
|
||||
.description("Number of active consumers")
|
||||
.tag("cluster", clusterName)
|
||||
.register(registry);
|
||||
return count;
|
||||
}
|
||||
|
||||
}
|
|
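Note (illustration, not part of the change): the meters above are registered against Micrometer's global registry. A self-contained sketch of registering and reading the same kind of counter against a SimpleMeterRegistry, with invented tag values:

    import io.micrometer.core.instrument.Counter;
    import io.micrometer.core.instrument.simple.SimpleMeterRegistry;

    // Illustration only: mirrors the "topic_records_polled" counter defined in ApplicationMetrics above.
    public class PolledRecordsMeterExample {
      public static void main(String[] args) {
        var registry = new SimpleMeterRegistry();
        Counter polled = Counter.builder("topic_records_polled")
            .description("Number of records polled from topic")
            .tag("cluster", "local")    // hypothetical cluster name
            .tag("topic", "orders")     // hypothetical topic name
            .register(registry);
        polled.increment(42);
        System.out.println(registry.get("topic_records_polled").counter().count()); // 42.0
      }
    }
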
@ -1,29 +0,0 @@
|
|||
package com.provectus.kafka.ui.util;
|
||||
|
||||
import org.apache.kafka.clients.consumer.ConsumerRecord;
|
||||
import org.apache.kafka.common.header.Header;
|
||||
import org.apache.kafka.common.utils.Bytes;
|
||||
|
||||
public class ConsumerRecordsUtil {
|
||||
|
||||
public static int calculatePolledRecSize(ConsumerRecord<Bytes, Bytes> rec) {
|
||||
int polledBytes = 0;
|
||||
for (Header header : rec.headers()) {
|
||||
polledBytes +=
|
||||
(header.key() != null ? header.key().getBytes().length : 0)
|
||||
+ (header.value() != null ? header.value().length : 0);
|
||||
}
|
||||
polledBytes += rec.key() == null ? 0 : rec.serializedKeySize();
|
||||
polledBytes += rec.value() == null ? 0 : rec.serializedValueSize();
|
||||
return polledBytes;
|
||||
}
|
||||
|
||||
public static int calculatePolledSize(Iterable<ConsumerRecord<Bytes, Bytes>> recs) {
|
||||
int polledBytes = 0;
|
||||
for (ConsumerRecord<Bytes, Bytes> rec : recs) {
|
||||
polledBytes += calculatePolledRecSize(rec);
|
||||
}
|
||||
return polledBytes;
|
||||
}
|
||||
|
||||
}
|
|
@@ -10,7 +10,7 @@ management:
  endpoints:
    web:
      exposure:
-       include: "info,health"
+       include: "info,health,prometheus"

 logging:
   level:

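Note (illustration, not part of the change): with "prometheus" added to the exposed endpoints, the polling meters defined in ApplicationMetrics above become scrapeable over HTTP. A rough sketch of reading them; host, port, endpoint path and the exact exposition metric names are assumptions:

    import java.net.URI;
    import java.net.http.HttpClient;
    import java.net.http.HttpRequest;
    import java.net.http.HttpResponse;

    // Sketch: fetch the Prometheus exposition text and print the polled-records counter lines.
    public class ScrapeMetricsSketch {
      public static void main(String[] args) throws Exception {
        var client = HttpClient.newHttpClient();
        var request = HttpRequest.newBuilder(URI.create("http://localhost:8080/actuator/prometheus")).build();
        HttpResponse<String> response = client.send(request, HttpResponse.BodyHandlers.ofString());
        response.body().lines()
            .filter(l -> l.startsWith("topic_records_polled"))  // counter name from ApplicationMetrics above
            .forEach(System.out::println);
      }
    }
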
@@ -77,6 +77,8 @@ public abstract class AbstractIntegrationTest {
    System.setProperty("kafka.clusters.0.kafkaConnect.0.userName", "kafka-connect");
    System.setProperty("kafka.clusters.0.kafkaConnect.0.password", "kafka-connect");
    System.setProperty("kafka.clusters.0.kafkaConnect.0.address", kafkaConnect.getTarget());
+   System.setProperty("kafka.clusters.0.kafkaConnect.1.name", "notavailable");
+   System.setProperty("kafka.clusters.0.kafkaConnect.1.address", "http://notavailable:6666");
    System.setProperty("kafka.clusters.0.masking.0.type", "REPLACE");
    System.setProperty("kafka.clusters.0.masking.0.replacement", "***");
    System.setProperty("kafka.clusters.0.masking.0.topicValuesPattern", "masking-test-.*");

@ -0,0 +1,69 @@
|
|||
package com.provectus.kafka.ui.emitter;
|
||||
|
||||
import static org.assertj.core.api.Assertions.assertThat;
|
||||
|
||||
import java.time.OffsetDateTime;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collections;
|
||||
import java.util.List;
|
||||
import java.util.Optional;
|
||||
import org.apache.kafka.clients.consumer.ConsumerRecord;
|
||||
import org.apache.kafka.common.header.internals.RecordHeaders;
|
||||
import org.apache.kafka.common.record.TimestampType;
|
||||
import org.apache.kafka.common.utils.Bytes;
|
||||
import org.junit.jupiter.api.RepeatedTest;
|
||||
|
||||
class MessagesProcessingTest {
|
||||
|
||||
|
||||
@RepeatedTest(5)
|
||||
void testSortingAsc() {
|
||||
var messagesInOrder = List.of(
|
||||
consumerRecord(1, 100L, "1999-01-01T00:00:00+00:00"),
|
||||
consumerRecord(0, 0L, "2000-01-01T00:00:00+00:00"),
|
||||
consumerRecord(1, 200L, "2000-01-05T00:00:00+00:00"),
|
||||
consumerRecord(0, 10L, "2000-01-10T00:00:00+00:00"),
|
||||
consumerRecord(0, 20L, "2000-01-20T00:00:00+00:00"),
|
||||
consumerRecord(1, 300L, "3000-01-01T00:00:00+00:00"),
|
||||
consumerRecord(2, 1000L, "4000-01-01T00:00:00+00:00"),
|
||||
consumerRecord(2, 1001L, "2000-01-01T00:00:00+00:00"),
|
||||
consumerRecord(2, 1003L, "3000-01-01T00:00:00+00:00")
|
||||
);
|
||||
|
||||
var shuffled = new ArrayList<>(messagesInOrder);
|
||||
Collections.shuffle(shuffled);
|
||||
|
||||
var sortedList = MessagesProcessing.sortForSending(shuffled, true);
|
||||
assertThat(sortedList).containsExactlyElementsOf(messagesInOrder);
|
||||
}
|
||||
|
||||
@RepeatedTest(5)
|
||||
void testSortingDesc() {
|
||||
var messagesInOrder = List.of(
|
||||
consumerRecord(1, 300L, "3000-01-01T00:00:00+00:00"),
|
||||
consumerRecord(2, 1003L, "3000-01-01T00:00:00+00:00"),
|
||||
consumerRecord(0, 20L, "2000-01-20T00:00:00+00:00"),
|
||||
consumerRecord(0, 10L, "2000-01-10T00:00:00+00:00"),
|
||||
consumerRecord(1, 200L, "2000-01-05T00:00:00+00:00"),
|
||||
consumerRecord(0, 0L, "2000-01-01T00:00:00+00:00"),
|
||||
consumerRecord(2, 1001L, "2000-01-01T00:00:00+00:00"),
|
||||
consumerRecord(2, 1000L, "4000-01-01T00:00:00+00:00"),
|
||||
consumerRecord(1, 100L, "1999-01-01T00:00:00+00:00")
|
||||
);
|
||||
|
||||
var shuffled = new ArrayList<>(messagesInOrder);
|
||||
Collections.shuffle(shuffled);
|
||||
|
||||
var sortedList = MessagesProcessing.sortForSending(shuffled, false);
|
||||
assertThat(sortedList).containsExactlyElementsOf(messagesInOrder);
|
||||
}
|
||||
|
||||
private ConsumerRecord<Bytes, Bytes> consumerRecord(int partition, long offset, String ts) {
|
||||
return new ConsumerRecord<>(
|
||||
"topic", partition, offset, OffsetDateTime.parse(ts).toInstant().toEpochMilli(),
|
||||
TimestampType.CREATE_TIME,
|
||||
0, 0, null, null, new RecordHeaders(), Optional.empty()
|
||||
);
|
||||
}
|
||||
|
||||
}
|
|
@ -0,0 +1,30 @@
|
|||
package com.provectus.kafka.ui.serdes;
|
||||
|
||||
import static com.provectus.kafka.ui.serde.api.DeserializeResult.Type.STRING;
|
||||
import static org.mockito.Mockito.any;
|
||||
import static org.mockito.Mockito.mock;
|
||||
import static org.mockito.Mockito.verify;
|
||||
|
||||
import com.provectus.kafka.ui.model.TopicMessageDTO;
|
||||
import com.provectus.kafka.ui.serde.api.DeserializeResult;
|
||||
import com.provectus.kafka.ui.serde.api.Serde;
|
||||
import java.util.Map;
|
||||
import java.util.function.UnaryOperator;
|
||||
import org.apache.kafka.clients.consumer.ConsumerRecord;
|
||||
import org.apache.kafka.common.utils.Bytes;
|
||||
import org.junit.jupiter.api.Test;
|
||||
|
||||
class ConsumerRecordDeserializerTest {
|
||||
|
||||
@Test
|
||||
void dataMaskingAppliedOnDeserializedMessage() {
|
||||
UnaryOperator<TopicMessageDTO> maskerMock = mock();
|
||||
Serde.Deserializer deser = (headers, data) -> new DeserializeResult("test", STRING, Map.of());
|
||||
|
||||
var recordDeser = new ConsumerRecordDeserializer("test", deser, "test", deser, "test", deser, deser, maskerMock);
|
||||
recordDeser.deserialize(new ConsumerRecord<>("t", 1, 1L, Bytes.wrap("t".getBytes()), Bytes.wrap("t".getBytes())));
|
||||
|
||||
verify(maskerMock).apply(any(TopicMessageDTO.class));
|
||||
}
|
||||
|
||||
}
|
|
@ -0,0 +1,80 @@
|
|||
package com.provectus.kafka.ui.serdes.builtin;
|
||||
|
||||
import static org.assertj.core.api.Assertions.assertThat;
|
||||
|
||||
import com.provectus.kafka.ui.serde.api.DeserializeResult;
|
||||
import com.provectus.kafka.ui.serde.api.Serde;
|
||||
import com.provectus.kafka.ui.serdes.PropertyResolverImpl;
|
||||
import com.provectus.kafka.ui.serdes.RecordHeadersImpl;
|
||||
import org.junit.jupiter.api.BeforeEach;
|
||||
import org.junit.jupiter.params.ParameterizedTest;
|
||||
import org.junit.jupiter.params.provider.CsvSource;
|
||||
import org.junit.jupiter.params.provider.EnumSource;
|
||||
|
||||
public class HexSerdeTest {
|
||||
|
||||
private static final byte[] TEST_BYTES = "hello world".getBytes();
|
||||
private static final String TEST_BYTES_HEX_ENCODED = "68 65 6C 6C 6F 20 77 6F 72 6C 64";
|
||||
|
||||
private HexSerde hexSerde;
|
||||
|
||||
@BeforeEach
|
||||
void init() {
|
||||
hexSerde = new HexSerde();
|
||||
hexSerde.autoConfigure(PropertyResolverImpl.empty(), PropertyResolverImpl.empty());
|
||||
}
|
||||
|
||||
|
||||
@ParameterizedTest
|
||||
@CsvSource({
|
||||
"68656C6C6F20776F726C64", // uppercase
|
||||
"68656c6c6f20776f726c64", // lowercase
|
||||
"68:65:6c:6c:6f:20:77:6f:72:6c:64", // ':' delim
|
||||
"68 65 6C 6C 6F 20 77 6F 72 6C 64", // space delim, UC
|
||||
"68 65 6c 6c 6f 20 77 6f 72 6c 64", // space delim, LC
|
||||
"#68 #65 #6C #6C #6F #20 #77 #6F #72 #6C #64" // '#' prefix, space delim
|
||||
})
|
||||
void serializesInputAsHexString(String hexString) {
|
||||
for (Serde.Target type : Serde.Target.values()) {
|
||||
var serializer = hexSerde.serializer("anyTopic", type);
|
||||
byte[] bytes = serializer.serialize(hexString);
|
||||
assertThat(bytes).isEqualTo(TEST_BYTES);
|
||||
}
|
||||
}
|
||||
|
||||
@ParameterizedTest
|
||||
@EnumSource
|
||||
void serializesEmptyStringAsEmptyBytesArray(Serde.Target type) {
|
||||
var serializer = hexSerde.serializer("anyTopic", type);
|
||||
byte[] bytes = serializer.serialize("");
|
||||
assertThat(bytes).isEqualTo(new byte[] {});
|
||||
}
|
||||
|
||||
@ParameterizedTest
|
||||
@EnumSource
|
||||
void deserializesDataAsHexBytes(Serde.Target type) {
|
||||
var deserializer = hexSerde.deserializer("anyTopic", type);
|
||||
var result = deserializer.deserialize(new RecordHeadersImpl(), TEST_BYTES);
|
||||
assertThat(result.getResult()).isEqualTo(TEST_BYTES_HEX_ENCODED);
|
||||
assertThat(result.getType()).isEqualTo(DeserializeResult.Type.STRING);
|
||||
assertThat(result.getAdditionalProperties()).isEmpty();
|
||||
}
|
||||
|
||||
@ParameterizedTest
|
||||
@EnumSource
|
||||
void getSchemaReturnsEmpty(Serde.Target type) {
|
||||
assertThat(hexSerde.getSchema("anyTopic", type)).isEmpty();
|
||||
}
|
||||
|
||||
@ParameterizedTest
|
||||
@EnumSource
|
||||
void canDeserializeReturnsTrueForAllInputs(Serde.Target type) {
|
||||
assertThat(hexSerde.canDeserialize("anyTopic", type)).isTrue();
|
||||
}
|
||||
|
||||
@ParameterizedTest
|
||||
@EnumSource
|
||||
void canSerializeReturnsTrueForAllInput(Serde.Target type) {
|
||||
assertThat(hexSerde.canSerialize("anyTopic", type)).isTrue();
|
||||
}
|
||||
}
|
|
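Note (illustration, not part of the change): the hex form the tests above expect ("68 65 6C 6C 6F 20 77 6F 72 6C 64" for "hello world") can be reproduced with the JDK's HexFormat; a self-contained round-trip sketch:

    import java.util.HexFormat;

    // Standalone sketch of the space-delimited, upper-case hex representation used in HexSerdeTest above.
    public class HexRoundTripSketch {
      public static void main(String[] args) {
        HexFormat fmt = HexFormat.ofDelimiter(" ").withUpperCase();
        String encoded = fmt.formatHex("hello world".getBytes());
        System.out.println(encoded);                            // 68 65 6C 6C 6F 20 77 6F 72 6C 64
        System.out.println(new String(fmt.parseHex(encoded)));  // hello world
      }
    }
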
@ -0,0 +1,108 @@
|
|||
package com.provectus.kafka.ui.serdes.builtin;
|
||||
|
||||
import static org.assertj.core.api.Assertions.assertThat;
|
||||
import static org.assertj.core.api.Assertions.assertThatThrownBy;
|
||||
|
||||
import com.google.protobuf.DescriptorProtos;
|
||||
import com.google.protobuf.Descriptors;
|
||||
import com.google.protobuf.DynamicMessage;
|
||||
import com.provectus.kafka.ui.exception.ValidationException;
|
||||
import com.provectus.kafka.ui.serde.api.Serde;
|
||||
import io.confluent.kafka.schemaregistry.protobuf.ProtobufSchema;
|
||||
import lombok.SneakyThrows;
|
||||
import org.junit.jupiter.api.BeforeEach;
|
||||
import org.junit.jupiter.api.Test;
|
||||
|
||||
class ProtobufRawSerdeTest {
|
||||
|
||||
private static final String DUMMY_TOPIC = "dummy-topic";
|
||||
|
||||
private ProtobufRawSerde serde;
|
||||
|
||||
@BeforeEach
|
||||
void init() {
|
||||
serde = new ProtobufRawSerde();
|
||||
}
|
||||
|
||||
@SneakyThrows
|
||||
ProtobufSchema getSampleSchema() {
|
||||
return new ProtobufSchema(
|
||||
"""
|
||||
syntax = "proto3";
|
||||
message Message1 {
|
||||
int32 my_field = 1;
|
||||
}
|
||||
"""
|
||||
);
|
||||
}
|
||||
|
||||
@SneakyThrows
|
||||
private byte[] getProtobufMessage() {
|
||||
DynamicMessage.Builder builder = DynamicMessage.newBuilder(getSampleSchema().toDescriptor("Message1"));
|
||||
builder.setField(builder.getDescriptorForType().findFieldByName("my_field"), 5);
|
||||
return builder.build().toByteArray();
|
||||
}
|
||||
|
||||
@Test
|
||||
void deserializeSimpleMessage() {
|
||||
var deserialized = serde.deserializer(DUMMY_TOPIC, Serde.Target.VALUE)
|
||||
.deserialize(null, getProtobufMessage());
|
||||
assertThat(deserialized.getResult()).isEqualTo("1: 5\n");
|
||||
}
|
||||
|
||||
@Test
|
||||
void deserializeEmptyMessage() {
|
||||
var deserialized = serde.deserializer(DUMMY_TOPIC, Serde.Target.VALUE)
|
||||
.deserialize(null, new byte[0]);
|
||||
assertThat(deserialized.getResult()).isEqualTo("");
|
||||
}
|
||||
|
||||
@Test
|
||||
void deserializeInvalidMessage() {
|
||||
var deserializer = serde.deserializer(DUMMY_TOPIC, Serde.Target.VALUE);
|
||||
assertThatThrownBy(() -> deserializer.deserialize(null, new byte[] { 1, 2, 3 }))
|
||||
.isInstanceOf(ValidationException.class)
|
||||
.hasMessageContaining("Protocol message contained an invalid tag");
|
||||
}
|
||||
|
||||
@Test
|
||||
void deserializeNullMessage() {
|
||||
var deserializer = serde.deserializer(DUMMY_TOPIC, Serde.Target.VALUE);
|
||||
assertThatThrownBy(() -> deserializer.deserialize(null, null))
|
||||
.isInstanceOf(ValidationException.class)
|
||||
.hasMessageContaining("Cannot read the array length");
|
||||
}
|
||||
|
||||
ProtobufSchema getSampleNestedSchema() {
|
||||
return new ProtobufSchema(
|
||||
"""
|
||||
syntax = "proto3";
|
||||
message Message2 {
|
||||
int32 my_nested_field = 1;
|
||||
}
|
||||
message Message1 {
|
||||
int32 my_field = 1;
|
||||
Message2 my_nested_message = 2;
|
||||
}
|
||||
"""
|
||||
);
|
||||
}
|
||||
|
||||
@SneakyThrows
|
||||
private byte[] getComplexProtobufMessage() {
|
||||
DynamicMessage.Builder builder = DynamicMessage.newBuilder(getSampleNestedSchema().toDescriptor("Message1"));
|
||||
builder.setField(builder.getDescriptorForType().findFieldByName("my_field"), 5);
|
||||
DynamicMessage.Builder nestedBuilder = DynamicMessage.newBuilder(getSampleNestedSchema().toDescriptor("Message2"));
|
||||
nestedBuilder.setField(nestedBuilder.getDescriptorForType().findFieldByName("my_nested_field"), 10);
|
||||
builder.setField(builder.getDescriptorForType().findFieldByName("my_nested_message"), nestedBuilder.build());
|
||||
|
||||
return builder.build().toByteArray();
|
||||
}
|
||||
|
||||
@Test
|
||||
void deserializeNestedMessage() {
|
||||
var deserialized = serde.deserializer(DUMMY_TOPIC, Serde.Target.VALUE)
|
||||
.deserialize(null, getComplexProtobufMessage());
|
||||
assertThat(deserialized.getResult()).isEqualTo("1: 5\n2: {\n 1: 10\n}\n");
|
||||
}
|
||||
}
|
|
@ -7,17 +7,20 @@ import static com.provectus.kafka.ui.model.SeekTypeDTO.TIMESTAMP;
|
|||
import static org.assertj.core.api.Assertions.assertThat;
|
||||
|
||||
import com.provectus.kafka.ui.AbstractIntegrationTest;
|
||||
import com.provectus.kafka.ui.emitter.BackwardRecordEmitter;
|
||||
import com.provectus.kafka.ui.emitter.ForwardRecordEmitter;
|
||||
import com.provectus.kafka.ui.emitter.MessagesProcessing;
|
||||
import com.provectus.kafka.ui.emitter.BackwardEmitter;
|
||||
import com.provectus.kafka.ui.emitter.EnhancedConsumer;
|
||||
import com.provectus.kafka.ui.emitter.ForwardEmitter;
|
||||
import com.provectus.kafka.ui.emitter.PollingSettings;
|
||||
import com.provectus.kafka.ui.emitter.PollingThrottler;
|
||||
import com.provectus.kafka.ui.model.ConsumerPosition;
|
||||
import com.provectus.kafka.ui.model.TopicMessageDTO;
|
||||
import com.provectus.kafka.ui.model.TopicMessageEventDTO;
|
||||
import com.provectus.kafka.ui.producer.KafkaTestProducer;
|
||||
import com.provectus.kafka.ui.serde.api.Serde;
|
||||
import com.provectus.kafka.ui.serdes.ConsumerRecordDeserializer;
|
||||
import com.provectus.kafka.ui.serdes.PropertyResolverImpl;
|
||||
import com.provectus.kafka.ui.serdes.builtin.StringSerde;
|
||||
import com.provectus.kafka.ui.util.ApplicationMetrics;
|
||||
import java.io.Serializable;
|
||||
import java.util.ArrayList;
|
||||
import java.util.HashMap;
|
||||
|
@ -28,17 +31,15 @@ import java.util.UUID;
|
|||
import java.util.concurrent.ThreadLocalRandom;
|
||||
import java.util.function.Consumer;
|
||||
import java.util.function.Function;
|
||||
import java.util.function.Predicate;
|
||||
import java.util.stream.Collectors;
|
||||
import lombok.Value;
|
||||
import lombok.extern.slf4j.Slf4j;
|
||||
import org.apache.kafka.clients.admin.NewTopic;
|
||||
import org.apache.kafka.clients.consumer.ConsumerConfig;
|
||||
import org.apache.kafka.clients.consumer.KafkaConsumer;
|
||||
import org.apache.kafka.clients.producer.ProducerRecord;
|
||||
import org.apache.kafka.common.TopicPartition;
|
||||
import org.apache.kafka.common.header.internals.RecordHeader;
|
||||
import org.apache.kafka.common.serialization.BytesDeserializer;
|
||||
import org.apache.kafka.common.utils.Bytes;
|
||||
import org.junit.jupiter.api.AfterAll;
|
||||
import org.junit.jupiter.api.BeforeAll;
|
||||
import org.junit.jupiter.api.Test;
|
||||
|
@ -56,6 +57,7 @@ class RecordEmitterTest extends AbstractIntegrationTest {
|
|||
static final String EMPTY_TOPIC = TOPIC + "_empty";
|
||||
static final List<Record> SENT_RECORDS = new ArrayList<>();
|
||||
static final ConsumerRecordDeserializer RECORD_DESERIALIZER = createRecordsDeserializer();
|
||||
static final Predicate<TopicMessageDTO> NOOP_FILTER = m -> true;
|
||||
|
||||
@BeforeAll
|
||||
static void generateMsgs() throws Exception {
|
||||
|
@ -91,6 +93,7 @@ class RecordEmitterTest extends AbstractIntegrationTest {
|
|||
static void cleanup() {
|
||||
deleteTopic(TOPIC);
|
||||
deleteTopic(EMPTY_TOPIC);
|
||||
SENT_RECORDS.clear();
|
||||
}
|
||||
|
||||
private static ConsumerRecordDeserializer createRecordsDeserializer() {
|
||||
|
@ -103,28 +106,28 @@ class RecordEmitterTest extends AbstractIntegrationTest {
|
|||
s.deserializer(null, Serde.Target.VALUE),
|
||||
StringSerde.name(),
|
||||
s.deserializer(null, Serde.Target.KEY),
|
||||
s.deserializer(null, Serde.Target.VALUE)
|
||||
s.deserializer(null, Serde.Target.VALUE),
|
||||
msg -> msg
|
||||
);
|
||||
}
|
||||
|
||||
private MessagesProcessing createMessagesProcessing() {
|
||||
return new MessagesProcessing(RECORD_DESERIALIZER, msg -> true, null);
|
||||
}
|
||||
|
||||
@Test
|
||||
void pollNothingOnEmptyTopic() {
|
||||
var forwardEmitter = new ForwardRecordEmitter(
|
||||
this::createConsumer,
|
||||
new ConsumerPosition(BEGINNING, EMPTY_TOPIC, null),
|
||||
createMessagesProcessing(),
|
||||
PollingSettings.createDefault()
|
||||
);
|
||||
|
||||
var backwardEmitter = new BackwardRecordEmitter(
|
||||
var forwardEmitter = new ForwardEmitter(
|
||||
this::createConsumer,
|
||||
new ConsumerPosition(BEGINNING, EMPTY_TOPIC, null),
|
||||
100,
|
||||
createMessagesProcessing(),
|
||||
RECORD_DESERIALIZER,
|
||||
NOOP_FILTER,
|
||||
PollingSettings.createDefault()
|
||||
);
|
||||
|
||||
var backwardEmitter = new BackwardEmitter(
|
||||
this::createConsumer,
|
||||
new ConsumerPosition(BEGINNING, EMPTY_TOPIC, null),
|
||||
100,
|
||||
RECORD_DESERIALIZER,
|
||||
NOOP_FILTER,
|
||||
PollingSettings.createDefault()
|
||||
);
|
||||
|
||||
|
@ -143,18 +146,21 @@ class RecordEmitterTest extends AbstractIntegrationTest {
|
|||
|
||||
@Test
|
||||
void pollFullTopicFromBeginning() {
|
||||
var forwardEmitter = new ForwardRecordEmitter(
|
||||
var forwardEmitter = new ForwardEmitter(
|
||||
this::createConsumer,
|
||||
new ConsumerPosition(BEGINNING, TOPIC, null),
|
||||
createMessagesProcessing(),
|
||||
PARTITIONS * MSGS_PER_PARTITION,
|
||||
RECORD_DESERIALIZER,
|
||||
NOOP_FILTER,
|
||||
PollingSettings.createDefault()
|
||||
);
|
||||
|
||||
var backwardEmitter = new BackwardRecordEmitter(
|
||||
var backwardEmitter = new BackwardEmitter(
|
||||
this::createConsumer,
|
||||
new ConsumerPosition(LATEST, TOPIC, null),
|
||||
PARTITIONS * MSGS_PER_PARTITION,
|
||||
createMessagesProcessing(),
|
||||
RECORD_DESERIALIZER,
|
||||
NOOP_FILTER,
|
||||
PollingSettings.createDefault()
|
||||
);
|
||||
|
||||
|
@ -172,18 +178,21 @@ class RecordEmitterTest extends AbstractIntegrationTest {
|
|||
targetOffsets.put(new TopicPartition(TOPIC, i), offset);
|
||||
}
|
||||
|
||||
var forwardEmitter = new ForwardRecordEmitter(
|
||||
this::createConsumer,
|
||||
new ConsumerPosition(OFFSET, TOPIC, targetOffsets),
|
||||
createMessagesProcessing(),
|
||||
PollingSettings.createDefault()
|
||||
);
|
||||
|
||||
var backwardEmitter = new BackwardRecordEmitter(
|
||||
var forwardEmitter = new ForwardEmitter(
|
||||
this::createConsumer,
|
||||
new ConsumerPosition(OFFSET, TOPIC, targetOffsets),
|
||||
PARTITIONS * MSGS_PER_PARTITION,
|
||||
createMessagesProcessing(),
|
||||
RECORD_DESERIALIZER,
|
||||
NOOP_FILTER,
|
||||
PollingSettings.createDefault()
|
||||
);
|
||||
|
||||
var backwardEmitter = new BackwardEmitter(
|
||||
this::createConsumer,
|
||||
new ConsumerPosition(OFFSET, TOPIC, targetOffsets),
|
||||
PARTITIONS * MSGS_PER_PARTITION,
|
||||
RECORD_DESERIALIZER,
|
||||
NOOP_FILTER,
|
||||
PollingSettings.createDefault()
|
||||
);
|
||||
|
||||
|
@ -217,18 +226,21 @@ class RecordEmitterTest extends AbstractIntegrationTest {
|
|||
);
|
||||
}
|
||||
|
||||
var forwardEmitter = new ForwardRecordEmitter(
|
||||
this::createConsumer,
|
||||
new ConsumerPosition(TIMESTAMP, TOPIC, targetTimestamps),
|
||||
createMessagesProcessing(),
|
||||
PollingSettings.createDefault()
|
||||
);
|
||||
|
||||
var backwardEmitter = new BackwardRecordEmitter(
|
||||
var forwardEmitter = new ForwardEmitter(
|
||||
this::createConsumer,
|
||||
new ConsumerPosition(TIMESTAMP, TOPIC, targetTimestamps),
|
||||
PARTITIONS * MSGS_PER_PARTITION,
|
||||
createMessagesProcessing(),
|
||||
RECORD_DESERIALIZER,
|
||||
NOOP_FILTER,
|
||||
PollingSettings.createDefault()
|
||||
);
|
||||
|
||||
var backwardEmitter = new BackwardEmitter(
|
||||
this::createConsumer,
|
||||
new ConsumerPosition(TIMESTAMP, TOPIC, targetTimestamps),
|
||||
PARTITIONS * MSGS_PER_PARTITION,
|
||||
RECORD_DESERIALIZER,
|
||||
NOOP_FILTER,
|
||||
PollingSettings.createDefault()
|
||||
);
|
||||
|
||||
|
@ -255,11 +267,12 @@ class RecordEmitterTest extends AbstractIntegrationTest {
|
|||
targetOffsets.put(new TopicPartition(TOPIC, i), (long) MSGS_PER_PARTITION);
|
||||
}
|
||||
|
||||
var backwardEmitter = new BackwardRecordEmitter(
|
||||
var backwardEmitter = new BackwardEmitter(
|
||||
this::createConsumer,
|
||||
new ConsumerPosition(OFFSET, TOPIC, targetOffsets),
|
||||
numMessages,
|
||||
createMessagesProcessing(),
|
||||
RECORD_DESERIALIZER,
|
||||
NOOP_FILTER,
|
||||
PollingSettings.createDefault()
|
||||
);
|
||||
|
||||
|
@ -281,11 +294,12 @@ class RecordEmitterTest extends AbstractIntegrationTest {
|
|||
offsets.put(new TopicPartition(TOPIC, i), 0L);
|
||||
}
|
||||
|
||||
var backwardEmitter = new BackwardRecordEmitter(
|
||||
var backwardEmitter = new BackwardEmitter(
|
||||
this::createConsumer,
|
||||
new ConsumerPosition(OFFSET, TOPIC, offsets),
|
||||
100,
|
||||
createMessagesProcessing(),
|
||||
RECORD_DESERIALIZER,
|
||||
NOOP_FILTER,
|
||||
PollingSettings.createDefault()
|
||||
);
|
||||
|
||||
|
@ -325,22 +339,20 @@ class RecordEmitterTest extends AbstractIntegrationTest {
|
|||
assertionsConsumer.accept(step.expectComplete().verifyThenAssertThat());
|
||||
}
|
||||
|
||||
private KafkaConsumer<Bytes, Bytes> createConsumer() {
|
||||
private EnhancedConsumer createConsumer() {
|
||||
return createConsumer(Map.of());
|
||||
}
|
||||
|
||||
private KafkaConsumer<Bytes, Bytes> createConsumer(Map<String, Object> properties) {
|
||||
private EnhancedConsumer createConsumer(Map<String, Object> properties) {
|
||||
final Map<String, ? extends Serializable> map = Map.of(
|
||||
ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, kafka.getBootstrapServers(),
|
||||
ConsumerConfig.GROUP_ID_CONFIG, UUID.randomUUID().toString(),
|
||||
ConsumerConfig.MAX_POLL_RECORDS_CONFIG, 19, // to check multiple polls
|
||||
ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, BytesDeserializer.class,
|
||||
ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, BytesDeserializer.class
|
||||
ConsumerConfig.MAX_POLL_RECORDS_CONFIG, 19 // to check multiple polls
|
||||
);
|
||||
Properties props = new Properties();
|
||||
props.putAll(map);
|
||||
props.putAll(properties);
|
||||
return new KafkaConsumer<>(props);
|
||||
return new EnhancedConsumer(props, PollingThrottler.noop(), ApplicationMetrics.noop());
|
||||
}
|
||||
|
||||
@Value
|
||||
|
|
|
@@ -42,8 +42,9 @@ public class SchemaRegistryPaginationTest {
            new SchemaRegistryService.SubjectWithCompatibilityLevel(
                new SchemaSubject().subject(a.getArgument(1)), Compatibility.FULL)));

-   this.controller = new SchemasController(schemaRegistryService, new AccessControlServiceMock().getMock(),
-       mock(AuditService.class));
+   this.controller = new SchemasController(schemaRegistryService);
+   this.controller.setAccessControlService(new AccessControlServiceMock().getMock());
+   this.controller.setAuditService(mock(AuditService.class));
    this.controller.setClustersStorage(clustersStorage);
  }

@@ -45,8 +45,8 @@ class TopicsServicePaginationTest {
  private final ClusterMapper clusterMapper = new ClusterMapperImpl();
  private final AccessControlService accessControlService = new AccessControlServiceMock().getMock();

- private final TopicsController topicsController = new TopicsController(
-     topicsService, mock(TopicAnalysisService.class), clusterMapper, accessControlService, mock(AuditService.class));
+ private final TopicsController topicsController =
+     new TopicsController(topicsService, mock(TopicAnalysisService.class), clusterMapper);

  private void init(Map<String, InternalTopic> topicsInCache) {

@@ -59,6 +59,8 @@ class TopicsServicePaginationTest {
      List<String> lst = a.getArgument(1);
      return Mono.just(lst.stream().map(topicsInCache::get).collect(Collectors.toList()));
    });
+   topicsController.setAccessControlService(accessControlService);
+   topicsController.setAuditService(mock(AuditService.class));
    topicsController.setClustersStorage(clustersStorage);
  }

@ -4,16 +4,21 @@ import static org.assertj.core.api.Assertions.assertThat;
|
|||
import static org.mockito.Mockito.mock;
|
||||
import static org.mockito.Mockito.when;
|
||||
|
||||
import com.provectus.kafka.ui.model.CreateConsumerAclDTO;
|
||||
import com.provectus.kafka.ui.model.CreateProducerAclDTO;
|
||||
import com.provectus.kafka.ui.model.CreateStreamAppAclDTO;
|
||||
import com.provectus.kafka.ui.model.KafkaCluster;
|
||||
import com.provectus.kafka.ui.service.AdminClientService;
|
||||
import com.provectus.kafka.ui.service.ReactiveAdminClient;
|
||||
import java.util.Collection;
|
||||
import java.util.List;
|
||||
import java.util.UUID;
|
||||
import org.apache.kafka.common.acl.AccessControlEntry;
|
||||
import org.apache.kafka.common.acl.AclBinding;
|
||||
import org.apache.kafka.common.acl.AclOperation;
|
||||
import org.apache.kafka.common.acl.AclPermissionType;
|
||||
import org.apache.kafka.common.resource.PatternType;
|
||||
import org.apache.kafka.common.resource.Resource;
|
||||
import org.apache.kafka.common.resource.ResourcePattern;
|
||||
import org.apache.kafka.common.resource.ResourcePatternFilter;
|
||||
import org.apache.kafka.common.resource.ResourceType;
|
||||
|
@ -53,12 +58,12 @@ class AclsServiceTest {
|
|||
when(adminClientMock.listAcls(ResourcePatternFilter.ANY))
|
||||
.thenReturn(Mono.just(List.of(existingBinding1, existingBinding2)));
|
||||
|
||||
ArgumentCaptor<?> createdCaptor = ArgumentCaptor.forClass(Collection.class);
|
||||
when(adminClientMock.createAcls((Collection<AclBinding>) createdCaptor.capture()))
|
||||
ArgumentCaptor<Collection<AclBinding>> createdCaptor = ArgumentCaptor.forClass(Collection.class);
|
||||
when(adminClientMock.createAcls(createdCaptor.capture()))
|
||||
.thenReturn(Mono.empty());
|
||||
|
||||
ArgumentCaptor<?> deletedCaptor = ArgumentCaptor.forClass(Collection.class);
|
||||
    when(adminClientMock.deleteAcls((Collection<AclBinding>) deletedCaptor.capture()))
    ArgumentCaptor<Collection<AclBinding>> deletedCaptor = ArgumentCaptor.forClass(Collection.class);
    when(adminClientMock.deleteAcls(deletedCaptor.capture()))
        .thenReturn(Mono.empty());

    aclsService.syncAclWithAclCsv(

@@ -68,15 +73,218 @@ class AclsServiceTest {
            + "User:test3,GROUP,PREFIXED,groupNew,DESCRIBE,DENY,localhost"
    ).block();

    Collection<AclBinding> createdBindings = (Collection<AclBinding>) createdCaptor.getValue();
    Collection<AclBinding> createdBindings = createdCaptor.getValue();
    assertThat(createdBindings)
        .hasSize(1)
        .contains(newBindingToBeAdded);

    Collection<AclBinding> deletedBindings = (Collection<AclBinding>) deletedCaptor.getValue();
    Collection<AclBinding> deletedBindings = deletedCaptor.getValue();
    assertThat(deletedBindings)
        .hasSize(1)
        .contains(existingBinding2);
  }

  @Test
  void createsConsumerDependantAcls() {
    ArgumentCaptor<Collection<AclBinding>> createdCaptor = ArgumentCaptor.forClass(Collection.class);
    when(adminClientMock.createAcls(createdCaptor.capture()))
        .thenReturn(Mono.empty());

    var principal = UUID.randomUUID().toString();
    var host = UUID.randomUUID().toString();

    aclsService.createConsumerAcl(
        CLUSTER,
        new CreateConsumerAclDTO()
            .principal(principal)
            .host(host)
            .consumerGroups(List.of("cg1", "cg2"))
            .topics(List.of("t1", "t2"))
    ).block();

    //Read, Describe on topics, Read on consumerGroups
    Collection<AclBinding> createdBindings = createdCaptor.getValue();
    assertThat(createdBindings)
        .hasSize(6)
        .contains(new AclBinding(
            new ResourcePattern(ResourceType.TOPIC, "t1", PatternType.LITERAL),
            new AccessControlEntry(principal, host, AclOperation.READ, AclPermissionType.ALLOW)))
        .contains(new AclBinding(
            new ResourcePattern(ResourceType.TOPIC, "t1", PatternType.LITERAL),
            new AccessControlEntry(principal, host, AclOperation.DESCRIBE, AclPermissionType.ALLOW)))
        .contains(new AclBinding(
            new ResourcePattern(ResourceType.TOPIC, "t2", PatternType.LITERAL),
            new AccessControlEntry(principal, host, AclOperation.READ, AclPermissionType.ALLOW)))
        .contains(new AclBinding(
            new ResourcePattern(ResourceType.TOPIC, "t2", PatternType.LITERAL),
            new AccessControlEntry(principal, host, AclOperation.DESCRIBE, AclPermissionType.ALLOW)))
        .contains(new AclBinding(
            new ResourcePattern(ResourceType.GROUP, "cg1", PatternType.LITERAL),
            new AccessControlEntry(principal, host, AclOperation.READ, AclPermissionType.ALLOW)))
        .contains(new AclBinding(
            new ResourcePattern(ResourceType.GROUP, "cg2", PatternType.LITERAL),
            new AccessControlEntry(principal, host, AclOperation.READ, AclPermissionType.ALLOW)));
  }

  @Test
  void createsConsumerDependantAclsWhenTopicsAndGroupsSpecifiedByPrefix() {
    ArgumentCaptor<Collection<AclBinding>> createdCaptor = ArgumentCaptor.forClass(Collection.class);
    when(adminClientMock.createAcls(createdCaptor.capture()))
        .thenReturn(Mono.empty());

    var principal = UUID.randomUUID().toString();
    var host = UUID.randomUUID().toString();

    aclsService.createConsumerAcl(
        CLUSTER,
        new CreateConsumerAclDTO()
            .principal(principal)
            .host(host)
            .consumerGroupsPrefix("cgPref")
            .topicsPrefix("topicPref")
    ).block();

    //Read, Describe on topics, Read on consumerGroups
    Collection<AclBinding> createdBindings = createdCaptor.getValue();
    assertThat(createdBindings)
        .hasSize(3)
        .contains(new AclBinding(
            new ResourcePattern(ResourceType.TOPIC, "topicPref", PatternType.PREFIXED),
            new AccessControlEntry(principal, host, AclOperation.READ, AclPermissionType.ALLOW)))
        .contains(new AclBinding(
            new ResourcePattern(ResourceType.TOPIC, "topicPref", PatternType.PREFIXED),
            new AccessControlEntry(principal, host, AclOperation.DESCRIBE, AclPermissionType.ALLOW)))
        .contains(new AclBinding(
            new ResourcePattern(ResourceType.GROUP, "cgPref", PatternType.PREFIXED),
            new AccessControlEntry(principal, host, AclOperation.READ, AclPermissionType.ALLOW)));
  }

  @Test
  void createsProducerDependantAcls() {
    ArgumentCaptor<Collection<AclBinding>> createdCaptor = ArgumentCaptor.forClass(Collection.class);
    when(adminClientMock.createAcls(createdCaptor.capture()))
        .thenReturn(Mono.empty());

    var principal = UUID.randomUUID().toString();
    var host = UUID.randomUUID().toString();

    aclsService.createProducerAcl(
        CLUSTER,
        new CreateProducerAclDTO()
            .principal(principal)
            .host(host)
            .topics(List.of("t1"))
            .idempotent(true)
            .transactionalId("txId1")
    ).block();

    //Write, Describe, Create permission on topics, Write, Describe on transactionalIds
    //IDEMPOTENT_WRITE on cluster if idempotent is enabled (true)
    Collection<AclBinding> createdBindings = createdCaptor.getValue();
    assertThat(createdBindings)
        .hasSize(6)
        .contains(new AclBinding(
            new ResourcePattern(ResourceType.TOPIC, "t1", PatternType.LITERAL),
            new AccessControlEntry(principal, host, AclOperation.WRITE, AclPermissionType.ALLOW)))
        .contains(new AclBinding(
            new ResourcePattern(ResourceType.TOPIC, "t1", PatternType.LITERAL),
            new AccessControlEntry(principal, host, AclOperation.DESCRIBE, AclPermissionType.ALLOW)))
        .contains(new AclBinding(
            new ResourcePattern(ResourceType.TOPIC, "t1", PatternType.LITERAL),
            new AccessControlEntry(principal, host, AclOperation.CREATE, AclPermissionType.ALLOW)))
        .contains(new AclBinding(
            new ResourcePattern(ResourceType.TRANSACTIONAL_ID, "txId1", PatternType.LITERAL),
            new AccessControlEntry(principal, host, AclOperation.WRITE, AclPermissionType.ALLOW)))
        .contains(new AclBinding(
            new ResourcePattern(ResourceType.TRANSACTIONAL_ID, "txId1", PatternType.LITERAL),
            new AccessControlEntry(principal, host, AclOperation.DESCRIBE, AclPermissionType.ALLOW)))
        .contains(new AclBinding(
            new ResourcePattern(ResourceType.CLUSTER, Resource.CLUSTER_NAME, PatternType.LITERAL),
            new AccessControlEntry(principal, host, AclOperation.IDEMPOTENT_WRITE, AclPermissionType.ALLOW)));
  }

  @Test
  void createsProducerDependantAclsWhenTopicsAndTxIdSpecifiedByPrefix() {
    ArgumentCaptor<Collection<AclBinding>> createdCaptor = ArgumentCaptor.forClass(Collection.class);
    when(adminClientMock.createAcls(createdCaptor.capture()))
        .thenReturn(Mono.empty());

    var principal = UUID.randomUUID().toString();
    var host = UUID.randomUUID().toString();

    aclsService.createProducerAcl(
        CLUSTER,
        new CreateProducerAclDTO()
            .principal(principal)
            .host(host)
            .topicsPrefix("topicPref")
            .transactionsIdPrefix("txIdPref")
            .idempotent(false)
    ).block();

    //Write, Describe, Create permission on topics, Write, Describe on transactionalIds
    //IDEMPOTENT_WRITE on cluster if idempotent is enabled (false)
    Collection<AclBinding> createdBindings = createdCaptor.getValue();
    assertThat(createdBindings)
        .hasSize(5)
        .contains(new AclBinding(
            new ResourcePattern(ResourceType.TOPIC, "topicPref", PatternType.PREFIXED),
            new AccessControlEntry(principal, host, AclOperation.WRITE, AclPermissionType.ALLOW)))
        .contains(new AclBinding(
            new ResourcePattern(ResourceType.TOPIC, "topicPref", PatternType.PREFIXED),
            new AccessControlEntry(principal, host, AclOperation.DESCRIBE, AclPermissionType.ALLOW)))
        .contains(new AclBinding(
            new ResourcePattern(ResourceType.TOPIC, "topicPref", PatternType.PREFIXED),
            new AccessControlEntry(principal, host, AclOperation.CREATE, AclPermissionType.ALLOW)))
        .contains(new AclBinding(
            new ResourcePattern(ResourceType.TRANSACTIONAL_ID, "txIdPref", PatternType.PREFIXED),
            new AccessControlEntry(principal, host, AclOperation.WRITE, AclPermissionType.ALLOW)))
        .contains(new AclBinding(
            new ResourcePattern(ResourceType.TRANSACTIONAL_ID, "txIdPref", PatternType.PREFIXED),
            new AccessControlEntry(principal, host, AclOperation.DESCRIBE, AclPermissionType.ALLOW)));
  }

  @Test
  void createsStreamAppDependantAcls() {
    ArgumentCaptor<Collection<AclBinding>> createdCaptor = ArgumentCaptor.forClass(Collection.class);
    when(adminClientMock.createAcls(createdCaptor.capture()))
        .thenReturn(Mono.empty());

    var principal = UUID.randomUUID().toString();
    var host = UUID.randomUUID().toString();

    aclsService.createStreamAppAcl(
        CLUSTER,
        new CreateStreamAppAclDTO()
            .principal(principal)
            .host(host)
            .inputTopics(List.of("t1"))
            .outputTopics(List.of("t2", "t3"))
            .applicationId("appId1")
    ).block();

    // Read on input topics, Write on output topics
    // ALL on applicationId-prefixed Groups and Topics
    Collection<AclBinding> createdBindings = createdCaptor.getValue();
    assertThat(createdBindings)
        .hasSize(5)
        .contains(new AclBinding(
            new ResourcePattern(ResourceType.TOPIC, "t1", PatternType.LITERAL),
            new AccessControlEntry(principal, host, AclOperation.READ, AclPermissionType.ALLOW)))
        .contains(new AclBinding(
            new ResourcePattern(ResourceType.TOPIC, "t2", PatternType.LITERAL),
            new AccessControlEntry(principal, host, AclOperation.WRITE, AclPermissionType.ALLOW)))
        .contains(new AclBinding(
            new ResourcePattern(ResourceType.TOPIC, "t3", PatternType.LITERAL),
            new AccessControlEntry(principal, host, AclOperation.WRITE, AclPermissionType.ALLOW)))
        .contains(new AclBinding(
            new ResourcePattern(ResourceType.GROUP, "appId1", PatternType.PREFIXED),
            new AccessControlEntry(principal, host, AclOperation.ALL, AclPermissionType.ALLOW)))
        .contains(new AclBinding(
            new ResourcePattern(ResourceType.TOPIC, "appId1", PatternType.PREFIXED),
            new AccessControlEntry(principal, host, AclOperation.ALL, AclPermissionType.ALLOW)));
  }
}

@@ -30,8 +30,8 @@ class AuditServiceTest {

  @Test
  void isAuditTopicChecksIfAuditIsEnabledForCluster() {
    Map<String, AuditWriter> writers = Map.of(
        "c1", new AuditWriter("с1", "c1topic", null, null),
        "c2", new AuditWriter("c2", "c2topic", mock(KafkaProducer.class), null)
        "c1", new AuditWriter("с1", true, "c1topic", null, null),
        "c2", new AuditWriter("c2", false, "c2topic", mock(KafkaProducer.class), null)
    );

    var auditService = new AuditService(writers);

@@ -79,9 +79,20 @@ class AuditServiceTest {
          .thenReturn(mock(KafkaProducer.class));
    }

    @Test
    void logOnlyAlterOpsByDefault() {
      var auditProps = new ClustersProperties.AuditProperties();
      auditProps.setConsoleAuditEnabled(true);
      clustersProperties.setAudit(auditProps);

      var maybeWriter = createAuditWriter(cluster, () -> adminClientMock, producerSupplierMock);
      assertThat(maybeWriter)
          .hasValueSatisfying(w -> assertThat(w.logAlterOperationsOnly()).isTrue());
    }

    @Test
    void noWriterIfNoAuditPropsSet() {
      var maybeWriter = createAuditWriter(cluster, adminClientMock, producerSupplierMock);
      var maybeWriter = createAuditWriter(cluster, () -> adminClientMock, producerSupplierMock);
      assertThat(maybeWriter).isEmpty();
    }

@@ -91,7 +102,7 @@ class AuditServiceTest {
      auditProps.setConsoleAuditEnabled(true);
      clustersProperties.setAudit(auditProps);

      var maybeWriter = createAuditWriter(cluster, adminClientMock, producerSupplierMock);
      var maybeWriter = createAuditWriter(cluster, () -> adminClientMock, producerSupplierMock);
      assertThat(maybeWriter).isPresent();

      var writer = maybeWriter.get();

@@ -116,7 +127,7 @@ class AuditServiceTest {
      when(adminClientMock.listTopics(true))
          .thenReturn(Mono.just(Set.of("test_audit_topic")));

      var maybeWriter = createAuditWriter(cluster, adminClientMock, producerSupplierMock);
      var maybeWriter = createAuditWriter(cluster, () -> adminClientMock, producerSupplierMock);
      assertThat(maybeWriter).isPresent();

      //checking there was no topic creation request

@@ -136,7 +147,7 @@ class AuditServiceTest {
      when(adminClientMock.createTopic(eq("test_audit_topic"), eq(3), eq(null), anyMap()))
          .thenReturn(Mono.empty());

      var maybeWriter = createAuditWriter(cluster, adminClientMock, producerSupplierMock);
      var maybeWriter = createAuditWriter(cluster, () -> adminClientMock, producerSupplierMock);
      assertThat(maybeWriter).isPresent();

      //verifying topic created

@@ -0,0 +1,86 @@
package com.provectus.kafka.ui.service.audit;

import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.verifyNoInteractions;

import com.provectus.kafka.ui.config.auth.AuthenticatedUser;
import com.provectus.kafka.ui.model.rbac.AccessContext;
import com.provectus.kafka.ui.model.rbac.AccessContext.AccessContextBuilder;
import com.provectus.kafka.ui.model.rbac.permission.AclAction;
import com.provectus.kafka.ui.model.rbac.permission.ClusterConfigAction;
import com.provectus.kafka.ui.model.rbac.permission.ConnectAction;
import com.provectus.kafka.ui.model.rbac.permission.ConsumerGroupAction;
import com.provectus.kafka.ui.model.rbac.permission.SchemaAction;
import com.provectus.kafka.ui.model.rbac.permission.TopicAction;
import java.util.List;
import java.util.function.UnaryOperator;
import java.util.stream.Stream;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.junit.jupiter.api.Nested;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.MethodSource;
import org.mockito.Mockito;
import org.slf4j.Logger;

class AuditWriterTest {

  final KafkaProducer<byte[], byte[]> producerMock = Mockito.mock(KafkaProducer.class);
  final Logger loggerMock = Mockito.mock(Logger.class);
  final AuthenticatedUser user = new AuthenticatedUser("someone", List.of());

  @Nested
  class AlterOperationsOnlyWriter {

    final AuditWriter alterOnlyWriter = new AuditWriter("test", true, "test-topic", producerMock, loggerMock);

    @ParameterizedTest
    @MethodSource
    void onlyLogsWhenAlterOperationIsPresentForOneOfResources(AccessContext ctxWithAlterOperation) {
      alterOnlyWriter.write(ctxWithAlterOperation, user, null);
      verify(producerMock).send(any(), any());
      verify(loggerMock).info(any());
    }

    static Stream<AccessContext> onlyLogsWhenAlterOperationIsPresentForOneOfResources() {
      Stream<UnaryOperator<AccessContextBuilder>> topicEditActions =
          TopicAction.ALTER_ACTIONS.stream().map(a -> c -> c.topic("test").topicActions(a));
      Stream<UnaryOperator<AccessContextBuilder>> clusterConfigEditActions =
          ClusterConfigAction.ALTER_ACTIONS.stream().map(a -> c -> c.clusterConfigActions(a));
      Stream<UnaryOperator<AccessContextBuilder>> aclEditActions =
          AclAction.ALTER_ACTIONS.stream().map(a -> c -> c.aclActions(a));
      Stream<UnaryOperator<AccessContextBuilder>> cgEditActions =
          ConsumerGroupAction.ALTER_ACTIONS.stream().map(a -> c -> c.consumerGroup("cg").consumerGroupActions(a));
      Stream<UnaryOperator<AccessContextBuilder>> schemaEditActions =
          SchemaAction.ALTER_ACTIONS.stream().map(a -> c -> c.schema("sc").schemaActions(a));
      Stream<UnaryOperator<AccessContextBuilder>> connEditActions =
          ConnectAction.ALTER_ACTIONS.stream().map(a -> c -> c.connect("conn").connectActions(a));
      return Stream.of(
              topicEditActions, clusterConfigEditActions, aclEditActions,
              cgEditActions, connEditActions, schemaEditActions
          )
          .flatMap(c -> c)
          .map(setter -> setter.apply(AccessContext.builder().cluster("test").operationName("test")).build());
    }

    @ParameterizedTest
    @MethodSource
    void doesNothingIfNoResourceHasAlterAction(AccessContext readOnlyCxt) {
      alterOnlyWriter.write(readOnlyCxt, user, null);
      verifyNoInteractions(producerMock);
      verifyNoInteractions(loggerMock);
    }

    static Stream<AccessContext> doesNothingIfNoResourceHasAlterAction() {
      return Stream.<UnaryOperator<AccessContextBuilder>>of(
          c -> c.topic("test").topicActions(TopicAction.VIEW),
          c -> c.clusterConfigActions(ClusterConfigAction.VIEW),
          c -> c.aclActions(AclAction.VIEW),
          c -> c.consumerGroup("cg").consumerGroupActions(ConsumerGroupAction.VIEW),
          c -> c.schema("sc").schemaActions(SchemaAction.VIEW),
          c -> c.connect("conn").connectActions(ConnectAction.VIEW)
      ).map(setter -> setter.apply(AccessContext.builder().cluster("test").operationName("test")).build());
    }
  }

}

@@ -61,7 +61,7 @@ class ConnectorsExporterTest {
    when(kafkaConnectService.getConnects(CLUSTER))
        .thenReturn(Flux.just(connect));

    when(kafkaConnectService.getConnectorNames(CLUSTER, connect.getName()))
    when(kafkaConnectService.getConnectorNamesWithErrorsSuppress(CLUSTER, connect.getName()))
        .thenReturn(Flux.just(sinkConnector.getName(), sourceConnector.getName()));

    when(kafkaConnectService.getConnector(CLUSTER, connect.getName(), sinkConnector.getName()))

@@ -1928,6 +1928,69 @@ paths:
        404:
          description: Acl not found

  /api/clusters/{clusterName}/acl/consumer:
    post:
      tags:
        - Acls
      summary: createConsumerAcl
      operationId: createConsumerAcl
      parameters:
        - name: clusterName
          in: path
          required: true
          schema:
            type: string
      requestBody:
        content:
          application/json:
            schema:
              $ref: '#/components/schemas/CreateConsumerAcl'
      responses:
        200:
          description: OK

  /api/clusters/{clusterName}/acl/producer:
    post:
      tags:
        - Acls
      summary: createProducerAcl
      operationId: createProducerAcl
      parameters:
        - name: clusterName
          in: path
          required: true
          schema:
            type: string
      requestBody:
        content:
          application/json:
            schema:
              $ref: '#/components/schemas/CreateProducerAcl'
      responses:
        200:
          description: OK

  /api/clusters/{clusterName}/acl/streamApp:
    post:
      tags:
        - Acls
      summary: createStreamAppAcl
      operationId: createStreamAppAcl
      parameters:
        - name: clusterName
          in: path
          required: true
          schema:
            type: string
      requestBody:
        content:
          application/json:
            schema:
              $ref: '#/components/schemas/CreateStreamAppAcl'
      responses:
        200:
          description: OK

  /api/authorization:
    get:
      tags:

@@ -3611,7 +3674,7 @@ components:
        principal:
          type: string
        host:
          type: string # "*" if acl can be applied to any resource of given type
          type: string
        operation:
          type: string
          enum:

@@ -3635,6 +3698,69 @@ components:
            - ALLOW
            - DENY

    CreateConsumerAcl:
      type: object
      required: [principal, host]
      properties:
        principal:
          type: string
        host:
          type: string
        topics:
          type: array
          items:
            type: string
        topicsPrefix:
          type: string
        consumerGroups:
          type: array
          items:
            type: string
        consumerGroupsPrefix:
          type: string

    CreateProducerAcl:
      type: object
      required: [principal, host]
      properties:
        principal:
          type: string
        host:
          type: string
        topics:
          type: array
          items:
            type: string
        topicsPrefix:
          type: string
        transactionalId:
          type: string
        transactionsIdPrefix:
          type: string
        idempotent:
          type: boolean
          default: false

    CreateStreamAppAcl:
      type: object
      required: [principal, host, applicationId, inputTopics, outputTopics]
      properties:
        principal:
          type: string
        host:
          type: string
        inputTopics:
          type: array
          items:
            type: string
        outputTopics:
          type: array
          items:
            type: string
        applicationId:
          nullable: false
          type: string

    KafkaAclResourceType:
      type: string
      enum:

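For orientation, a request to the new POST /api/clusters/{clusterName}/acl/consumer endpoint carries a JSON body matching the CreateConsumerAcl schema above. A minimal sketch with invented principal, host, topic and group names, written here in YAML form (the endpoint takes the JSON equivalent):

# Illustrative only: field names come from the CreateConsumerAcl schema above,
# the values are made up for the example.
principal: "User:orders-consumer"
host: "*"
topics:
  - "orders"
consumerGroups:
  - "orders-cg"
# alternatively, prefix-based grants can be used instead of explicit lists:
# topicsPrefix: "orders."
# consumerGroupsPrefix: "orders-"

The producer and stream-app endpoints follow the same pattern with their respective schemas below.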
@@ -3910,10 +4036,6 @@ components:
      properties:
        pollTimeoutMs:
          type: integer
        partitionPollTimeout:
          type: integer
        noDataEmptyPolls:
          type: integer
        maxPageSize:
          type: integer
        defaultPageSize:

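With partitionPollTimeout and noDataEmptyPolls removed, a polling fragment matching this schema is reduced to the remaining keys; a sketch with illustrative values only:

# Illustrative values; only the keys kept in the schema above are shown.
pollTimeoutMs: 30000
maxPageSize: 500
defaultPageSize: 25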
@@ -4068,6 +4190,9 @@ components:
        audit:
          type: object
          properties:
            level:
              type: string
              enum: [ "ALL", "ALTER_ONLY" ]
            topic:
              type: string
            auditTopicsPartitions:

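Tying this to the AuditService and AuditWriter changes above, the audit block now carries a level switch next to the topic settings. A minimal sketch, assuming these properties bind under each cluster entry of the kafka.clusters configuration and using illustrative names and values:

kafka:
  clusters:
    - name: local
      audit:
        # "ALL" also logs read-only operations; ALTER_ONLY matches the
        # logOnlyAlterOpsByDefault() behaviour shown in AuditServiceTest above.
        level: ALTER_ONLY
        topic: kui-audit-log          # illustrative topic name
        audit-topics-partitions: 3
        console-audit-enabled: true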
@@ -19,6 +19,8 @@ import BrokerLogdir from 'components/Brokers/Broker/BrokerLogdir/BrokerLogdir';
import BrokerMetrics from 'components/Brokers/Broker/BrokerMetrics/BrokerMetrics';
import Navbar from 'components/common/Navigation/Navbar.styled';
import PageLoader from 'components/common/PageLoader/PageLoader';
import { ActionNavLink } from 'components/common/ActionComponent';
import { Action, ResourceType } from 'generated-sources';

import Configs from './Configs/Configs';

@@ -71,12 +73,16 @@ const Broker: React.FC = () => {
        >
          Configs
        </NavLink>
        <NavLink
        <ActionNavLink
          to={clusterBrokerMetricsPath(clusterName, brokerId)}
          className={({ isActive }) => (isActive ? 'is-active' : '')}
          permission={{
            resource: ResourceType.CLUSTERCONFIG,
            action: Action.VIEW,
          }}
        >
          Metrics
        </NavLink>
        </ActionNavLink>
      </Navbar>
      <Suspense fallback={<PageLoader />}>
        <Routes>

@@ -186,7 +186,7 @@ const Form: React.FC<FormProps> = ({ defaultValues, partitions, topics }) => {
          type="submit"
          disabled={partitionsValue.length === 0}
        >
          Submit
          Reset Offsets
        </Button>
      </div>
    </StyledForm>

@@ -15,6 +15,7 @@ import Form from './Form';

const ResetOffsets: React.FC = () => {
  const routerParams = useAppParams<ClusterGroupParam>();

  const { consumerGroupID } = routerParams;
  const consumerGroup = useConsumerGroupDetails(routerParams);

  if (consumerGroup.isLoading || !consumerGroup.isSuccess)

@@ -37,7 +38,7 @@ const ResetOffsets: React.FC = () => {
  return (
    <>
      <PageHeading
        text="Reset offsets"
        text={consumerGroupID}
        backTo={clusterConsumerGroupsPath(routerParams.clusterName)}
        backText="Consumers"
      />

@@ -19,8 +19,8 @@ const UserInfo = () => {
          </S.Wrapper>
        }
      >
        <DropdownItem>
          <S.LogoutLink href={`${window.basePath}/logout`}>Log out</S.LogoutLink>
        <DropdownItem href={`${window.basePath}/logout`}>
          <S.LogoutLink>Log out</S.LogoutLink>
        </DropdownItem>
      </Dropdown>
    ) : null;

@@ -34,7 +34,6 @@ describe('UserInfo', () => {

    const logout = screen.getByText('Log out');
    expect(logout).toBeInTheDocument();
    expect(logout).toHaveAttribute('href', '/logout');
  });

  it('should render correct url during basePath initialization', async () => {

@@ -50,7 +49,6 @@ describe('UserInfo', () => {

    const logout = screen.getByText('Log out');
    expect(logout).toBeInTheDocument();
    expect(logout).toHaveAttribute('href', `${baseUrl}/logout`);
  });

  it('should not render anything if the username does not exists', () => {

@@ -86,9 +86,9 @@ const ActionsCell: React.FC<CellContext<Topic, unknown>> = ({ row }) => {
            Remove Topic
            {!isTopicDeletionAllowed && (
              <DropdownItemHint>
                The topic deletion is restricted at the application
                The topic deletion is restricted at the broker
                <br />
                configuration level
                configuration level (delete.topic.enable = false)
              </DropdownItemHint>
            )}
          </ActionDropdownItem>

|
Some files were not shown because too many files have changed in this diff Show more
Loading…
Add table
Reference in a new issue