Merge branch 'master' into issue/3614
commit 8096690186
73 changed files with 1368 additions and 817 deletions

.github/workflows/frontend.yaml (vendored): 6 changes
@@ -23,11 +23,11 @@ jobs:
 # Disabling shallow clone is recommended for improving relevancy of reporting
 fetch-depth: 0
 ref: ${{ github.event.pull_request.head.sha }}
- - uses: pnpm/action-setup@v2.2.4
+ - uses: pnpm/action-setup@v2.4.0
 with:
 version: 7.4.0
 - name: Install node
- uses: actions/setup-node@v3.7.0
+ uses: actions/setup-node@v3.8.1
 with:
 node-version: "16.15.0"
 cache: "pnpm"

@@ -49,7 +49,7 @@ jobs:
 cd kafka-ui-react-app/
 pnpm test:CI
 - name: SonarCloud Scan
- uses: workshur/sonarcloud-github-action@improved_basedir
+ uses: sonarsource/sonarcloud-github-action@master
 with:
 projectBaseDir: ./kafka-ui-react-app
 args: -Dsonar.pullrequest.key=${{ github.event.pull_request.number }} -Dsonar.pullrequest.branch=${{ github.head_ref }} -Dsonar.pullrequest.base=${{ github.base_ref }}
.github/workflows/release.yaml (vendored): 2 changes

@@ -34,7 +34,7 @@ jobs:
 echo "version=${VERSION}" >> $GITHUB_OUTPUT

 - name: Upload files to a GitHub release
- uses: svenstaro/upload-release-action@2.6.1
+ uses: svenstaro/upload-release-action@2.7.0
 with:
 repo_token: ${{ secrets.GITHUB_TOKEN }}
 file: kafka-ui-api/target/kafka-ui-api-${{ steps.build.outputs.version }}.jar
@@ -1,7 +1,11 @@
 #FROM azul/zulu-openjdk-alpine:17-jre-headless
 FROM azul/zulu-openjdk-alpine@sha256:a36679ac0d28cb835e2a8c00e1e0d95509c6c51c5081c7782b85edb1f37a771a

-RUN apk add --no-cache gcompat # need to make snappy codec work
+RUN apk add --no-cache \
+    # snappy codec
+    gcompat \
+    # configuring timezones
+    tzdata
 RUN addgroup -S kafkaui && adduser -S kafkaui -G kafkaui

 # creating folder for dynamic config usage (certificates uploads, etc)
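The added tzdata package installs the zoneinfo database that named time zones resolve against, while gcompat stays in place for the snappy codec. A minimal sketch of how the timezone part surfaces in the application, assuming the container is started with a TZ environment variable (the variable itself is not set anywhere in this diff):

import java.time.ZoneId;
import java.time.ZonedDateTime;

class TimezoneCheck {
  public static void main(String[] args) {
    // With tzdata installed and e.g. TZ=Europe/Berlin exported for the container,
    // the JVM default zone resolves to the named region instead of falling back to UTC.
    System.out.println(ZoneId.systemDefault());
    System.out.println(ZonedDateTime.now());
  }
}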
@@ -57,8 +57,6 @@ public class ClustersProperties {
   @Data
   public static class PollingProperties {
     Integer pollTimeoutMs;
-    Integer partitionPollTimeout;
-    Integer noDataEmptyPolls;
     Integer maxPageSize;
     Integer defaultPageSize;
   }

@@ -152,7 +150,13 @@ public class ClustersProperties {
     Integer auditTopicsPartitions;
     Boolean topicAuditEnabled;
     Boolean consoleAuditEnabled;
+    LogLevel level;
     Map<String, String> auditTopicProperties;
+
+    public enum LogLevel {
+      ALL,
+      ALTER_ONLY //default
+    }
   }

   @PostConstruct
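A small illustrative sketch of how the new audit level could be consumed. Only the LogLevel enum and the level field above come from this commit; the enclosing properties class name, accessor and helper below are assumptions:

// Hypothetical helper: a missing level falls back to ALTER_ONLY, which the diff marks as the default.
static boolean shouldAudit(ClustersProperties.AuditProperties props, boolean operationAltersState) {
  var level = props.getLevel() != null ? props.getLevel() : LogLevel.ALTER_ONLY;
  return level == LogLevel.ALL || operationAltersState;
}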
@@ -6,11 +6,13 @@ import lombok.extern.slf4j.Slf4j;
 import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
 import org.springframework.context.annotation.Bean;
 import org.springframework.context.annotation.Configuration;
+import org.springframework.http.HttpMethod;
 import org.springframework.security.config.annotation.web.reactive.EnableWebFluxSecurity;
 import org.springframework.security.config.web.server.ServerHttpSecurity;
 import org.springframework.security.web.server.SecurityWebFilterChain;
 import org.springframework.security.web.server.authentication.RedirectServerAuthenticationSuccessHandler;
 import org.springframework.security.web.server.authentication.logout.RedirectServerLogoutSuccessHandler;
+import org.springframework.security.web.server.util.matcher.ServerWebExchangeMatchers;

 @Configuration
 @EnableWebFluxSecurity

@@ -39,7 +41,9 @@ public class BasicAuthSecurityConfig extends AbstractAuthSecurityConfig {
         .authenticated()
     )
     .formLogin(spec -> spec.loginPage(LOGIN_URL).authenticationSuccessHandler(authHandler))
-    .logout(spec -> spec.logoutSuccessHandler(logoutSuccessHandler))
+    .logout(spec -> spec
+        .logoutSuccessHandler(logoutSuccessHandler)
+        .requiresLogout(ServerWebExchangeMatchers.pathMatchers(HttpMethod.GET, "/logout")))
     .csrf(ServerHttpSecurity.CsrfSpec::disable)
     .build();
 }
@@ -2,12 +2,19 @@ package com.provectus.kafka.ui.controller;

 import com.provectus.kafka.ui.exception.ClusterNotFoundException;
 import com.provectus.kafka.ui.model.KafkaCluster;
+import com.provectus.kafka.ui.model.rbac.AccessContext;
 import com.provectus.kafka.ui.service.ClustersStorage;
+import com.provectus.kafka.ui.service.audit.AuditService;
 import com.provectus.kafka.ui.service.rbac.AccessControlService;
 import org.springframework.beans.factory.annotation.Autowired;
+import reactor.core.publisher.Mono;
+import reactor.core.publisher.Signal;

 public abstract class AbstractController {

-  private ClustersStorage clustersStorage;
+  protected ClustersStorage clustersStorage;
+  protected AccessControlService accessControlService;
+  protected AuditService auditService;

   protected KafkaCluster getCluster(String name) {
     return clustersStorage.getClusterByName(name)

@@ -15,8 +22,26 @@ public abstract class AbstractController {
             String.format("Cluster with name '%s' not found", name)));
   }

+  protected Mono<Void> validateAccess(AccessContext context) {
+    return accessControlService.validateAccess(context);
+  }
+
+  protected void audit(AccessContext acxt, Signal<?> sig) {
+    auditService.audit(acxt, sig);
+  }
+
   @Autowired
   public void setClustersStorage(ClustersStorage clustersStorage) {
     this.clustersStorage = clustersStorage;
   }
+
+  @Autowired
+  public void setAccessControlService(AccessControlService accessControlService) {
+    this.accessControlService = accessControlService;
+  }
+
+  @Autowired
+  public void setAuditService(AuditService auditService) {
+    this.auditService = auditService;
+  }
 }
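The rest of the diff shows the concrete controllers being trimmed to match: they drop their own AccessControlService and AuditService fields and call the inherited validateAccess and audit helpers instead. A minimal sketch of the resulting controller shape (ExampleController, ExampleApi and exampleService are illustrative names, not part of this commit):

@RestController
@RequiredArgsConstructor
public class ExampleController extends AbstractController implements ExampleApi {

  private final ExampleService exampleService;

  @Override
  public Mono<ResponseEntity<Void>> doSomething(String clusterName, ServerWebExchange exchange) {
    var context = AccessContext.builder()
        .cluster(clusterName)
        .operationName("doSomething")
        .build();

    return validateAccess(context)                 // inherited from AbstractController
        .then(exampleService.doSomething(getCluster(clusterName)))
        .doOnEach(sig -> audit(context, sig))      // inherited audit hook
        .thenReturn(ResponseEntity.ok().build());
  }
}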
|
@ -11,8 +11,6 @@ import com.provectus.kafka.ui.model.KafkaAclResourceTypeDTO;
|
|||
import com.provectus.kafka.ui.model.rbac.AccessContext;
|
||||
import com.provectus.kafka.ui.model.rbac.permission.AclAction;
|
||||
import com.provectus.kafka.ui.service.acl.AclsService;
|
||||
import com.provectus.kafka.ui.service.audit.AuditService;
|
||||
import com.provectus.kafka.ui.service.rbac.AccessControlService;
|
||||
import java.util.Optional;
|
||||
import lombok.RequiredArgsConstructor;
|
||||
import org.apache.kafka.common.resource.PatternType;
|
||||
|
@ -29,8 +27,6 @@ import reactor.core.publisher.Mono;
|
|||
public class AclsController extends AbstractController implements AclsApi {
|
||||
|
||||
private final AclsService aclsService;
|
||||
private final AccessControlService accessControlService;
|
||||
private final AuditService auditService;
|
||||
|
||||
@Override
|
||||
public Mono<ResponseEntity<Void>> createAcl(String clusterName, Mono<KafkaAclDTO> kafkaAclDto,
|
||||
|
@ -41,11 +37,11 @@ public class AclsController extends AbstractController implements AclsApi {
|
|||
.operationName("createAcl")
|
||||
.build();
|
||||
|
||||
return accessControlService.validateAccess(context)
|
||||
return validateAccess(context)
|
||||
.then(kafkaAclDto)
|
||||
.map(ClusterMapper::toAclBinding)
|
||||
.flatMap(binding -> aclsService.createAcl(getCluster(clusterName), binding))
|
||||
.doOnEach(sig -> auditService.audit(context, sig))
|
||||
.doOnEach(sig -> audit(context, sig))
|
||||
.thenReturn(ResponseEntity.ok().build());
|
||||
}
|
||||
|
||||
|
@ -58,11 +54,11 @@ public class AclsController extends AbstractController implements AclsApi {
|
|||
.operationName("deleteAcl")
|
||||
.build();
|
||||
|
||||
return accessControlService.validateAccess(context)
|
||||
return validateAccess(context)
|
||||
.then(kafkaAclDto)
|
||||
.map(ClusterMapper::toAclBinding)
|
||||
.flatMap(binding -> aclsService.deleteAcl(getCluster(clusterName), binding))
|
||||
.doOnEach(sig -> auditService.audit(context, sig))
|
||||
.doOnEach(sig -> audit(context, sig))
|
||||
.thenReturn(ResponseEntity.ok().build());
|
||||
}
|
||||
|
||||
|
@ -88,12 +84,12 @@ public class AclsController extends AbstractController implements AclsApi {
|
|||
|
||||
var filter = new ResourcePatternFilter(resourceType, resourceName, namePatternType);
|
||||
|
||||
return accessControlService.validateAccess(context).then(
|
||||
return validateAccess(context).then(
|
||||
Mono.just(
|
||||
ResponseEntity.ok(
|
||||
aclsService.listAcls(getCluster(clusterName), filter)
|
||||
.map(ClusterMapper::toKafkaAclDto)))
|
||||
).doOnEach(sig -> auditService.audit(context, sig));
|
||||
).doOnEach(sig -> audit(context, sig));
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -104,11 +100,11 @@ public class AclsController extends AbstractController implements AclsApi {
|
|||
.operationName("getAclAsCsv")
|
||||
.build();
|
||||
|
||||
return accessControlService.validateAccess(context).then(
|
||||
return validateAccess(context).then(
|
||||
aclsService.getAclAsCsvString(getCluster(clusterName))
|
||||
.map(ResponseEntity::ok)
|
||||
.flatMap(Mono::just)
|
||||
.doOnEach(sig -> auditService.audit(context, sig))
|
||||
.doOnEach(sig -> audit(context, sig))
|
||||
);
|
||||
}
|
||||
|
||||
|
@ -120,10 +116,10 @@ public class AclsController extends AbstractController implements AclsApi {
|
|||
.operationName("syncAclsCsv")
|
||||
.build();
|
||||
|
||||
return accessControlService.validateAccess(context)
|
||||
return validateAccess(context)
|
||||
.then(csvMono)
|
||||
.flatMap(csv -> aclsService.syncAclWithAclCsv(getCluster(clusterName), csv))
|
||||
.doOnEach(sig -> auditService.audit(context, sig))
|
||||
.doOnEach(sig -> audit(context, sig))
|
||||
.thenReturn(ResponseEntity.ok().build());
|
||||
}
|
||||
|
||||
|
@ -137,10 +133,10 @@ public class AclsController extends AbstractController implements AclsApi {
|
|||
.operationName("createConsumerAcl")
|
||||
.build();
|
||||
|
||||
return accessControlService.validateAccess(context)
|
||||
return validateAccess(context)
|
||||
.then(createConsumerAclDto)
|
||||
.flatMap(req -> aclsService.createConsumerAcl(getCluster(clusterName), req))
|
||||
.doOnEach(sig -> auditService.audit(context, sig))
|
||||
.doOnEach(sig -> audit(context, sig))
|
||||
.thenReturn(ResponseEntity.ok().build());
|
||||
}
|
||||
|
||||
|
@ -154,10 +150,10 @@ public class AclsController extends AbstractController implements AclsApi {
|
|||
.operationName("createProducerAcl")
|
||||
.build();
|
||||
|
||||
return accessControlService.validateAccess(context)
|
||||
return validateAccess(context)
|
||||
.then(createProducerAclDto)
|
||||
.flatMap(req -> aclsService.createProducerAcl(getCluster(clusterName), req))
|
||||
.doOnEach(sig -> auditService.audit(context, sig))
|
||||
.doOnEach(sig -> audit(context, sig))
|
||||
.thenReturn(ResponseEntity.ok().build());
|
||||
}
|
||||
|
||||
|
@ -171,10 +167,10 @@ public class AclsController extends AbstractController implements AclsApi {
|
|||
.operationName("createStreamAppAcl")
|
||||
.build();
|
||||
|
||||
return accessControlService.validateAccess(context)
|
||||
return validateAccess(context)
|
||||
.then(createStreamAppAclDto)
|
||||
.flatMap(req -> aclsService.createStreamAppAcl(getCluster(clusterName), req))
|
||||
.doOnEach(sig -> auditService.audit(context, sig))
|
||||
.doOnEach(sig -> audit(context, sig))
|
||||
.thenReturn(ResponseEntity.ok().build());
|
||||
}
|
||||
}
|
||||
|
|
|
@ -15,8 +15,6 @@ import com.provectus.kafka.ui.model.UploadedFileInfoDTO;
|
|||
import com.provectus.kafka.ui.model.rbac.AccessContext;
|
||||
import com.provectus.kafka.ui.service.ApplicationInfoService;
|
||||
import com.provectus.kafka.ui.service.KafkaClusterFactory;
|
||||
import com.provectus.kafka.ui.service.audit.AuditService;
|
||||
import com.provectus.kafka.ui.service.rbac.AccessControlService;
|
||||
import com.provectus.kafka.ui.util.ApplicationRestarter;
|
||||
import com.provectus.kafka.ui.util.DynamicConfigOperations;
|
||||
import com.provectus.kafka.ui.util.DynamicConfigOperations.PropertiesStructure;
|
||||
|
@ -39,7 +37,7 @@ import reactor.util.function.Tuples;
|
|||
@Slf4j
|
||||
@RestController
|
||||
@RequiredArgsConstructor
|
||||
public class ApplicationConfigController implements ApplicationConfigApi {
|
||||
public class ApplicationConfigController extends AbstractController implements ApplicationConfigApi {
|
||||
|
||||
private static final PropertiesMapper MAPPER = Mappers.getMapper(PropertiesMapper.class);
|
||||
|
||||
|
@ -51,12 +49,10 @@ public class ApplicationConfigController implements ApplicationConfigApi {
|
|||
ApplicationConfigPropertiesDTO toDto(PropertiesStructure propertiesStructure);
|
||||
}
|
||||
|
||||
private final AccessControlService accessControlService;
|
||||
private final DynamicConfigOperations dynamicConfigOperations;
|
||||
private final ApplicationRestarter restarter;
|
||||
private final KafkaClusterFactory kafkaClusterFactory;
|
||||
private final ApplicationInfoService applicationInfoService;
|
||||
private final AuditService auditService;
|
||||
|
||||
@Override
|
||||
public Mono<ResponseEntity<ApplicationInfoDTO>> getApplicationInfo(ServerWebExchange exchange) {
|
||||
|
@ -69,12 +65,12 @@ public class ApplicationConfigController implements ApplicationConfigApi {
|
|||
.applicationConfigActions(VIEW)
|
||||
.operationName("getCurrentConfig")
|
||||
.build();
|
||||
return accessControlService.validateAccess(context)
|
||||
return validateAccess(context)
|
||||
.then(Mono.fromSupplier(() -> ResponseEntity.ok(
|
||||
new ApplicationConfigDTO()
|
||||
.properties(MAPPER.toDto(dynamicConfigOperations.getCurrentProperties()))
|
||||
)))
|
||||
.doOnEach(sig -> auditService.audit(context, sig));
|
||||
.doOnEach(sig -> audit(context, sig));
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -84,14 +80,15 @@ public class ApplicationConfigController implements ApplicationConfigApi {
|
|||
.applicationConfigActions(EDIT)
|
||||
.operationName("restartWithConfig")
|
||||
.build();
|
||||
return accessControlService.validateAccess(context)
|
||||
return validateAccess(context)
|
||||
.then(restartRequestDto)
|
||||
.<ResponseEntity<Void>>map(dto -> {
|
||||
dynamicConfigOperations.persist(MAPPER.fromDto(dto.getConfig().getProperties()));
|
||||
restarter.requestRestart();
|
||||
return ResponseEntity.ok().build();
|
||||
.doOnNext(restartDto -> {
|
||||
var newConfig = MAPPER.fromDto(restartDto.getConfig().getProperties());
|
||||
dynamicConfigOperations.persist(newConfig);
|
||||
})
|
||||
.doOnEach(sig -> auditService.audit(context, sig));
|
||||
.doOnEach(sig -> audit(context, sig))
|
||||
.doOnSuccess(dto -> restarter.requestRestart())
|
||||
.map(dto -> ResponseEntity.ok().build());
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -101,13 +98,13 @@ public class ApplicationConfigController implements ApplicationConfigApi {
|
|||
.applicationConfigActions(EDIT)
|
||||
.operationName("uploadConfigRelatedFile")
|
||||
.build();
|
||||
return accessControlService.validateAccess(context)
|
||||
return validateAccess(context)
|
||||
.then(fileFlux.single())
|
||||
.flatMap(file ->
|
||||
dynamicConfigOperations.uploadConfigRelatedFile((FilePart) file)
|
||||
.map(path -> new UploadedFileInfoDTO().location(path.toString()))
|
||||
.map(ResponseEntity::ok))
|
||||
.doOnEach(sig -> auditService.audit(context, sig));
|
||||
.doOnEach(sig -> audit(context, sig));
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -117,16 +114,16 @@ public class ApplicationConfigController implements ApplicationConfigApi {
|
|||
.applicationConfigActions(EDIT)
|
||||
.operationName("validateConfig")
|
||||
.build();
|
||||
return accessControlService.validateAccess(context)
|
||||
return validateAccess(context)
|
||||
.then(configDto)
|
||||
.flatMap(config -> {
|
||||
PropertiesStructure propertiesStructure = MAPPER.fromDto(config.getProperties());
|
||||
ClustersProperties clustersProperties = propertiesStructure.getKafka();
|
||||
PropertiesStructure newConfig = MAPPER.fromDto(config.getProperties());
|
||||
ClustersProperties clustersProperties = newConfig.getKafka();
|
||||
return validateClustersConfig(clustersProperties)
|
||||
.map(validations -> new ApplicationConfigValidationDTO().clusters(validations));
|
||||
})
|
||||
.map(ResponseEntity::ok)
|
||||
.doOnEach(sig -> auditService.audit(context, sig));
|
||||
.doOnEach(sig -> audit(context, sig));
|
||||
}
|
||||
|
||||
private Mono<Map<String, ClusterConfigValidationDTO>> validateClustersConfig(
|
||||
|
|
|
@ -36,10 +36,10 @@ public class AuthController {
|
|||
+ " <meta name=\"description\" content=\"\">\n"
|
||||
+ " <meta name=\"author\" content=\"\">\n"
|
||||
+ " <title>Please sign in</title>\n"
|
||||
+ " <link href=\"/static/css/bootstrap.min.css\" rel=\"stylesheet\" "
|
||||
+ " <link href=\"" + contextPath + "/static/css/bootstrap.min.css\" rel=\"stylesheet\" "
|
||||
+ "integrity=\"sha384-/Y6pD6FV/Vv2HJnA6t+vslU6fwYXjCFtcEpHbNJ0lyAFsXTsjBbfaDjzALeQsN6M\" "
|
||||
+ "crossorigin=\"anonymous\">\n"
|
||||
+ " <link href=\"/static/css/signin.css\" "
|
||||
+ " <link href=\"" + contextPath + "/static/css/signin.css\" "
|
||||
+ "rel=\"stylesheet\" crossorigin=\"anonymous\"/>\n"
|
||||
+ " </head>\n"
|
||||
+ " <body>\n"
|
||||
|
|
|
@ -11,8 +11,6 @@ import com.provectus.kafka.ui.model.BrokersLogdirsDTO;
|
|||
import com.provectus.kafka.ui.model.rbac.AccessContext;
|
||||
import com.provectus.kafka.ui.model.rbac.permission.ClusterConfigAction;
|
||||
import com.provectus.kafka.ui.service.BrokerService;
|
||||
import com.provectus.kafka.ui.service.audit.AuditService;
|
||||
import com.provectus.kafka.ui.service.rbac.AccessControlService;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import javax.annotation.Nullable;
|
||||
|
@ -31,9 +29,6 @@ public class BrokersController extends AbstractController implements BrokersApi
|
|||
private final BrokerService brokerService;
|
||||
private final ClusterMapper clusterMapper;
|
||||
|
||||
private final AuditService auditService;
|
||||
private final AccessControlService accessControlService;
|
||||
|
||||
@Override
|
||||
public Mono<ResponseEntity<Flux<BrokerDTO>>> getBrokers(String clusterName,
|
||||
ServerWebExchange exchange) {
|
||||
|
@ -43,9 +38,9 @@ public class BrokersController extends AbstractController implements BrokersApi
|
|||
.build();
|
||||
|
||||
var job = brokerService.getBrokers(getCluster(clusterName)).map(clusterMapper::toBrokerDto);
|
||||
return accessControlService.validateAccess(context)
|
||||
return validateAccess(context)
|
||||
.thenReturn(ResponseEntity.ok(job))
|
||||
.doOnEach(sig -> auditService.audit(context, sig));
|
||||
.doOnEach(sig -> audit(context, sig));
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -57,14 +52,14 @@ public class BrokersController extends AbstractController implements BrokersApi
|
|||
.operationParams(Map.of("id", id))
|
||||
.build();
|
||||
|
||||
return accessControlService.validateAccess(context)
|
||||
return validateAccess(context)
|
||||
.then(
|
||||
brokerService.getBrokerMetrics(getCluster(clusterName), id)
|
||||
.map(clusterMapper::toBrokerMetrics)
|
||||
.map(ResponseEntity::ok)
|
||||
.onErrorReturn(ResponseEntity.notFound().build())
|
||||
)
|
||||
.doOnEach(sig -> auditService.audit(context, sig));
|
||||
.doOnEach(sig -> audit(context, sig));
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -80,10 +75,10 @@ public class BrokersController extends AbstractController implements BrokersApi
|
|||
.operationParams(Map.of("brokerIds", brokerIds))
|
||||
.build();
|
||||
|
||||
return accessControlService.validateAccess(context)
|
||||
return validateAccess(context)
|
||||
.thenReturn(ResponseEntity.ok(
|
||||
brokerService.getAllBrokersLogdirs(getCluster(clusterName), brokerIds)))
|
||||
.doOnEach(sig -> auditService.audit(context, sig));
|
||||
.doOnEach(sig -> audit(context, sig));
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -97,11 +92,11 @@ public class BrokersController extends AbstractController implements BrokersApi
|
|||
.operationParams(Map.of("brokerId", id))
|
||||
.build();
|
||||
|
||||
return accessControlService.validateAccess(context).thenReturn(
|
||||
return validateAccess(context).thenReturn(
|
||||
ResponseEntity.ok(
|
||||
brokerService.getBrokerConfig(getCluster(clusterName), id)
|
||||
.map(clusterMapper::toBrokerConfig))
|
||||
).doOnEach(sig -> auditService.audit(context, sig));
|
||||
).doOnEach(sig -> audit(context, sig));
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -116,11 +111,11 @@ public class BrokersController extends AbstractController implements BrokersApi
|
|||
.operationParams(Map.of("brokerId", id))
|
||||
.build();
|
||||
|
||||
return accessControlService.validateAccess(context).then(
|
||||
return validateAccess(context).then(
|
||||
brokerLogdir
|
||||
.flatMap(bld -> brokerService.updateBrokerLogDir(getCluster(clusterName), id, bld))
|
||||
.map(ResponseEntity::ok)
|
||||
).doOnEach(sig -> auditService.audit(context, sig));
|
||||
).doOnEach(sig -> audit(context, sig));
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -136,11 +131,11 @@ public class BrokersController extends AbstractController implements BrokersApi
|
|||
.operationParams(Map.of("brokerId", id))
|
||||
.build();
|
||||
|
||||
return accessControlService.validateAccess(context).then(
|
||||
return validateAccess(context).then(
|
||||
brokerConfig
|
||||
.flatMap(bci -> brokerService.updateBrokerConfigByName(
|
||||
getCluster(clusterName), id, name, bci.getValue()))
|
||||
.map(ResponseEntity::ok)
|
||||
).doOnEach(sig -> auditService.audit(context, sig));
|
||||
).doOnEach(sig -> audit(context, sig));
|
||||
}
|
||||
}
|
||||
|
|
|
@ -6,8 +6,6 @@ import com.provectus.kafka.ui.model.ClusterMetricsDTO;
|
|||
import com.provectus.kafka.ui.model.ClusterStatsDTO;
|
||||
import com.provectus.kafka.ui.model.rbac.AccessContext;
|
||||
import com.provectus.kafka.ui.service.ClusterService;
|
||||
import com.provectus.kafka.ui.service.audit.AuditService;
|
||||
import com.provectus.kafka.ui.service.rbac.AccessControlService;
|
||||
import lombok.RequiredArgsConstructor;
|
||||
import lombok.extern.slf4j.Slf4j;
|
||||
import org.springframework.http.ResponseEntity;
|
||||
|
@ -21,8 +19,6 @@ import reactor.core.publisher.Mono;
|
|||
@Slf4j
|
||||
public class ClustersController extends AbstractController implements ClustersApi {
|
||||
private final ClusterService clusterService;
|
||||
private final AccessControlService accessControlService;
|
||||
private final AuditService auditService;
|
||||
|
||||
@Override
|
||||
public Mono<ResponseEntity<Flux<ClusterDTO>>> getClusters(ServerWebExchange exchange) {
|
||||
|
@ -40,13 +36,13 @@ public class ClustersController extends AbstractController implements ClustersAp
|
|||
.operationName("getClusterMetrics")
|
||||
.build();
|
||||
|
||||
return accessControlService.validateAccess(context)
|
||||
return validateAccess(context)
|
||||
.then(
|
||||
clusterService.getClusterMetrics(getCluster(clusterName))
|
||||
.map(ResponseEntity::ok)
|
||||
.onErrorReturn(ResponseEntity.notFound().build())
|
||||
)
|
||||
.doOnEach(sig -> auditService.audit(context, sig));
|
||||
.doOnEach(sig -> audit(context, sig));
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -57,13 +53,13 @@ public class ClustersController extends AbstractController implements ClustersAp
|
|||
.operationName("getClusterStats")
|
||||
.build();
|
||||
|
||||
return accessControlService.validateAccess(context)
|
||||
return validateAccess(context)
|
||||
.then(
|
||||
clusterService.getClusterStats(getCluster(clusterName))
|
||||
.map(ResponseEntity::ok)
|
||||
.onErrorReturn(ResponseEntity.notFound().build())
|
||||
)
|
||||
.doOnEach(sig -> auditService.audit(context, sig));
|
||||
.doOnEach(sig -> audit(context, sig));
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -75,8 +71,8 @@ public class ClustersController extends AbstractController implements ClustersAp
|
|||
.operationName("updateClusterInfo")
|
||||
.build();
|
||||
|
||||
return accessControlService.validateAccess(context)
|
||||
return validateAccess(context)
|
||||
.then(clusterService.updateCluster(getCluster(clusterName)).map(ResponseEntity::ok))
|
||||
.doOnEach(sig -> auditService.audit(context, sig));
|
||||
.doOnEach(sig -> audit(context, sig));
|
||||
}
|
||||
}
|
||||
|
|
|
@ -19,8 +19,6 @@ import com.provectus.kafka.ui.model.rbac.AccessContext;
|
|||
import com.provectus.kafka.ui.model.rbac.permission.TopicAction;
|
||||
import com.provectus.kafka.ui.service.ConsumerGroupService;
|
||||
import com.provectus.kafka.ui.service.OffsetsResetService;
|
||||
import com.provectus.kafka.ui.service.audit.AuditService;
|
||||
import com.provectus.kafka.ui.service.rbac.AccessControlService;
|
||||
import java.util.Map;
|
||||
import java.util.Optional;
|
||||
import java.util.function.Supplier;
|
||||
|
@ -42,8 +40,6 @@ public class ConsumerGroupsController extends AbstractController implements Cons
|
|||
|
||||
private final ConsumerGroupService consumerGroupService;
|
||||
private final OffsetsResetService offsetsResetService;
|
||||
private final AccessControlService accessControlService;
|
||||
private final AuditService auditService;
|
||||
|
||||
@Value("${consumer.groups.page.size:25}")
|
||||
private int defaultConsumerGroupsPageSize;
|
||||
|
@ -59,9 +55,9 @@ public class ConsumerGroupsController extends AbstractController implements Cons
|
|||
.operationName("deleteConsumerGroup")
|
||||
.build();
|
||||
|
||||
return accessControlService.validateAccess(context)
|
||||
return validateAccess(context)
|
||||
.then(consumerGroupService.deleteConsumerGroupById(getCluster(clusterName), id))
|
||||
.doOnEach(sig -> auditService.audit(context, sig))
|
||||
.doOnEach(sig -> audit(context, sig))
|
||||
.thenReturn(ResponseEntity.ok().build());
|
||||
}
|
||||
|
||||
|
@ -76,11 +72,11 @@ public class ConsumerGroupsController extends AbstractController implements Cons
|
|||
.operationName("getConsumerGroup")
|
||||
.build();
|
||||
|
||||
return accessControlService.validateAccess(context)
|
||||
return validateAccess(context)
|
||||
.then(consumerGroupService.getConsumerGroupDetail(getCluster(clusterName), consumerGroupId)
|
||||
.map(ConsumerGroupMapper::toDetailsDto)
|
||||
.map(ResponseEntity::ok))
|
||||
.doOnEach(sig -> auditService.audit(context, sig));
|
||||
.doOnEach(sig -> audit(context, sig));
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -104,9 +100,9 @@ public class ConsumerGroupsController extends AbstractController implements Cons
|
|||
.map(ResponseEntity::ok)
|
||||
.switchIfEmpty(Mono.just(ResponseEntity.notFound().build()));
|
||||
|
||||
return accessControlService.validateAccess(context)
|
||||
return validateAccess(context)
|
||||
.then(job)
|
||||
.doOnEach(sig -> auditService.audit(context, sig));
|
||||
.doOnEach(sig -> audit(context, sig));
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -125,7 +121,7 @@ public class ConsumerGroupsController extends AbstractController implements Cons
|
|||
.operationName("getConsumerGroupsPage")
|
||||
.build();
|
||||
|
||||
return accessControlService.validateAccess(context).then(
|
||||
return validateAccess(context).then(
|
||||
consumerGroupService.getConsumerGroupsPage(
|
||||
getCluster(clusterName),
|
||||
Optional.ofNullable(page).filter(i -> i > 0).orElse(1),
|
||||
|
@ -136,7 +132,7 @@ public class ConsumerGroupsController extends AbstractController implements Cons
|
|||
)
|
||||
.map(this::convertPage)
|
||||
.map(ResponseEntity::ok)
|
||||
).doOnEach(sig -> auditService.audit(context, sig));
|
||||
).doOnEach(sig -> audit(context, sig));
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -191,9 +187,9 @@ public class ConsumerGroupsController extends AbstractController implements Cons
|
|||
}
|
||||
};
|
||||
|
||||
return accessControlService.validateAccess(context)
|
||||
return validateAccess(context)
|
||||
.then(mono.get())
|
||||
.doOnEach(sig -> auditService.audit(context, sig));
|
||||
.doOnEach(sig -> audit(context, sig));
|
||||
}).thenReturn(ResponseEntity.ok().build());
|
||||
}
|
||||
|
||||
|
|
|
@ -18,8 +18,6 @@ import com.provectus.kafka.ui.model.TaskDTO;
|
|||
import com.provectus.kafka.ui.model.rbac.AccessContext;
|
||||
import com.provectus.kafka.ui.model.rbac.permission.ConnectAction;
|
||||
import com.provectus.kafka.ui.service.KafkaConnectService;
|
||||
import com.provectus.kafka.ui.service.audit.AuditService;
|
||||
import com.provectus.kafka.ui.service.rbac.AccessControlService;
|
||||
import java.util.Comparator;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
|
@ -40,8 +38,6 @@ public class KafkaConnectController extends AbstractController implements KafkaC
|
|||
= Set.of(RESTART, RESTART_FAILED_TASKS, RESTART_ALL_TASKS);
|
||||
|
||||
private final KafkaConnectService kafkaConnectService;
|
||||
private final AccessControlService accessControlService;
|
||||
private final AuditService auditService;
|
||||
|
||||
@Override
|
||||
public Mono<ResponseEntity<Flux<ConnectDTO>>> getConnects(String clusterName,
|
||||
|
@ -64,9 +60,9 @@ public class KafkaConnectController extends AbstractController implements KafkaC
|
|||
.operationName("getConnectors")
|
||||
.build();
|
||||
|
||||
return accessControlService.validateAccess(context)
|
||||
return validateAccess(context)
|
||||
.thenReturn(ResponseEntity.ok(kafkaConnectService.getConnectorNames(getCluster(clusterName), connectName)))
|
||||
.doOnEach(sig -> auditService.audit(context, sig));
|
||||
.doOnEach(sig -> audit(context, sig));
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -81,10 +77,10 @@ public class KafkaConnectController extends AbstractController implements KafkaC
|
|||
.operationName("createConnector")
|
||||
.build();
|
||||
|
||||
return accessControlService.validateAccess(context).then(
|
||||
return validateAccess(context).then(
|
||||
kafkaConnectService.createConnector(getCluster(clusterName), connectName, connector)
|
||||
.map(ResponseEntity::ok)
|
||||
).doOnEach(sig -> auditService.audit(context, sig));
|
||||
).doOnEach(sig -> audit(context, sig));
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -100,10 +96,10 @@ public class KafkaConnectController extends AbstractController implements KafkaC
|
|||
.operationName("getConnector")
|
||||
.build();
|
||||
|
||||
return accessControlService.validateAccess(context).then(
|
||||
return validateAccess(context).then(
|
||||
kafkaConnectService.getConnector(getCluster(clusterName), connectName, connectorName)
|
||||
.map(ResponseEntity::ok)
|
||||
).doOnEach(sig -> auditService.audit(context, sig));
|
||||
).doOnEach(sig -> audit(context, sig));
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -119,10 +115,10 @@ public class KafkaConnectController extends AbstractController implements KafkaC
|
|||
.operationParams(Map.of("connectorName", connectName))
|
||||
.build();
|
||||
|
||||
return accessControlService.validateAccess(context).then(
|
||||
return validateAccess(context).then(
|
||||
kafkaConnectService.deleteConnector(getCluster(clusterName), connectName, connectorName)
|
||||
.map(ResponseEntity::ok)
|
||||
).doOnEach(sig -> auditService.audit(context, sig));
|
||||
).doOnEach(sig -> audit(context, sig));
|
||||
}
|
||||
|
||||
|
||||
|
@ -150,7 +146,7 @@ public class KafkaConnectController extends AbstractController implements KafkaC
|
|||
.sort(comparator);
|
||||
|
||||
return Mono.just(ResponseEntity.ok(job))
|
||||
.doOnEach(sig -> auditService.audit(context, sig));
|
||||
.doOnEach(sig -> audit(context, sig));
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -166,11 +162,11 @@ public class KafkaConnectController extends AbstractController implements KafkaC
|
|||
.operationName("getConnectorConfig")
|
||||
.build();
|
||||
|
||||
return accessControlService.validateAccess(context).then(
|
||||
return validateAccess(context).then(
|
||||
kafkaConnectService
|
||||
.getConnectorConfig(getCluster(clusterName), connectName, connectorName)
|
||||
.map(ResponseEntity::ok)
|
||||
).doOnEach(sig -> auditService.audit(context, sig));
|
||||
).doOnEach(sig -> audit(context, sig));
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -187,11 +183,11 @@ public class KafkaConnectController extends AbstractController implements KafkaC
|
|||
.operationParams(Map.of("connectorName", connectorName))
|
||||
.build();
|
||||
|
||||
return accessControlService.validateAccess(context).then(
|
||||
return validateAccess(context).then(
|
||||
kafkaConnectService
|
||||
.setConnectorConfig(getCluster(clusterName), connectName, connectorName, requestBody)
|
||||
.map(ResponseEntity::ok))
|
||||
.doOnEach(sig -> auditService.audit(context, sig));
|
||||
.doOnEach(sig -> audit(context, sig));
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -214,11 +210,11 @@ public class KafkaConnectController extends AbstractController implements KafkaC
|
|||
.operationParams(Map.of("connectorName", connectorName))
|
||||
.build();
|
||||
|
||||
return accessControlService.validateAccess(context).then(
|
||||
return validateAccess(context).then(
|
||||
kafkaConnectService
|
||||
.updateConnectorState(getCluster(clusterName), connectName, connectorName, action)
|
||||
.map(ResponseEntity::ok)
|
||||
).doOnEach(sig -> auditService.audit(context, sig));
|
||||
).doOnEach(sig -> audit(context, sig));
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -234,11 +230,11 @@ public class KafkaConnectController extends AbstractController implements KafkaC
|
|||
.operationParams(Map.of("connectorName", connectorName))
|
||||
.build();
|
||||
|
||||
return accessControlService.validateAccess(context).thenReturn(
|
||||
return validateAccess(context).thenReturn(
|
||||
ResponseEntity
|
||||
.ok(kafkaConnectService
|
||||
.getConnectorTasks(getCluster(clusterName), connectName, connectorName))
|
||||
).doOnEach(sig -> auditService.audit(context, sig));
|
||||
).doOnEach(sig -> audit(context, sig));
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -254,11 +250,11 @@ public class KafkaConnectController extends AbstractController implements KafkaC
|
|||
.operationParams(Map.of("connectorName", connectorName))
|
||||
.build();
|
||||
|
||||
return accessControlService.validateAccess(context).then(
|
||||
return validateAccess(context).then(
|
||||
kafkaConnectService
|
||||
.restartConnectorTask(getCluster(clusterName), connectName, connectorName, taskId)
|
||||
.map(ResponseEntity::ok)
|
||||
).doOnEach(sig -> auditService.audit(context, sig));
|
||||
).doOnEach(sig -> audit(context, sig));
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -272,11 +268,11 @@ public class KafkaConnectController extends AbstractController implements KafkaC
|
|||
.operationName("getConnectorPlugins")
|
||||
.build();
|
||||
|
||||
return accessControlService.validateAccess(context).then(
|
||||
return validateAccess(context).then(
|
||||
Mono.just(
|
||||
ResponseEntity.ok(
|
||||
kafkaConnectService.getConnectorPlugins(getCluster(clusterName), connectName)))
|
||||
).doOnEach(sig -> auditService.audit(context, sig));
|
||||
).doOnEach(sig -> audit(context, sig));
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
|
@ -9,9 +9,7 @@ import com.provectus.kafka.ui.model.KsqlTableDescriptionDTO;
|
|||
import com.provectus.kafka.ui.model.KsqlTableResponseDTO;
|
||||
import com.provectus.kafka.ui.model.rbac.AccessContext;
|
||||
import com.provectus.kafka.ui.model.rbac.permission.KsqlAction;
|
||||
import com.provectus.kafka.ui.service.audit.AuditService;
|
||||
import com.provectus.kafka.ui.service.ksql.KsqlServiceV2;
|
||||
import com.provectus.kafka.ui.service.rbac.AccessControlService;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Optional;
|
||||
|
@ -29,8 +27,6 @@ import reactor.core.publisher.Mono;
|
|||
public class KsqlController extends AbstractController implements KsqlApi {
|
||||
|
||||
private final KsqlServiceV2 ksqlServiceV2;
|
||||
private final AccessControlService accessControlService;
|
||||
private final AuditService auditService;
|
||||
|
||||
@Override
|
||||
public Mono<ResponseEntity<KsqlCommandV2ResponseDTO>> executeKsql(String clusterName,
|
||||
|
@ -44,13 +40,13 @@ public class KsqlController extends AbstractController implements KsqlApi {
|
|||
.operationName("executeKsql")
|
||||
.operationParams(command)
|
||||
.build();
|
||||
return accessControlService.validateAccess(context).thenReturn(
|
||||
return validateAccess(context).thenReturn(
|
||||
new KsqlCommandV2ResponseDTO().pipeId(
|
||||
ksqlServiceV2.registerCommand(
|
||||
getCluster(clusterName),
|
||||
command.getKsql(),
|
||||
Optional.ofNullable(command.getStreamsProperties()).orElse(Map.of()))))
|
||||
.doOnEach(sig -> auditService.audit(context, sig));
|
||||
.doOnEach(sig -> audit(context, sig));
|
||||
}
|
||||
)
|
||||
.map(ResponseEntity::ok);
|
||||
|
@ -66,7 +62,7 @@ public class KsqlController extends AbstractController implements KsqlApi {
|
|||
.operationName("openKsqlResponsePipe")
|
||||
.build();
|
||||
|
||||
return accessControlService.validateAccess(context).thenReturn(
|
||||
return validateAccess(context).thenReturn(
|
||||
ResponseEntity.ok(ksqlServiceV2.execute(pipeId)
|
||||
.map(table -> new KsqlResponseDTO()
|
||||
.table(
|
||||
|
@ -86,9 +82,9 @@ public class KsqlController extends AbstractController implements KsqlApi {
|
|||
.operationName("listStreams")
|
||||
.build();
|
||||
|
||||
return accessControlService.validateAccess(context)
|
||||
return validateAccess(context)
|
||||
.thenReturn(ResponseEntity.ok(ksqlServiceV2.listStreams(getCluster(clusterName))))
|
||||
.doOnEach(sig -> auditService.audit(context, sig));
|
||||
.doOnEach(sig -> audit(context, sig));
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -100,8 +96,8 @@ public class KsqlController extends AbstractController implements KsqlApi {
|
|||
.operationName("listTables")
|
||||
.build();
|
||||
|
||||
return accessControlService.validateAccess(context)
|
||||
return validateAccess(context)
|
||||
.thenReturn(ResponseEntity.ok(ksqlServiceV2.listTables(getCluster(clusterName))))
|
||||
.doOnEach(sig -> auditService.audit(context, sig));
|
||||
.doOnEach(sig -> audit(context, sig));
|
||||
}
|
||||
}
|
||||
|
|
|
@ -24,8 +24,6 @@ import com.provectus.kafka.ui.model.rbac.permission.AuditAction;
|
|||
import com.provectus.kafka.ui.model.rbac.permission.TopicAction;
|
||||
import com.provectus.kafka.ui.service.DeserializationService;
|
||||
import com.provectus.kafka.ui.service.MessagesService;
|
||||
import com.provectus.kafka.ui.service.audit.AuditService;
|
||||
import com.provectus.kafka.ui.service.rbac.AccessControlService;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Optional;
|
||||
|
@ -49,8 +47,6 @@ public class MessagesController extends AbstractController implements MessagesAp
|
|||
|
||||
private final MessagesService messagesService;
|
||||
private final DeserializationService deserializationService;
|
||||
private final AccessControlService accessControlService;
|
||||
private final AuditService auditService;
|
||||
|
||||
@Override
|
||||
public Mono<ResponseEntity<Void>> deleteTopicMessages(
|
||||
|
@ -63,13 +59,13 @@ public class MessagesController extends AbstractController implements MessagesAp
|
|||
.topicActions(MESSAGES_DELETE)
|
||||
.build();
|
||||
|
||||
return accessControlService.validateAccess(context).<ResponseEntity<Void>>then(
|
||||
return validateAccess(context).<ResponseEntity<Void>>then(
|
||||
messagesService.deleteTopicMessages(
|
||||
getCluster(clusterName),
|
||||
topicName,
|
||||
Optional.ofNullable(partitions).orElse(List.of())
|
||||
).thenReturn(ResponseEntity.ok().build())
|
||||
).doOnEach(sig -> auditService.audit(context, sig));
|
||||
).doOnEach(sig -> audit(context, sig));
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -120,9 +116,9 @@ public class MessagesController extends AbstractController implements MessagesAp
|
|||
);
|
||||
|
||||
var context = contextBuilder.build();
|
||||
return accessControlService.validateAccess(context)
|
||||
return validateAccess(context)
|
||||
.then(job)
|
||||
.doOnEach(sig -> auditService.audit(context, sig));
|
||||
.doOnEach(sig -> audit(context, sig));
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -137,11 +133,11 @@ public class MessagesController extends AbstractController implements MessagesAp
|
|||
.operationName("sendTopicMessages")
|
||||
.build();
|
||||
|
||||
return accessControlService.validateAccess(context).then(
|
||||
return validateAccess(context).then(
|
||||
createTopicMessage.flatMap(msg ->
|
||||
messagesService.sendMessage(getCluster(clusterName), topicName, msg).then()
|
||||
).map(ResponseEntity::ok)
|
||||
).doOnEach(sig -> auditService.audit(context, sig));
|
||||
).doOnEach(sig -> audit(context, sig));
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -192,7 +188,7 @@ public class MessagesController extends AbstractController implements MessagesAp
|
|||
? deserializationService.getSerdesForSerialize(getCluster(clusterName), topicName, VALUE)
|
||||
: deserializationService.getSerdesForDeserialize(getCluster(clusterName), topicName, VALUE));
|
||||
|
||||
return accessControlService.validateAccess(context).then(
|
||||
return validateAccess(context).then(
|
||||
Mono.just(dto)
|
||||
.subscribeOn(Schedulers.boundedElastic())
|
||||
.map(ResponseEntity::ok)
|
||||
|
|
|
@ -13,8 +13,6 @@ import com.provectus.kafka.ui.model.SchemaSubjectsResponseDTO;
|
|||
import com.provectus.kafka.ui.model.rbac.AccessContext;
|
||||
import com.provectus.kafka.ui.model.rbac.permission.SchemaAction;
|
||||
import com.provectus.kafka.ui.service.SchemaRegistryService;
|
||||
import com.provectus.kafka.ui.service.audit.AuditService;
|
||||
import com.provectus.kafka.ui.service.rbac.AccessControlService;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.stream.Collectors;
|
||||
|
@ -38,8 +36,6 @@ public class SchemasController extends AbstractController implements SchemasApi
|
|||
private final KafkaSrMapper kafkaSrMapper = new KafkaSrMapperImpl();
|
||||
|
||||
private final SchemaRegistryService schemaRegistryService;
|
||||
private final AccessControlService accessControlService;
|
||||
private final AuditService auditService;
|
||||
|
||||
@Override
|
||||
protected KafkaCluster getCluster(String clusterName) {
|
||||
|
@ -61,7 +57,7 @@ public class SchemasController extends AbstractController implements SchemasApi
|
|||
.operationName("checkSchemaCompatibility")
|
||||
.build();
|
||||
|
||||
return accessControlService.validateAccess(context).then(
|
||||
return validateAccess(context).then(
|
||||
newSchemaSubjectMono.flatMap(subjectDTO ->
|
||||
schemaRegistryService.checksSchemaCompatibility(
|
||||
getCluster(clusterName),
|
||||
|
@ -70,7 +66,7 @@ public class SchemasController extends AbstractController implements SchemasApi
|
|||
))
|
||||
.map(kafkaSrMapper::toDto)
|
||||
.map(ResponseEntity::ok)
|
||||
).doOnEach(sig -> auditService.audit(context, sig));
|
||||
).doOnEach(sig -> audit(context, sig));
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -83,7 +79,7 @@ public class SchemasController extends AbstractController implements SchemasApi
|
|||
.operationName("createNewSchema")
|
||||
.build();
|
||||
|
||||
return accessControlService.validateAccess(context).then(
|
||||
return validateAccess(context).then(
|
||||
newSchemaSubjectMono.flatMap(newSubject ->
|
||||
schemaRegistryService.registerNewSchema(
|
||||
getCluster(clusterName),
|
||||
|
@ -92,7 +88,7 @@ public class SchemasController extends AbstractController implements SchemasApi
|
|||
)
|
||||
).map(kafkaSrMapper::toDto)
|
||||
.map(ResponseEntity::ok)
|
||||
).doOnEach(sig -> auditService.audit(context, sig));
|
||||
).doOnEach(sig -> audit(context, sig));
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -105,9 +101,9 @@ public class SchemasController extends AbstractController implements SchemasApi
|
|||
.operationName("deleteLatestSchema")
|
||||
.build();
|
||||
|
||||
return accessControlService.validateAccess(context).then(
|
||||
return validateAccess(context).then(
|
||||
schemaRegistryService.deleteLatestSchemaSubject(getCluster(clusterName), subject)
|
||||
.doOnEach(sig -> auditService.audit(context, sig))
|
||||
.doOnEach(sig -> audit(context, sig))
|
||||
.thenReturn(ResponseEntity.ok().build())
|
||||
);
|
||||
}
|
||||
|
@ -122,9 +118,9 @@ public class SchemasController extends AbstractController implements SchemasApi
|
|||
.operationName("deleteSchema")
|
||||
.build();
|
||||
|
||||
return accessControlService.validateAccess(context).then(
|
||||
return validateAccess(context).then(
|
||||
schemaRegistryService.deleteSchemaSubjectEntirely(getCluster(clusterName), subject)
|
||||
.doOnEach(sig -> auditService.audit(context, sig))
|
||||
.doOnEach(sig -> audit(context, sig))
|
||||
.thenReturn(ResponseEntity.ok().build())
|
||||
);
|
||||
}
|
||||
|
@ -139,9 +135,9 @@ public class SchemasController extends AbstractController implements SchemasApi
|
|||
.operationName("deleteSchemaByVersion")
|
||||
.build();
|
||||
|
||||
return accessControlService.validateAccess(context).then(
|
||||
return validateAccess(context).then(
|
||||
schemaRegistryService.deleteSchemaSubjectByVersion(getCluster(clusterName), subjectName, version)
|
||||
.doOnEach(sig -> auditService.audit(context, sig))
|
||||
.doOnEach(sig -> audit(context, sig))
|
||||
.thenReturn(ResponseEntity.ok().build())
|
||||
);
|
||||
}
|
||||
|
@ -160,9 +156,9 @@ public class SchemasController extends AbstractController implements SchemasApi
|
|||
schemaRegistryService.getAllVersionsBySubject(getCluster(clusterName), subjectName)
|
||||
.map(kafkaSrMapper::toDto);
|
||||
|
||||
return accessControlService.validateAccess(context)
|
||||
return validateAccess(context)
|
||||
.thenReturn(ResponseEntity.ok(schemas))
|
||||
.doOnEach(sig -> auditService.audit(context, sig));
|
||||
.doOnEach(sig -> audit(context, sig));
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -185,11 +181,11 @@ public class SchemasController extends AbstractController implements SchemasApi
|
|||
.operationName("getLatestSchema")
|
||||
.build();
|
||||
|
||||
return accessControlService.validateAccess(context).then(
|
||||
return validateAccess(context).then(
|
||||
schemaRegistryService.getLatestSchemaVersionBySubject(getCluster(clusterName), subject)
|
||||
.map(kafkaSrMapper::toDto)
|
||||
.map(ResponseEntity::ok)
|
||||
).doOnEach(sig -> auditService.audit(context, sig));
|
||||
).doOnEach(sig -> audit(context, sig));
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -203,12 +199,12 @@ public class SchemasController extends AbstractController implements SchemasApi
|
|||
.operationParams(Map.of("subject", subject, "version", version))
|
||||
.build();
|
||||
|
||||
return accessControlService.validateAccess(context).then(
|
||||
return validateAccess(context).then(
|
||||
schemaRegistryService.getSchemaSubjectByVersion(
|
||||
getCluster(clusterName), subject, version)
|
||||
.map(kafkaSrMapper::toDto)
|
||||
.map(ResponseEntity::ok)
|
||||
).doOnEach(sig -> auditService.audit(context, sig));
|
||||
).doOnEach(sig -> audit(context, sig));
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -244,7 +240,7 @@ public class SchemasController extends AbstractController implements SchemasApi
|
|||
.map(subjs -> subjs.stream().map(kafkaSrMapper::toDto).toList())
|
||||
.map(subjs -> new SchemaSubjectsResponseDTO().pageCount(totalPages).schemas(subjs));
|
||||
}).map(ResponseEntity::ok)
|
||||
.doOnEach(sig -> auditService.audit(context, sig));
|
||||
.doOnEach(sig -> audit(context, sig));
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -257,14 +253,14 @@ public class SchemasController extends AbstractController implements SchemasApi
|
|||
.operationName("updateGlobalSchemaCompatibilityLevel")
|
||||
.build();
|
||||
|
||||
return accessControlService.validateAccess(context).then(
|
||||
return validateAccess(context).then(
|
||||
compatibilityLevelMono
|
||||
.flatMap(compatibilityLevelDTO ->
|
||||
schemaRegistryService.updateGlobalSchemaCompatibility(
|
||||
getCluster(clusterName),
|
||||
kafkaSrMapper.fromDto(compatibilityLevelDTO.getCompatibility())
|
||||
))
|
||||
.doOnEach(sig -> auditService.audit(context, sig))
|
||||
.doOnEach(sig -> audit(context, sig))
|
||||
.thenReturn(ResponseEntity.ok().build())
|
||||
);
|
||||
}
|
||||
|
@ -280,7 +276,7 @@ public class SchemasController extends AbstractController implements SchemasApi
|
|||
.operationParams(Map.of("subject", subject))
|
||||
.build();
|
||||
|
||||
return accessControlService.validateAccess(context).then(
|
||||
return validateAccess(context).then(
|
||||
compatibilityLevelMono
|
||||
.flatMap(compatibilityLevelDTO ->
|
||||
schemaRegistryService.updateSchemaCompatibility(
|
||||
|
@ -288,7 +284,7 @@ public class SchemasController extends AbstractController implements SchemasApi
|
|||
subject,
|
||||
kafkaSrMapper.fromDto(compatibilityLevelDTO.getCompatibility())
|
||||
))
|
||||
.doOnEach(sig -> auditService.audit(context, sig))
|
||||
.doOnEach(sig -> audit(context, sig))
|
||||
.thenReturn(ResponseEntity.ok().build())
|
||||
);
|
||||
}
|
||||
|
|
|
@ -27,8 +27,6 @@ import com.provectus.kafka.ui.model.TopicsResponseDTO;
|
|||
import com.provectus.kafka.ui.model.rbac.AccessContext;
|
||||
import com.provectus.kafka.ui.service.TopicsService;
|
||||
import com.provectus.kafka.ui.service.analyze.TopicAnalysisService;
|
||||
import com.provectus.kafka.ui.service.audit.AuditService;
|
||||
import com.provectus.kafka.ui.service.rbac.AccessControlService;
|
||||
import java.util.Comparator;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
@ -53,8 +51,6 @@ public class TopicsController extends AbstractController implements TopicsApi {
|
|||
private final TopicsService topicsService;
|
||||
private final TopicAnalysisService topicAnalysisService;
|
||||
private final ClusterMapper clusterMapper;
|
||||
private final AccessControlService accessControlService;
|
||||
private final AuditService auditService;
|
||||
|
||||
@Override
|
||||
public Mono<ResponseEntity<TopicDTO>> createTopic(
|
||||
|
@ -67,12 +63,12 @@ public class TopicsController extends AbstractController implements TopicsApi {
|
|||
.operationParams(topicCreation)
|
||||
.build();
|
||||
|
||||
return accessControlService.validateAccess(context)
|
||||
return validateAccess(context)
|
||||
.then(topicsService.createTopic(getCluster(clusterName), topicCreation))
|
||||
.map(clusterMapper::toTopic)
|
||||
.map(s -> new ResponseEntity<>(s, HttpStatus.OK))
|
||||
.switchIfEmpty(Mono.just(ResponseEntity.notFound().build()))
|
||||
.doOnEach(sig -> auditService.audit(context, sig));
|
||||
.doOnEach(sig -> audit(context, sig));
|
||||
});
|
||||
}
|
||||
|
||||
|
@ -86,11 +82,11 @@ public class TopicsController extends AbstractController implements TopicsApi {
|
|||
.operationName("recreateTopic")
|
||||
.build();
|
||||
|
||||
return accessControlService.validateAccess(context).then(
|
||||
return validateAccess(context).then(
|
||||
topicsService.recreateTopic(getCluster(clusterName), topicName)
|
||||
.map(clusterMapper::toTopic)
|
||||
.map(s -> new ResponseEntity<>(s, HttpStatus.CREATED))
|
||||
).doOnEach(sig -> auditService.audit(context, sig));
|
||||
).doOnEach(sig -> audit(context, sig));
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -105,11 +101,11 @@ public class TopicsController extends AbstractController implements TopicsApi {
|
|||
.operationParams(Map.of("newTopicName", newTopicName))
|
||||
.build();
|
||||
|
||||
return accessControlService.validateAccess(context)
|
||||
return validateAccess(context)
|
||||
.then(topicsService.cloneTopic(getCluster(clusterName), topicName, newTopicName)
|
||||
.map(clusterMapper::toTopic)
|
||||
.map(s -> new ResponseEntity<>(s, HttpStatus.CREATED))
|
||||
).doOnEach(sig -> auditService.audit(context, sig));
|
||||
).doOnEach(sig -> audit(context, sig));
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -123,11 +119,11 @@ public class TopicsController extends AbstractController implements TopicsApi {
|
|||
.operationName("deleteTopic")
|
||||
.build();
|
||||
|
||||
return accessControlService.validateAccess(context)
|
||||
return validateAccess(context)
|
||||
.then(
|
||||
topicsService.deleteTopic(getCluster(clusterName), topicName)
|
||||
.thenReturn(ResponseEntity.ok().<Void>build())
|
||||
).doOnEach(sig -> auditService.audit(context, sig));
|
||||
).doOnEach(sig -> audit(context, sig));
|
||||
}
|
||||
|
||||
|
||||
|
@ -142,7 +138,7 @@ public class TopicsController extends AbstractController implements TopicsApi {
|
|||
.operationName("getTopicConfigs")
|
||||
.build();
|
||||
|
||||
return accessControlService.validateAccess(context).then(
|
||||
return validateAccess(context).then(
|
||||
topicsService.getTopicConfigs(getCluster(clusterName), topicName)
|
||||
.map(lst -> lst.stream()
|
||||
.map(InternalTopicConfig::from)
|
||||
|
@ -150,7 +146,7 @@ public class TopicsController extends AbstractController implements TopicsApi {
|
|||
.collect(toList()))
|
||||
.map(Flux::fromIterable)
|
||||
.map(ResponseEntity::ok)
|
||||
).doOnEach(sig -> auditService.audit(context, sig));
|
||||
).doOnEach(sig -> audit(context, sig));
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -164,11 +160,11 @@ public class TopicsController extends AbstractController implements TopicsApi {
|
|||
.operationName("getTopicDetails")
|
||||
.build();
|
||||
|
||||
return accessControlService.validateAccess(context).then(
|
||||
return validateAccess(context).then(
|
||||
topicsService.getTopicDetails(getCluster(clusterName), topicName)
|
||||
.map(clusterMapper::toTopicDetails)
|
||||
.map(ResponseEntity::ok)
|
||||
).doOnEach(sig -> auditService.audit(context, sig));
|
||||
).doOnEach(sig -> audit(context, sig));
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -215,7 +211,7 @@ public class TopicsController extends AbstractController implements TopicsApi {
|
|||
.pageCount(totalPages));
|
||||
})
|
||||
.map(ResponseEntity::ok)
|
||||
.doOnEach(sig -> auditService.audit(context, sig));
|
||||
.doOnEach(sig -> audit(context, sig));
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -230,12 +226,12 @@ public class TopicsController extends AbstractController implements TopicsApi {
|
|||
.operationName("updateTopic")
|
||||
.build();
|
||||
|
||||
return accessControlService.validateAccess(context).then(
|
||||
return validateAccess(context).then(
|
||||
topicsService
|
||||
.updateTopic(getCluster(clusterName), topicName, topicUpdate)
|
||||
.map(clusterMapper::toTopic)
|
||||
.map(ResponseEntity::ok)
|
||||
).doOnEach(sig -> auditService.audit(context, sig));
|
||||
).doOnEach(sig -> audit(context, sig));
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -250,11 +246,11 @@ public class TopicsController extends AbstractController implements TopicsApi {
|
|||
.topicActions(VIEW, EDIT)
|
||||
.build();
|
||||
|
||||
return accessControlService.validateAccess(context).then(
|
||||
return validateAccess(context).then(
|
||||
partitionsIncrease.flatMap(partitions ->
|
||||
topicsService.increaseTopicPartitions(getCluster(clusterName), topicName, partitions)
|
||||
).map(ResponseEntity::ok)
|
||||
).doOnEach(sig -> auditService.audit(context, sig));
|
||||
).doOnEach(sig -> audit(context, sig));
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -270,12 +266,12 @@ public class TopicsController extends AbstractController implements TopicsApi {
|
|||
.operationName("changeReplicationFactor")
|
||||
.build();
|
||||
|
||||
return accessControlService.validateAccess(context).then(
|
||||
return validateAccess(context).then(
|
||||
replicationFactorChange
|
||||
.flatMap(rfc ->
|
||||
topicsService.changeReplicationFactor(getCluster(clusterName), topicName, rfc))
|
||||
.map(ResponseEntity::ok)
|
||||
).doOnEach(sig -> auditService.audit(context, sig));
|
||||
).doOnEach(sig -> audit(context, sig));
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -288,9 +284,9 @@ public class TopicsController extends AbstractController implements TopicsApi {
|
|||
.operationName("analyzeTopic")
|
||||
.build();
|
||||
|
||||
return accessControlService.validateAccess(context).then(
|
||||
return validateAccess(context).then(
|
||||
topicAnalysisService.analyze(getCluster(clusterName), topicName)
|
||||
.doOnEach(sig -> auditService.audit(context, sig))
|
||||
.doOnEach(sig -> audit(context, sig))
|
||||
.thenReturn(ResponseEntity.ok().build())
|
||||
);
|
||||
}
|
||||
|
@ -305,9 +301,9 @@ public class TopicsController extends AbstractController implements TopicsApi {
|
|||
.operationName("cancelTopicAnalysis")
|
||||
.build();
|
||||
|
||||
return accessControlService.validateAccess(context)
|
||||
return validateAccess(context)
|
||||
.then(Mono.fromRunnable(() -> topicAnalysisService.cancelAnalysis(getCluster(clusterName), topicName)))
|
||||
.doOnEach(sig -> auditService.audit(context, sig))
|
||||
.doOnEach(sig -> audit(context, sig))
|
||||
.thenReturn(ResponseEntity.ok().build());
|
||||
}
|
||||
|
||||
|
@ -324,11 +320,11 @@ public class TopicsController extends AbstractController implements TopicsApi {
|
|||
.operationName("getTopicAnalysis")
|
||||
.build();
|
||||
|
||||
return accessControlService.validateAccess(context)
|
||||
return validateAccess(context)
|
||||
.thenReturn(topicAnalysisService.getTopicAnalysis(getCluster(clusterName), topicName)
|
||||
.map(ResponseEntity::ok)
|
||||
.orElseGet(() -> ResponseEntity.notFound().build()))
|
||||
.doOnEach(sig -> auditService.audit(context, sig));
|
||||
.doOnEach(sig -> audit(context, sig));
|
||||
}
|
||||
|
||||
private Comparator<InternalTopic> getComparatorForTopic(
|
||||
|
|
|
@ -1,28 +1,22 @@
package com.provectus.kafka.ui.emitter;

import com.provectus.kafka.ui.model.TopicMessageEventDTO;
import java.time.Duration;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.common.utils.Bytes;
import reactor.core.publisher.FluxSink;

public abstract class AbstractEmitter {
abstract class AbstractEmitter implements java.util.function.Consumer<FluxSink<TopicMessageEventDTO>> {

  private final MessagesProcessing messagesProcessing;
  protected final PollingSettings pollingSettings;
  private final PollingSettings pollingSettings;

  protected AbstractEmitter(MessagesProcessing messagesProcessing, PollingSettings pollingSettings) {
    this.messagesProcessing = messagesProcessing;
    this.pollingSettings = pollingSettings;
  }

  protected PolledRecords poll(
      FluxSink<TopicMessageEventDTO> sink, EnhancedConsumer consumer) {
    return poll(sink, consumer, pollingSettings.getPollTimeout());
  }

  protected PolledRecords poll(FluxSink<TopicMessageEventDTO> sink, EnhancedConsumer consumer, Duration timeout) {
    var records = consumer.pollEnhanced(timeout);
  protected PolledRecords poll(FluxSink<TopicMessageEventDTO> sink, EnhancedConsumer consumer) {
    var records = consumer.pollEnhanced(pollingSettings.getPollTimeout());
    sendConsuming(sink, records);
    return records;
  }

@ -31,9 +25,8 @@ public abstract class AbstractEmitter {
    return messagesProcessing.limitReached();
  }

  protected void sendMessage(FluxSink<TopicMessageEventDTO> sink,
                             ConsumerRecord<Bytes, Bytes> msg) {
    messagesProcessing.sendMsg(sink, msg);
  protected void send(FluxSink<TopicMessageEventDTO> sink, Iterable<ConsumerRecord<Bytes, Bytes>> records) {
    messagesProcessing.send(sink, records);
  }

  protected void sendPhase(FluxSink<TopicMessageEventDTO> sink, String name) {

@ -0,0 +1,60 @@
|
|||
package com.provectus.kafka.ui.emitter;
|
||||
|
||||
import com.provectus.kafka.ui.model.ConsumerPosition;
|
||||
import com.provectus.kafka.ui.model.TopicMessageDTO;
|
||||
import com.provectus.kafka.ui.serdes.ConsumerRecordDeserializer;
|
||||
import java.util.Comparator;
|
||||
import java.util.Map;
|
||||
import java.util.TreeMap;
|
||||
import java.util.function.Predicate;
|
||||
import java.util.function.Supplier;
|
||||
import java.util.stream.Collectors;
|
||||
import org.apache.kafka.common.TopicPartition;
|
||||
|
||||
public class BackwardEmitter extends RangePollingEmitter {
|
||||
|
||||
public BackwardEmitter(Supplier<EnhancedConsumer> consumerSupplier,
|
||||
ConsumerPosition consumerPosition,
|
||||
int messagesPerPage,
|
||||
ConsumerRecordDeserializer deserializer,
|
||||
Predicate<TopicMessageDTO> filter,
|
||||
PollingSettings pollingSettings) {
|
||||
super(
|
||||
consumerSupplier,
|
||||
consumerPosition,
|
||||
messagesPerPage,
|
||||
new MessagesProcessing(
|
||||
deserializer,
|
||||
filter,
|
||||
false,
|
||||
messagesPerPage
|
||||
),
|
||||
pollingSettings
|
||||
);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected TreeMap<TopicPartition, FromToOffset> nextPollingRange(TreeMap<TopicPartition, FromToOffset> prevRange,
|
||||
SeekOperations seekOperations) {
|
||||
TreeMap<TopicPartition, Long> readToOffsets = new TreeMap<>(Comparator.comparingInt(TopicPartition::partition));
|
||||
if (prevRange.isEmpty()) {
|
||||
readToOffsets.putAll(seekOperations.getOffsetsForSeek());
|
||||
} else {
|
||||
readToOffsets.putAll(
|
||||
prevRange.entrySet()
|
||||
.stream()
|
||||
.collect(Collectors.toMap(Map.Entry::getKey, e -> e.getValue().from()))
|
||||
);
|
||||
}
|
||||
|
||||
int msgsToPollPerPartition = (int) Math.ceil((double) messagesPerPage / readToOffsets.size());
|
||||
TreeMap<TopicPartition, FromToOffset> result = new TreeMap<>(Comparator.comparingInt(TopicPartition::partition));
|
||||
readToOffsets.forEach((tp, toOffset) -> {
|
||||
long tpStartOffset = seekOperations.getBeginOffsets().get(tp);
|
||||
if (toOffset > tpStartOffset) {
|
||||
result.put(tp, new FromToOffset(Math.max(tpStartOffset, toOffset - msgsToPollPerPartition), toOffset));
|
||||
}
|
||||
});
|
||||
return result;
|
||||
}
|
||||
}
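The backward paging arithmetic in nextPollingRange() above is easier to follow with concrete numbers. The snippet below is only an illustration with made-up offsets and page size, not code from this change:

  // one partition: begin offset 100, previous page started at 250, budget of 50 messages per partition
  long tpStartOffset = 100;   // seekOperations.getBeginOffsets().get(tp)
  long toOffset = 250;        // previous range's 'from' becomes the new exclusive 'to'
  int msgsToPollPerPartition = 50;
  long from = Math.max(tpStartOffset, toOffset - msgsToPollPerPartition);
  // from == 200, so the next range is [200, 250); once 'from' reaches 100 the partition drops out of the result map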
|
|
@ -1,126 +0,0 @@
|
|||
package com.provectus.kafka.ui.emitter;
|
||||
|
||||
import com.provectus.kafka.ui.model.ConsumerPosition;
|
||||
import com.provectus.kafka.ui.model.TopicMessageEventDTO;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collections;
|
||||
import java.util.Comparator;
|
||||
import java.util.List;
|
||||
import java.util.TreeMap;
|
||||
import java.util.function.Supplier;
|
||||
import lombok.extern.slf4j.Slf4j;
|
||||
import org.apache.kafka.clients.consumer.ConsumerRecord;
|
||||
import org.apache.kafka.common.TopicPartition;
|
||||
import org.apache.kafka.common.errors.InterruptException;
|
||||
import org.apache.kafka.common.utils.Bytes;
|
||||
import reactor.core.publisher.FluxSink;
|
||||
|
||||
@Slf4j
|
||||
public class BackwardRecordEmitter
|
||||
extends AbstractEmitter
|
||||
implements java.util.function.Consumer<FluxSink<TopicMessageEventDTO>> {
|
||||
|
||||
private final Supplier<EnhancedConsumer> consumerSupplier;
|
||||
private final ConsumerPosition consumerPosition;
|
||||
private final int messagesPerPage;
|
||||
|
||||
public BackwardRecordEmitter(
|
||||
Supplier<EnhancedConsumer> consumerSupplier,
|
||||
ConsumerPosition consumerPosition,
|
||||
int messagesPerPage,
|
||||
MessagesProcessing messagesProcessing,
|
||||
PollingSettings pollingSettings) {
|
||||
super(messagesProcessing, pollingSettings);
|
||||
this.consumerPosition = consumerPosition;
|
||||
this.messagesPerPage = messagesPerPage;
|
||||
this.consumerSupplier = consumerSupplier;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void accept(FluxSink<TopicMessageEventDTO> sink) {
|
||||
log.debug("Starting backward polling for {}", consumerPosition);
|
||||
try (EnhancedConsumer consumer = consumerSupplier.get()) {
|
||||
sendPhase(sink, "Created consumer");
|
||||
|
||||
var seekOperations = SeekOperations.create(consumer, consumerPosition);
|
||||
var readUntilOffsets = new TreeMap<TopicPartition, Long>(Comparator.comparingInt(TopicPartition::partition));
|
||||
readUntilOffsets.putAll(seekOperations.getOffsetsForSeek());
|
||||
|
||||
int msgsToPollPerPartition = (int) Math.ceil((double) messagesPerPage / readUntilOffsets.size());
|
||||
log.debug("'Until' offsets for polling: {}", readUntilOffsets);
|
||||
|
||||
while (!sink.isCancelled() && !readUntilOffsets.isEmpty() && !sendLimitReached()) {
|
||||
new TreeMap<>(readUntilOffsets).forEach((tp, readToOffset) -> {
|
||||
if (sink.isCancelled()) {
|
||||
return; //fast return in case of sink cancellation
|
||||
}
|
||||
long beginOffset = seekOperations.getBeginOffsets().get(tp);
|
||||
long readFromOffset = Math.max(beginOffset, readToOffset - msgsToPollPerPartition);
|
||||
|
||||
partitionPollIteration(tp, readFromOffset, readToOffset, consumer, sink)
|
||||
.forEach(r -> sendMessage(sink, r));
|
||||
|
||||
if (beginOffset == readFromOffset) {
|
||||
// we fully read this partition -> removing it from polling iterations
|
||||
readUntilOffsets.remove(tp);
|
||||
} else {
|
||||
// updating 'to' offset for next polling iteration
|
||||
readUntilOffsets.put(tp, readFromOffset);
|
||||
}
|
||||
});
|
||||
if (readUntilOffsets.isEmpty()) {
|
||||
log.debug("begin reached after partitions poll iteration");
|
||||
} else if (sink.isCancelled()) {
|
||||
log.debug("sink is cancelled after partitions poll iteration");
|
||||
}
|
||||
}
|
||||
sendFinishStatsAndCompleteSink(sink);
|
||||
log.debug("Polling finished");
|
||||
} catch (InterruptException kafkaInterruptException) {
|
||||
log.debug("Polling finished due to thread interruption");
|
||||
sink.complete();
|
||||
} catch (Exception e) {
|
||||
log.error("Error occurred while consuming records", e);
|
||||
sink.error(e);
|
||||
}
|
||||
}
|
||||
|
||||
private List<ConsumerRecord<Bytes, Bytes>> partitionPollIteration(
|
||||
TopicPartition tp,
|
||||
long fromOffset,
|
||||
long toOffset,
|
||||
EnhancedConsumer consumer,
|
||||
FluxSink<TopicMessageEventDTO> sink
|
||||
) {
|
||||
consumer.assign(Collections.singleton(tp));
|
||||
consumer.seek(tp, fromOffset);
|
||||
sendPhase(sink, String.format("Polling partition: %s from offset %s", tp, fromOffset));
|
||||
int desiredMsgsToPoll = (int) (toOffset - fromOffset);
|
||||
|
||||
var recordsToSend = new ArrayList<ConsumerRecord<Bytes, Bytes>>();
|
||||
|
||||
EmptyPollsCounter emptyPolls = pollingSettings.createEmptyPollsCounter();
|
||||
while (!sink.isCancelled()
|
||||
&& !sendLimitReached()
|
||||
&& recordsToSend.size() < desiredMsgsToPoll
|
||||
&& !emptyPolls.noDataEmptyPollsReached()) {
|
||||
var polledRecords = poll(sink, consumer, pollingSettings.getPartitionPollTimeout());
|
||||
emptyPolls.count(polledRecords.count());
|
||||
|
||||
log.debug("{} records polled from {}", polledRecords.count(), tp);
|
||||
|
||||
var filteredRecords = polledRecords.records(tp).stream()
|
||||
.filter(r -> r.offset() < toOffset)
|
||||
.toList();
|
||||
|
||||
if (polledRecords.count() > 0 && filteredRecords.isEmpty()) {
|
||||
// we already read all messages in target offsets interval
|
||||
break;
|
||||
}
|
||||
recordsToSend.addAll(filteredRecords);
|
||||
}
|
||||
log.debug("{} records to send", recordsToSend.size());
|
||||
Collections.reverse(recordsToSend);
|
||||
return recordsToSend;
|
||||
}
|
||||
}
|
|
@ -9,35 +9,37 @@ class ConsumingStats {
  private long bytes = 0;
  private int records = 0;
  private long elapsed = 0;
  private int filterApplyErrors = 0;

  void sendConsumingEvt(FluxSink<TopicMessageEventDTO> sink,
                        PolledRecords polledRecords,
                        int filterApplyErrors) {
  void sendConsumingEvt(FluxSink<TopicMessageEventDTO> sink, PolledRecords polledRecords) {
    bytes += polledRecords.bytes();
    this.records += polledRecords.count();
    this.elapsed += polledRecords.elapsed().toMillis();
    records += polledRecords.count();
    elapsed += polledRecords.elapsed().toMillis();
    sink.next(
        new TopicMessageEventDTO()
            .type(TopicMessageEventDTO.TypeEnum.CONSUMING)
            .consuming(createConsumingStats(sink, filterApplyErrors))
            .consuming(createConsumingStats())
    );
  }

  void sendFinishEvent(FluxSink<TopicMessageEventDTO> sink, int filterApplyErrors) {
  void incFilterApplyError() {
    filterApplyErrors++;
  }

  void sendFinishEvent(FluxSink<TopicMessageEventDTO> sink) {
    sink.next(
        new TopicMessageEventDTO()
            .type(TopicMessageEventDTO.TypeEnum.DONE)
            .consuming(createConsumingStats(sink, filterApplyErrors))
            .consuming(createConsumingStats())
    );
  }

  private TopicMessageConsumingDTO createConsumingStats(FluxSink<TopicMessageEventDTO> sink,
                                                        int filterApplyErrors) {
  private TopicMessageConsumingDTO createConsumingStats() {
    return new TopicMessageConsumingDTO()
        .bytesConsumed(this.bytes)
        .elapsedMs(this.elapsed)
        .isCancelled(sink.isCancelled())
        .bytesConsumed(bytes)
        .elapsedMs(elapsed)
        .isCancelled(false)
        .filterApplyErrors(filterApplyErrors)
        .messagesConsumed(this.records);
        .messagesConsumed(records);
  }
}

@ -1,28 +0,0 @@
package com.provectus.kafka.ui.emitter;

import org.apache.kafka.clients.consumer.ConsumerRecords;

// In some situations it is hard to say whether records range (between two offsets) was fully polled.
// This happens when we have holes in records sequences that is usual case for compact topics or
// topics with transactional writes. In such cases if you want to poll all records between offsets X and Y
// there is no guarantee that you will ever see record with offset Y.
// To workaround this we can assume that after N consecutive empty polls all target messages were read.
public class EmptyPollsCounter {

  private final int maxEmptyPolls;

  private int emptyPolls = 0;

  EmptyPollsCounter(int maxEmptyPolls) {
    this.maxEmptyPolls = maxEmptyPolls;
  }

  public void count(int polledCount) {
    emptyPolls = polledCount == 0 ? emptyPolls + 1 : 0;
  }

  public boolean noDataEmptyPollsReached() {
    return emptyPolls >= maxEmptyPolls;
  }

}

@ -0,0 +1,61 @@
|
|||
package com.provectus.kafka.ui.emitter;
|
||||
|
||||
import com.provectus.kafka.ui.model.ConsumerPosition;
|
||||
import com.provectus.kafka.ui.model.TopicMessageDTO;
|
||||
import com.provectus.kafka.ui.serdes.ConsumerRecordDeserializer;
|
||||
import java.util.Comparator;
|
||||
import java.util.Map;
|
||||
import java.util.TreeMap;
|
||||
import java.util.function.Predicate;
|
||||
import java.util.function.Supplier;
|
||||
import java.util.stream.Collectors;
|
||||
import org.apache.kafka.common.TopicPartition;
|
||||
|
||||
public class ForwardEmitter extends RangePollingEmitter {
|
||||
|
||||
public ForwardEmitter(Supplier<EnhancedConsumer> consumerSupplier,
|
||||
ConsumerPosition consumerPosition,
|
||||
int messagesPerPage,
|
||||
ConsumerRecordDeserializer deserializer,
|
||||
Predicate<TopicMessageDTO> filter,
|
||||
PollingSettings pollingSettings) {
|
||||
super(
|
||||
consumerSupplier,
|
||||
consumerPosition,
|
||||
messagesPerPage,
|
||||
new MessagesProcessing(
|
||||
deserializer,
|
||||
filter,
|
||||
true,
|
||||
messagesPerPage
|
||||
),
|
||||
pollingSettings
|
||||
);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected TreeMap<TopicPartition, FromToOffset> nextPollingRange(TreeMap<TopicPartition, FromToOffset> prevRange,
|
||||
SeekOperations seekOperations) {
|
||||
TreeMap<TopicPartition, Long> readFromOffsets = new TreeMap<>(Comparator.comparingInt(TopicPartition::partition));
|
||||
if (prevRange.isEmpty()) {
|
||||
readFromOffsets.putAll(seekOperations.getOffsetsForSeek());
|
||||
} else {
|
||||
readFromOffsets.putAll(
|
||||
prevRange.entrySet()
|
||||
.stream()
|
||||
.collect(Collectors.toMap(Map.Entry::getKey, e -> e.getValue().to()))
|
||||
);
|
||||
}
|
||||
|
||||
int msgsToPollPerPartition = (int) Math.ceil((double) messagesPerPage / readFromOffsets.size());
|
||||
TreeMap<TopicPartition, FromToOffset> result = new TreeMap<>(Comparator.comparingInt(TopicPartition::partition));
|
||||
readFromOffsets.forEach((tp, fromOffset) -> {
|
||||
long tpEndOffset = seekOperations.getEndOffsets().get(tp);
|
||||
if (fromOffset < tpEndOffset) {
|
||||
result.put(tp, new FromToOffset(fromOffset, Math.min(tpEndOffset, fromOffset + msgsToPollPerPartition)));
|
||||
}
|
||||
});
|
||||
return result;
|
||||
}
|
||||
|
||||
}
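The forward direction mirrors the backward case, walking from the previous range's end towards the partition's end offset. Again an illustration only, with invented offsets:

  // one partition: end offset 500, previous page ended at 320, budget of 50 messages per partition
  long tpEndOffset = 500;     // seekOperations.getEndOffsets().get(tp)
  long fromOffset = 320;      // previous range's 'to' becomes the new inclusive 'from'
  int msgsToPollPerPartition = 50;
  long to = Math.min(tpEndOffset, fromOffset + msgsToPollPerPartition);
  // to == 370, so the next range is [320, 370); partitions whose 'from' reached the end offset are skipped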
|
|
@ -1,64 +0,0 @@
|
|||
package com.provectus.kafka.ui.emitter;
|
||||
|
||||
import com.provectus.kafka.ui.model.ConsumerPosition;
|
||||
import com.provectus.kafka.ui.model.TopicMessageEventDTO;
|
||||
import java.util.function.Supplier;
|
||||
import lombok.extern.slf4j.Slf4j;
|
||||
import org.apache.kafka.clients.consumer.ConsumerRecord;
|
||||
import org.apache.kafka.common.errors.InterruptException;
|
||||
import org.apache.kafka.common.utils.Bytes;
|
||||
import reactor.core.publisher.FluxSink;
|
||||
|
||||
@Slf4j
|
||||
public class ForwardRecordEmitter
|
||||
extends AbstractEmitter
|
||||
implements java.util.function.Consumer<FluxSink<TopicMessageEventDTO>> {
|
||||
|
||||
private final Supplier<EnhancedConsumer> consumerSupplier;
|
||||
private final ConsumerPosition position;
|
||||
|
||||
public ForwardRecordEmitter(
|
||||
Supplier<EnhancedConsumer> consumerSupplier,
|
||||
ConsumerPosition position,
|
||||
MessagesProcessing messagesProcessing,
|
||||
PollingSettings pollingSettings) {
|
||||
super(messagesProcessing, pollingSettings);
|
||||
this.position = position;
|
||||
this.consumerSupplier = consumerSupplier;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void accept(FluxSink<TopicMessageEventDTO> sink) {
|
||||
log.debug("Starting forward polling for {}", position);
|
||||
try (EnhancedConsumer consumer = consumerSupplier.get()) {
|
||||
sendPhase(sink, "Assigning partitions");
|
||||
var seekOperations = SeekOperations.create(consumer, position);
|
||||
seekOperations.assignAndSeekNonEmptyPartitions();
|
||||
|
||||
EmptyPollsCounter emptyPolls = pollingSettings.createEmptyPollsCounter();
|
||||
while (!sink.isCancelled()
|
||||
&& !sendLimitReached()
|
||||
&& !seekOperations.assignedPartitionsFullyPolled()
|
||||
&& !emptyPolls.noDataEmptyPollsReached()) {
|
||||
|
||||
sendPhase(sink, "Polling");
|
||||
var records = poll(sink, consumer);
|
||||
emptyPolls.count(records.count());
|
||||
|
||||
log.debug("{} records polled", records.count());
|
||||
|
||||
for (ConsumerRecord<Bytes, Bytes> msg : records) {
|
||||
sendMessage(sink, msg);
|
||||
}
|
||||
}
|
||||
sendFinishStatsAndCompleteSink(sink);
|
||||
log.debug("Polling finished");
|
||||
} catch (InterruptException kafkaInterruptException) {
|
||||
log.debug("Polling finished due to thread interruption");
|
||||
sink.complete();
|
||||
} catch (Exception e) {
|
||||
log.error("Error occurred while consuming records", e);
|
||||
sink.error(e);
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,67 +1,75 @@
|
|||
package com.provectus.kafka.ui.emitter;
|
||||
|
||||
import static java.util.stream.Collectors.collectingAndThen;
|
||||
import static java.util.stream.Collectors.groupingBy;
|
||||
import static java.util.stream.Collectors.toList;
|
||||
|
||||
import com.google.common.annotations.VisibleForTesting;
|
||||
import com.google.common.collect.Iterables;
|
||||
import com.google.common.collect.Streams;
|
||||
import com.provectus.kafka.ui.model.TopicMessageDTO;
|
||||
import com.provectus.kafka.ui.model.TopicMessageEventDTO;
|
||||
import com.provectus.kafka.ui.model.TopicMessagePhaseDTO;
|
||||
import com.provectus.kafka.ui.serdes.ConsumerRecordDeserializer;
|
||||
import java.util.Comparator;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.TreeMap;
|
||||
import java.util.function.Predicate;
|
||||
import javax.annotation.Nullable;
|
||||
import lombok.RequiredArgsConstructor;
|
||||
import lombok.extern.slf4j.Slf4j;
|
||||
import org.apache.kafka.clients.consumer.ConsumerRecord;
|
||||
import org.apache.kafka.common.utils.Bytes;
|
||||
import reactor.core.publisher.FluxSink;
|
||||
|
||||
@Slf4j
|
||||
public class MessagesProcessing {
|
||||
@RequiredArgsConstructor
|
||||
class MessagesProcessing {
|
||||
|
||||
private final ConsumingStats consumingStats = new ConsumingStats();
|
||||
private long sentMessages = 0;
|
||||
private int filterApplyErrors = 0;
|
||||
|
||||
private final ConsumerRecordDeserializer deserializer;
|
||||
private final Predicate<TopicMessageDTO> filter;
|
||||
private final boolean ascendingSortBeforeSend;
|
||||
private final @Nullable Integer limit;
|
||||
|
||||
public MessagesProcessing(ConsumerRecordDeserializer deserializer,
|
||||
Predicate<TopicMessageDTO> filter,
|
||||
@Nullable Integer limit) {
|
||||
this.deserializer = deserializer;
|
||||
this.filter = filter;
|
||||
this.limit = limit;
|
||||
}
|
||||
|
||||
boolean limitReached() {
|
||||
return limit != null && sentMessages >= limit;
|
||||
}
|
||||
|
||||
void sendMsg(FluxSink<TopicMessageEventDTO> sink, ConsumerRecord<Bytes, Bytes> rec) {
|
||||
if (!sink.isCancelled() && !limitReached()) {
|
||||
TopicMessageDTO topicMessage = deserializer.deserialize(rec);
|
||||
try {
|
||||
if (filter.test(topicMessage)) {
|
||||
sink.next(
|
||||
new TopicMessageEventDTO()
|
||||
.type(TopicMessageEventDTO.TypeEnum.MESSAGE)
|
||||
.message(topicMessage)
|
||||
);
|
||||
sentMessages++;
|
||||
}
|
||||
} catch (Exception e) {
|
||||
filterApplyErrors++;
|
||||
log.trace("Error applying filter for message {}", topicMessage);
|
||||
}
|
||||
}
|
||||
void send(FluxSink<TopicMessageEventDTO> sink, Iterable<ConsumerRecord<Bytes, Bytes>> polled) {
|
||||
sortForSending(polled, ascendingSortBeforeSend)
|
||||
.forEach(rec -> {
|
||||
if (!limitReached() && !sink.isCancelled()) {
|
||||
TopicMessageDTO topicMessage = deserializer.deserialize(rec);
|
||||
try {
|
||||
if (filter.test(topicMessage)) {
|
||||
sink.next(
|
||||
new TopicMessageEventDTO()
|
||||
.type(TopicMessageEventDTO.TypeEnum.MESSAGE)
|
||||
.message(topicMessage)
|
||||
);
|
||||
sentMessages++;
|
||||
}
|
||||
} catch (Exception e) {
|
||||
consumingStats.incFilterApplyError();
|
||||
log.trace("Error applying filter for message {}", topicMessage);
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
void sentConsumingInfo(FluxSink<TopicMessageEventDTO> sink, PolledRecords polledRecords) {
|
||||
if (!sink.isCancelled()) {
|
||||
consumingStats.sendConsumingEvt(sink, polledRecords, filterApplyErrors);
|
||||
consumingStats.sendConsumingEvt(sink, polledRecords);
|
||||
}
|
||||
}
|
||||
|
||||
void sendFinishEvent(FluxSink<TopicMessageEventDTO> sink) {
|
||||
if (!sink.isCancelled()) {
|
||||
consumingStats.sendFinishEvent(sink, filterApplyErrors);
|
||||
consumingStats.sendFinishEvent(sink);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -75,4 +83,30 @@ public class MessagesProcessing {
|
|||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Sorting by timestamps, BUT requesting that records within same partitions should be ordered by offsets.
|
||||
*/
|
||||
@VisibleForTesting
|
||||
static Iterable<ConsumerRecord<Bytes, Bytes>> sortForSending(Iterable<ConsumerRecord<Bytes, Bytes>> records,
|
||||
boolean asc) {
|
||||
Comparator<ConsumerRecord> offsetComparator = asc
|
||||
? Comparator.comparingLong(ConsumerRecord::offset)
|
||||
: Comparator.<ConsumerRecord>comparingLong(ConsumerRecord::offset).reversed();
|
||||
|
||||
// partition -> sorted by offsets records
|
||||
Map<Integer, List<ConsumerRecord<Bytes, Bytes>>> perPartition = Streams.stream(records)
|
||||
.collect(
|
||||
groupingBy(
|
||||
ConsumerRecord::partition,
|
||||
TreeMap::new,
|
||||
collectingAndThen(toList(), lst -> lst.stream().sorted(offsetComparator).toList())));
|
||||
|
||||
Comparator<ConsumerRecord> tsComparator = asc
|
||||
? Comparator.comparing(ConsumerRecord::timestamp)
|
||||
: Comparator.<ConsumerRecord>comparingLong(ConsumerRecord::timestamp).reversed();
|
||||
|
||||
// merge-sorting records from partitions one by one using timestamp comparator
|
||||
return Iterables.mergeSorted(perPartition.values(), tsComparator);
|
||||
}
|
||||
|
||||
}
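The sortForSending() contract (global order by timestamp, per-partition order by offset preserved) can be illustrated with plain integers standing in for timestamps. This is only a sketch of how Guava's Iterables.mergeSorted behaves, not code from the change:

  import com.google.common.collect.Iterables;
  import java.util.Comparator;
  import java.util.List;

  // each inner list is already sorted, as records within one partition are after the groupingBy step above;
  // mergeSorted interleaves them into one globally ordered iterable without reordering inside any list
  List<List<Integer>> perPartition = List.of(List.of(1, 4, 9), List.of(2, 3, 10));
  Iterable<Integer> merged = Iterables.mergeSorted(perPartition, Comparator.naturalOrder());
  merged.forEach(System.out::println); // 1, 2, 3, 4, 9, 10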
|
||||
|
|
|
@ -8,12 +8,13 @@ import java.util.Set;
|
|||
import java.util.stream.Collectors;
|
||||
import lombok.Getter;
|
||||
import lombok.extern.slf4j.Slf4j;
|
||||
import org.apache.commons.lang3.mutable.MutableLong;
|
||||
import org.apache.kafka.clients.consumer.Consumer;
|
||||
import org.apache.kafka.common.TopicPartition;
|
||||
|
||||
@Slf4j
|
||||
@Getter
|
||||
public class OffsetsInfo {
|
||||
class OffsetsInfo {
|
||||
|
||||
private final Consumer<?, ?> consumer;
|
||||
|
||||
|
@ -23,7 +24,7 @@ public class OffsetsInfo {
|
|||
private final Set<TopicPartition> nonEmptyPartitions = new HashSet<>();
|
||||
private final Set<TopicPartition> emptyPartitions = new HashSet<>();
|
||||
|
||||
public OffsetsInfo(Consumer<?, ?> consumer, String topic) {
|
||||
OffsetsInfo(Consumer<?, ?> consumer, String topic) {
|
||||
this(consumer,
|
||||
consumer.partitionsFor(topic).stream()
|
||||
.map(pi -> new TopicPartition(topic, pi.partition()))
|
||||
|
@ -31,8 +32,7 @@ public class OffsetsInfo {
|
|||
);
|
||||
}
|
||||
|
||||
public OffsetsInfo(Consumer<?, ?> consumer,
|
||||
Collection<TopicPartition> targetPartitions) {
|
||||
OffsetsInfo(Consumer<?, ?> consumer, Collection<TopicPartition> targetPartitions) {
|
||||
this.consumer = consumer;
|
||||
this.beginOffsets = consumer.beginningOffsets(targetPartitions);
|
||||
this.endOffsets = consumer.endOffsets(targetPartitions);
|
||||
|
@ -46,8 +46,8 @@ public class OffsetsInfo {
|
|||
});
|
||||
}
|
||||
|
||||
public boolean assignedPartitionsFullyPolled() {
|
||||
for (var tp: consumer.assignment()) {
|
||||
boolean assignedPartitionsFullyPolled() {
|
||||
for (var tp : consumer.assignment()) {
|
||||
Preconditions.checkArgument(endOffsets.containsKey(tp));
|
||||
if (endOffsets.get(tp) > consumer.position(tp)) {
|
||||
return false;
|
||||
|
@ -56,4 +56,10 @@ public class OffsetsInfo {
|
|||
return true;
|
||||
}
|
||||
|
||||
long summaryOffsetsRange() {
|
||||
MutableLong cnt = new MutableLong();
|
||||
nonEmptyPartitions.forEach(tp -> cnt.add(endOffsets.get(tp) - beginOffsets.get(tp)));
|
||||
return cnt.getValue();
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@ -8,13 +8,8 @@ import java.util.function.Supplier;
|
|||
public class PollingSettings {
|
||||
|
||||
private static final Duration DEFAULT_POLL_TIMEOUT = Duration.ofMillis(1_000);
|
||||
private static final Duration DEFAULT_PARTITION_POLL_TIMEOUT = Duration.ofMillis(200);
|
||||
private static final int DEFAULT_NO_DATA_EMPTY_POLLS = 3;
|
||||
|
||||
private final Duration pollTimeout;
|
||||
private final Duration partitionPollTimeout;
|
||||
private final int notDataEmptyPolls; //see EmptyPollsCounter docs
|
||||
|
||||
private final Supplier<PollingThrottler> throttlerSupplier;
|
||||
|
||||
public static PollingSettings create(ClustersProperties.Cluster cluster,
|
||||
|
@ -26,18 +21,8 @@ public class PollingSettings {
|
|||
? Duration.ofMillis(pollingProps.getPollTimeoutMs())
|
||||
: DEFAULT_POLL_TIMEOUT;
|
||||
|
||||
var partitionPollTimeout = pollingProps.getPartitionPollTimeout() != null
|
||||
? Duration.ofMillis(pollingProps.getPartitionPollTimeout())
|
||||
: Duration.ofMillis(pollTimeout.toMillis() / 5);
|
||||
|
||||
int noDataEmptyPolls = pollingProps.getNoDataEmptyPolls() != null
|
||||
? pollingProps.getNoDataEmptyPolls()
|
||||
: DEFAULT_NO_DATA_EMPTY_POLLS;
|
||||
|
||||
return new PollingSettings(
|
||||
pollTimeout,
|
||||
partitionPollTimeout,
|
||||
noDataEmptyPolls,
|
||||
PollingThrottler.throttlerSupplier(cluster)
|
||||
);
|
||||
}
|
||||
|
@ -45,34 +30,20 @@ public class PollingSettings {
|
|||
public static PollingSettings createDefault() {
|
||||
return new PollingSettings(
|
||||
DEFAULT_POLL_TIMEOUT,
|
||||
DEFAULT_PARTITION_POLL_TIMEOUT,
|
||||
DEFAULT_NO_DATA_EMPTY_POLLS,
|
||||
PollingThrottler::noop
|
||||
);
|
||||
}
|
||||
|
||||
private PollingSettings(Duration pollTimeout,
|
||||
Duration partitionPollTimeout,
|
||||
int notDataEmptyPolls,
|
||||
Supplier<PollingThrottler> throttlerSupplier) {
|
||||
this.pollTimeout = pollTimeout;
|
||||
this.partitionPollTimeout = partitionPollTimeout;
|
||||
this.notDataEmptyPolls = notDataEmptyPolls;
|
||||
this.throttlerSupplier = throttlerSupplier;
|
||||
}
|
||||
|
||||
public EmptyPollsCounter createEmptyPollsCounter() {
|
||||
return new EmptyPollsCounter(notDataEmptyPolls);
|
||||
}
|
||||
|
||||
public Duration getPollTimeout() {
|
||||
return pollTimeout;
|
||||
}
|
||||
|
||||
public Duration getPartitionPollTimeout() {
|
||||
return partitionPollTimeout;
|
||||
}
|
||||
|
||||
public PollingThrottler getPollingThrottler() {
|
||||
return throttlerSupplier.get();
|
||||
}
|
||||
|
|
|
@ -0,0 +1,98 @@
package com.provectus.kafka.ui.emitter;

import com.provectus.kafka.ui.model.ConsumerPosition;
import com.provectus.kafka.ui.model.TopicMessageEventDTO;
import java.util.ArrayList;
import java.util.List;
import java.util.TreeMap;
import java.util.function.Supplier;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.errors.InterruptException;
import org.apache.kafka.common.utils.Bytes;
import reactor.core.publisher.FluxSink;

@Slf4j
abstract class RangePollingEmitter extends AbstractEmitter {

  private final Supplier<EnhancedConsumer> consumerSupplier;
  protected final ConsumerPosition consumerPosition;
  protected final int messagesPerPage;

  protected RangePollingEmitter(Supplier<EnhancedConsumer> consumerSupplier,
                                ConsumerPosition consumerPosition,
                                int messagesPerPage,
                                MessagesProcessing messagesProcessing,
                                PollingSettings pollingSettings) {
    super(messagesProcessing, pollingSettings);
    this.consumerPosition = consumerPosition;
    this.messagesPerPage = messagesPerPage;
    this.consumerSupplier = consumerSupplier;
  }

  protected record FromToOffset(/*inclusive*/ long from, /*exclusive*/ long to) {
  }

  //should return empty map if polling should be stopped
  protected abstract TreeMap<TopicPartition, FromToOffset> nextPollingRange(
      TreeMap<TopicPartition, FromToOffset> prevRange, //empty on start
      SeekOperations seekOperations
  );

  @Override
  public void accept(FluxSink<TopicMessageEventDTO> sink) {
    log.debug("Starting polling for {}", consumerPosition);
    try (EnhancedConsumer consumer = consumerSupplier.get()) {
      sendPhase(sink, "Consumer created");
      var seekOperations = SeekOperations.create(consumer, consumerPosition);
      TreeMap<TopicPartition, FromToOffset> pollRange = nextPollingRange(new TreeMap<>(), seekOperations);
      log.debug("Starting from offsets {}", pollRange);

      while (!sink.isCancelled() && !pollRange.isEmpty() && !sendLimitReached()) {
        var polled = poll(consumer, sink, pollRange);
        send(sink, polled);
        pollRange = nextPollingRange(pollRange, seekOperations);
      }
      if (sink.isCancelled()) {
        log.debug("Polling finished due to sink cancellation");
      }
      sendFinishStatsAndCompleteSink(sink);
      log.debug("Polling finished");
    } catch (InterruptException kafkaInterruptException) {
      log.debug("Polling finished due to thread interruption");
      sink.complete();
    } catch (Exception e) {
      log.error("Error occurred while consuming records", e);
      sink.error(e);
    }
  }

  private List<ConsumerRecord<Bytes, Bytes>> poll(EnhancedConsumer consumer,
                                                  FluxSink<TopicMessageEventDTO> sink,
                                                  TreeMap<TopicPartition, FromToOffset> range) {
    log.trace("Polling range {}", range);
    sendPhase(sink,
        "Polling partitions: %s".formatted(range.keySet().stream().map(TopicPartition::partition).sorted().toList()));

    consumer.assign(range.keySet());
    range.forEach((tp, fromTo) -> consumer.seek(tp, fromTo.from));

    List<ConsumerRecord<Bytes, Bytes>> result = new ArrayList<>();
    while (!sink.isCancelled() && consumer.paused().size() < range.size()) {
      var polledRecords = poll(sink, consumer);
      range.forEach((tp, fromTo) -> {
        polledRecords.records(tp).stream()
            .filter(r -> r.offset() < fromTo.to)
            .forEach(result::add);

        //next position is out of target range -> pausing partition
        if (consumer.position(tp) >= fromTo.to) {
          consumer.pause(List.of(tp));
        }
      });
    }
    consumer.resume(consumer.paused());
    return result;
  }
}

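RangePollingEmitter leaves a single decision to its subclasses: which offset window to read next, where an empty map means "stop". A minimal, hypothetical subclass (not part of this change; the class name and fixed window size are invented) that reads one window of 10 records per partition and then terminates could look like this:

  // illustration of the nextPollingRange contract only
  class SingleWindowEmitter extends RangePollingEmitter {

    SingleWindowEmitter(Supplier<EnhancedConsumer> consumerSupplier,
                        ConsumerPosition consumerPosition,
                        int messagesPerPage,
                        MessagesProcessing messagesProcessing,
                        PollingSettings pollingSettings) {
      super(consumerSupplier, consumerPosition, messagesPerPage, messagesProcessing, pollingSettings);
    }

    @Override
    protected TreeMap<TopicPartition, FromToOffset> nextPollingRange(TreeMap<TopicPartition, FromToOffset> prevRange,
                                                                     SeekOperations seekOperations) {
      if (!prevRange.isEmpty()) {
        return new TreeMap<>(); // empty map -> the accept() loop above stops
      }
      TreeMap<TopicPartition, FromToOffset> range = new TreeMap<>(java.util.Comparator.comparingInt(TopicPartition::partition));
      seekOperations.getOffsetsForSeek().forEach((tp, from) -> range.put(tp, new FromToOffset(from, from + 10)));
      return range;
    }
  }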
@ -10,17 +10,18 @@ import java.util.stream.Collectors;
|
|||
import javax.annotation.Nullable;
|
||||
import lombok.AccessLevel;
|
||||
import lombok.RequiredArgsConstructor;
|
||||
import org.apache.commons.lang3.mutable.MutableLong;
|
||||
import org.apache.kafka.clients.consumer.Consumer;
|
||||
import org.apache.kafka.common.TopicPartition;
|
||||
|
||||
@RequiredArgsConstructor(access = AccessLevel.PACKAGE)
|
||||
class SeekOperations {
|
||||
public class SeekOperations {
|
||||
|
||||
private final Consumer<?, ?> consumer;
|
||||
private final OffsetsInfo offsetsInfo;
|
||||
private final Map<TopicPartition, Long> offsetsForSeek; //only contains non-empty partitions!
|
||||
|
||||
static SeekOperations create(Consumer<?, ?> consumer, ConsumerPosition consumerPosition) {
|
||||
public static SeekOperations create(Consumer<?, ?> consumer, ConsumerPosition consumerPosition) {
|
||||
OffsetsInfo offsetsInfo;
|
||||
if (consumerPosition.getSeekTo() == null) {
|
||||
offsetsInfo = new OffsetsInfo(consumer, consumerPosition.getTopic());
|
||||
|
@ -34,25 +35,37 @@ class SeekOperations {
|
|||
);
|
||||
}
|
||||
|
||||
void assignAndSeekNonEmptyPartitions() {
|
||||
public void assignAndSeekNonEmptyPartitions() {
|
||||
consumer.assign(offsetsForSeek.keySet());
|
||||
offsetsForSeek.forEach(consumer::seek);
|
||||
}
|
||||
|
||||
Map<TopicPartition, Long> getBeginOffsets() {
|
||||
public Map<TopicPartition, Long> getBeginOffsets() {
|
||||
return offsetsInfo.getBeginOffsets();
|
||||
}
|
||||
|
||||
Map<TopicPartition, Long> getEndOffsets() {
|
||||
public Map<TopicPartition, Long> getEndOffsets() {
|
||||
return offsetsInfo.getEndOffsets();
|
||||
}
|
||||
|
||||
boolean assignedPartitionsFullyPolled() {
|
||||
public boolean assignedPartitionsFullyPolled() {
|
||||
return offsetsInfo.assignedPartitionsFullyPolled();
|
||||
}
|
||||
|
||||
// sum of (end - start) offsets for all partitions
|
||||
public long summaryOffsetsRange() {
|
||||
return offsetsInfo.summaryOffsetsRange();
|
||||
}
|
||||
|
||||
// sum of differences between initial consumer seek and current consumer position (across all partitions)
|
||||
public long offsetsProcessedFromSeek() {
|
||||
MutableLong count = new MutableLong();
|
||||
offsetsForSeek.forEach((tp, initialOffset) -> count.add(consumer.position(tp) - initialOffset));
|
||||
return count.getValue();
|
||||
}
|
||||
|
||||
// Get offsets to seek to. NOTE: offsets do not contain empty partitions offsets
|
||||
Map<TopicPartition, Long> getOffsetsForSeek() {
|
||||
public Map<TopicPartition, Long> getOffsetsForSeek() {
|
||||
return offsetsForSeek;
|
||||
}
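Taken together, summaryOffsetsRange() and offsetsProcessedFromSeek() give a caller what it needs for a progress estimate. A rough sketch of how a completeness percentage might be derived from them (not code from this change):

  SeekOperations seekOperations = SeekOperations.create(consumer, consumerPosition);
  long total = seekOperations.summaryOffsetsRange();           // sum of (end - begin) over non-empty partitions
  long processed = seekOperations.offsetsProcessedFromSeek();  // how far the consumer has moved since the initial seek
  double completenessPercent = total == 0 ? 100.0 : (100.0 * processed) / total;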
|
||||
|
||||
|
@ -61,19 +74,19 @@ class SeekOperations {
|
|||
*/
|
||||
@VisibleForTesting
|
||||
static Map<TopicPartition, Long> getOffsetsForSeek(Consumer<?, ?> consumer,
|
||||
OffsetsInfo offsetsInfo,
|
||||
SeekTypeDTO seekType,
|
||||
@Nullable Map<TopicPartition, Long> seekTo) {
|
||||
OffsetsInfo offsetsInfo,
|
||||
SeekTypeDTO seekType,
|
||||
@Nullable Map<TopicPartition, Long> seekTo) {
|
||||
switch (seekType) {
|
||||
case LATEST:
|
||||
return consumer.endOffsets(offsetsInfo.getNonEmptyPartitions());
|
||||
case BEGINNING:
|
||||
return consumer.beginningOffsets(offsetsInfo.getNonEmptyPartitions());
|
||||
case OFFSET:
|
||||
Preconditions.checkNotNull(offsetsInfo);
|
||||
Preconditions.checkNotNull(seekTo);
|
||||
return fixOffsets(offsetsInfo, seekTo);
|
||||
case TIMESTAMP:
|
||||
Preconditions.checkNotNull(offsetsInfo);
|
||||
Preconditions.checkNotNull(seekTo);
|
||||
return offsetsForTimestamp(consumer, offsetsInfo, seekTo);
|
||||
default:
|
||||
throw new IllegalStateException();
|
||||
|
@ -100,7 +113,7 @@ class SeekOperations {
|
|||
}
|
||||
|
||||
private static Map<TopicPartition, Long> offsetsForTimestamp(Consumer<?, ?> consumer, OffsetsInfo offsetsInfo,
|
||||
Map<TopicPartition, Long> timestamps) {
|
||||
Map<TopicPartition, Long> timestamps) {
|
||||
timestamps = new HashMap<>(timestamps);
|
||||
timestamps.keySet().retainAll(offsetsInfo.getNonEmptyPartitions());
|
||||
|
||||
|
|
|
@ -1,25 +1,28 @@
|
|||
package com.provectus.kafka.ui.emitter;
|
||||
|
||||
import com.provectus.kafka.ui.model.ConsumerPosition;
|
||||
import com.provectus.kafka.ui.model.TopicMessageDTO;
|
||||
import com.provectus.kafka.ui.model.TopicMessageEventDTO;
|
||||
import com.provectus.kafka.ui.serdes.ConsumerRecordDeserializer;
|
||||
import java.util.HashMap;
|
||||
import java.util.function.Predicate;
|
||||
import java.util.function.Supplier;
|
||||
import lombok.extern.slf4j.Slf4j;
|
||||
import org.apache.kafka.common.errors.InterruptException;
|
||||
import reactor.core.publisher.FluxSink;
|
||||
|
||||
@Slf4j
|
||||
public class TailingEmitter extends AbstractEmitter
|
||||
implements java.util.function.Consumer<FluxSink<TopicMessageEventDTO>> {
|
||||
public class TailingEmitter extends AbstractEmitter {
|
||||
|
||||
private final Supplier<EnhancedConsumer> consumerSupplier;
|
||||
private final ConsumerPosition consumerPosition;
|
||||
|
||||
public TailingEmitter(Supplier<EnhancedConsumer> consumerSupplier,
|
||||
ConsumerPosition consumerPosition,
|
||||
MessagesProcessing messagesProcessing,
|
||||
ConsumerRecordDeserializer deserializer,
|
||||
Predicate<TopicMessageDTO> filter,
|
||||
PollingSettings pollingSettings) {
|
||||
super(messagesProcessing, pollingSettings);
|
||||
super(new MessagesProcessing(deserializer, filter, false, null), pollingSettings);
|
||||
this.consumerSupplier = consumerSupplier;
|
||||
this.consumerPosition = consumerPosition;
|
||||
}
|
||||
|
@ -32,7 +35,7 @@ public class TailingEmitter extends AbstractEmitter
|
|||
while (!sink.isCancelled()) {
|
||||
sendPhase(sink, "Polling");
|
||||
var polled = poll(sink, consumer);
|
||||
polled.forEach(r -> sendMessage(sink, r));
|
||||
send(sink, polled);
|
||||
}
|
||||
sink.complete();
|
||||
log.debug("Tailing finished");
|
||||
|
|
|
@ -1,15 +1,25 @@
|
|||
package com.provectus.kafka.ui.model.rbac.permission;
|
||||
|
||||
import java.util.Set;
|
||||
import org.apache.commons.lang3.EnumUtils;
|
||||
import org.jetbrains.annotations.Nullable;
|
||||
|
||||
public enum AclAction implements PermissibleAction {
|
||||
|
||||
VIEW,
|
||||
EDIT;
|
||||
EDIT
|
||||
|
||||
;
|
||||
|
||||
public static final Set<AclAction> ALTER_ACTIONS = Set.of(EDIT);
|
||||
|
||||
@Nullable
|
||||
public static AclAction fromString(String name) {
|
||||
return EnumUtils.getEnum(AclAction.class, name);
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean isAlter() {
|
||||
return ALTER_ACTIONS.contains(this);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1,5 +1,6 @@
|
|||
package com.provectus.kafka.ui.model.rbac.permission;
|
||||
|
||||
import java.util.Set;
|
||||
import org.apache.commons.lang3.EnumUtils;
|
||||
import org.jetbrains.annotations.Nullable;
|
||||
|
||||
|
@ -10,9 +11,15 @@ public enum ApplicationConfigAction implements PermissibleAction {
|
|||
|
||||
;
|
||||
|
||||
public static final Set<ApplicationConfigAction> ALTER_ACTIONS = Set.of(EDIT);
|
||||
|
||||
@Nullable
|
||||
public static ApplicationConfigAction fromString(String name) {
|
||||
return EnumUtils.getEnum(ApplicationConfigAction.class, name);
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean isAlter() {
|
||||
return ALTER_ACTIONS.contains(this);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1,14 +1,24 @@
|
|||
package com.provectus.kafka.ui.model.rbac.permission;
|
||||
|
||||
import java.util.Set;
|
||||
import org.apache.commons.lang3.EnumUtils;
|
||||
import org.jetbrains.annotations.Nullable;
|
||||
|
||||
public enum AuditAction implements PermissibleAction {
|
||||
|
||||
VIEW;
|
||||
VIEW
|
||||
|
||||
;
|
||||
|
||||
private static final Set<AuditAction> ALTER_ACTIONS = Set.of();
|
||||
|
||||
@Nullable
|
||||
public static AuditAction fromString(String name) {
|
||||
return EnumUtils.getEnum(AuditAction.class, name);
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean isAlter() {
|
||||
return ALTER_ACTIONS.contains(this);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1,5 +1,6 @@
|
|||
package com.provectus.kafka.ui.model.rbac.permission;
|
||||
|
||||
import java.util.Set;
|
||||
import org.apache.commons.lang3.EnumUtils;
|
||||
import org.jetbrains.annotations.Nullable;
|
||||
|
||||
|
@ -10,9 +11,15 @@ public enum ClusterConfigAction implements PermissibleAction {
|
|||
|
||||
;
|
||||
|
||||
public static final Set<ClusterConfigAction> ALTER_ACTIONS = Set.of(EDIT);
|
||||
|
||||
@Nullable
|
||||
public static ClusterConfigAction fromString(String name) {
|
||||
return EnumUtils.getEnum(ClusterConfigAction.class, name);
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean isAlter() {
|
||||
return ALTER_ACTIONS.contains(this);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1,5 +1,6 @@
|
|||
package com.provectus.kafka.ui.model.rbac.permission;
|
||||
|
||||
import java.util.Set;
|
||||
import org.apache.commons.lang3.EnumUtils;
|
||||
import org.jetbrains.annotations.Nullable;
|
||||
|
||||
|
@ -12,9 +13,15 @@ public enum ConnectAction implements PermissibleAction {
|
|||
|
||||
;
|
||||
|
||||
public static final Set<ConnectAction> ALTER_ACTIONS = Set.of(CREATE, EDIT, RESTART);
|
||||
|
||||
@Nullable
|
||||
public static ConnectAction fromString(String name) {
|
||||
return EnumUtils.getEnum(ConnectAction.class, name);
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean isAlter() {
|
||||
return ALTER_ACTIONS.contains(this);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1,5 +1,6 @@
|
|||
package com.provectus.kafka.ui.model.rbac.permission;
|
||||
|
||||
import java.util.Set;
|
||||
import org.apache.commons.lang3.EnumUtils;
|
||||
import org.jetbrains.annotations.Nullable;
|
||||
|
||||
|
@ -7,14 +8,19 @@ public enum ConsumerGroupAction implements PermissibleAction {
|
|||
|
||||
VIEW,
|
||||
DELETE,
|
||||
|
||||
RESET_OFFSETS
|
||||
|
||||
;
|
||||
|
||||
public static final Set<ConsumerGroupAction> ALTER_ACTIONS = Set.of(DELETE, RESET_OFFSETS);
|
||||
|
||||
@Nullable
|
||||
public static ConsumerGroupAction fromString(String name) {
|
||||
return EnumUtils.getEnum(ConsumerGroupAction.class, name);
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean isAlter() {
|
||||
return ALTER_ACTIONS.contains(this);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1,15 +1,24 @@
|
|||
package com.provectus.kafka.ui.model.rbac.permission;
|
||||
|
||||
import java.util.Set;
|
||||
import org.apache.commons.lang3.EnumUtils;
|
||||
import org.jetbrains.annotations.Nullable;
|
||||
|
||||
public enum KsqlAction implements PermissibleAction {
|
||||
|
||||
EXECUTE;
|
||||
EXECUTE
|
||||
|
||||
;
|
||||
|
||||
public static final Set<KsqlAction> ALTER_ACTIONS = Set.of(EXECUTE);
|
||||
|
||||
@Nullable
|
||||
public static KsqlAction fromString(String name) {
|
||||
return EnumUtils.getEnum(KsqlAction.class, name);
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean isAlter() {
|
||||
return ALTER_ACTIONS.contains(this);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -5,4 +5,9 @@ public sealed interface PermissibleAction permits
|
|||
ConsumerGroupAction, SchemaAction,
|
||||
ConnectAction, ClusterConfigAction,
|
||||
KsqlAction, TopicAction, AuditAction {
|
||||
|
||||
String name();
|
||||
|
||||
boolean isAlter();
|
||||
|
||||
}
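With isAlter() exposed on the sealed interface, callers can classify a set of requested permissions without switching over each enum type. A hedged illustration (the helper name is invented):

  // returns true if any requested action mutates state, e.g. to decide whether an operation needs stricter handling
  static boolean containsAlterAction(java.util.Collection<PermissibleAction> requested) {
    return requested.stream().anyMatch(PermissibleAction::isAlter);
  }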
|
||||
|
|
|
@ -1,5 +1,6 @@
|
|||
package com.provectus.kafka.ui.model.rbac.permission;
|
||||
|
||||
import java.util.Set;
|
||||
import org.apache.commons.lang3.EnumUtils;
|
||||
import org.jetbrains.annotations.Nullable;
|
||||
|
||||
|
@ -13,9 +14,15 @@ public enum SchemaAction implements PermissibleAction {
|
|||
|
||||
;
|
||||
|
||||
public static final Set<SchemaAction> ALTER_ACTIONS = Set.of(CREATE, DELETE, EDIT, MODIFY_GLOBAL_COMPATIBILITY);
|
||||
|
||||
@Nullable
|
||||
public static SchemaAction fromString(String name) {
|
||||
return EnumUtils.getEnum(SchemaAction.class, name);
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean isAlter() {
|
||||
return ALTER_ACTIONS.contains(this);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1,5 +1,6 @@
|
|||
package com.provectus.kafka.ui.model.rbac.permission;
|
||||
|
||||
import java.util.Set;
|
||||
import org.apache.commons.lang3.EnumUtils;
|
||||
import org.jetbrains.annotations.Nullable;
|
||||
|
||||
|
@ -9,16 +10,21 @@ public enum TopicAction implements PermissibleAction {
|
|||
CREATE,
|
||||
EDIT,
|
||||
DELETE,
|
||||
|
||||
MESSAGES_READ,
|
||||
MESSAGES_PRODUCE,
|
||||
MESSAGES_DELETE,
|
||||
|
||||
;
|
||||
|
||||
public static final Set<TopicAction> ALTER_ACTIONS = Set.of(CREATE, EDIT, DELETE, MESSAGES_PRODUCE, MESSAGES_DELETE);
|
||||
|
||||
@Nullable
|
||||
public static TopicAction fromString(String name) {
|
||||
return EnumUtils.getEnum(TopicAction.class, name);
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean isAlter() {
|
||||
return ALTER_ACTIONS.contains(this);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
package com.provectus.kafka.ui.serdes;
|
||||
|
||||
import com.provectus.kafka.ui.model.TopicMessageDTO;
|
||||
import com.provectus.kafka.ui.model.TopicMessageDTO.TimestampTypeEnum;
|
||||
import com.provectus.kafka.ui.serde.api.Serde;
|
||||
import java.time.Instant;
|
||||
import java.time.OffsetDateTime;
|
||||
|
@ -8,6 +9,7 @@ import java.time.ZoneId;
|
|||
import java.util.Arrays;
|
||||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
import java.util.function.UnaryOperator;
|
||||
import lombok.RequiredArgsConstructor;
|
||||
import lombok.extern.slf4j.Slf4j;
|
||||
import org.apache.kafka.clients.consumer.ConsumerRecord;
|
||||
|
@ -32,6 +34,8 @@ public class ConsumerRecordDeserializer {
|
|||
private final Serde.Deserializer fallbackKeyDeserializer;
|
||||
private final Serde.Deserializer fallbackValueDeserializer;
|
||||
|
||||
private final UnaryOperator<TopicMessageDTO> masker;
|
||||
|
||||
public TopicMessageDTO deserialize(ConsumerRecord<Bytes, Bytes> rec) {
|
||||
var message = new TopicMessageDTO();
|
||||
fillKey(message, rec);
|
||||
|
@ -47,20 +51,15 @@ public class ConsumerRecordDeserializer {
|
|||
message.setValueSize(getValueSize(rec));
|
||||
message.setHeadersSize(getHeadersSize(rec));
|
||||
|
||||
return message;
|
||||
return masker.apply(message);
|
||||
}
|
||||
|
||||
private static TopicMessageDTO.TimestampTypeEnum mapToTimestampType(TimestampType timestampType) {
|
||||
switch (timestampType) {
|
||||
case CREATE_TIME:
|
||||
return TopicMessageDTO.TimestampTypeEnum.CREATE_TIME;
|
||||
case LOG_APPEND_TIME:
|
||||
return TopicMessageDTO.TimestampTypeEnum.LOG_APPEND_TIME;
|
||||
case NO_TIMESTAMP_TYPE:
|
||||
return TopicMessageDTO.TimestampTypeEnum.NO_TIMESTAMP_TYPE;
|
||||
default:
|
||||
throw new IllegalArgumentException("Unknown timestampType: " + timestampType);
|
||||
}
|
||||
private static TimestampTypeEnum mapToTimestampType(TimestampType timestampType) {
|
||||
return switch (timestampType) {
|
||||
case CREATE_TIME -> TimestampTypeEnum.CREATE_TIME;
|
||||
case LOG_APPEND_TIME -> TimestampTypeEnum.LOG_APPEND_TIME;
|
||||
case NO_TIMESTAMP_TYPE -> TimestampTypeEnum.NO_TIMESTAMP_TYPE;
|
||||
};
|
||||
}
|
||||
|
||||
private void fillHeaders(TopicMessageDTO message, ConsumerRecord<Bytes, Bytes> rec) {
|
||||
|
|
|
@ -16,6 +16,7 @@ import com.provectus.kafka.ui.serdes.builtin.HexSerde;
import com.provectus.kafka.ui.serdes.builtin.Int32Serde;
import com.provectus.kafka.ui.serdes.builtin.Int64Serde;
import com.provectus.kafka.ui.serdes.builtin.ProtobufFileSerde;
import com.provectus.kafka.ui.serdes.builtin.ProtobufRawSerde;
import com.provectus.kafka.ui.serdes.builtin.StringSerde;
import com.provectus.kafka.ui.serdes.builtin.UInt32Serde;
import com.provectus.kafka.ui.serdes.builtin.UInt64Serde;
@ -50,6 +51,7 @@ public class SerdesInitializer {
        .put(Base64Serde.name(), Base64Serde.class)
        .put(HexSerde.name(), HexSerde.class)
        .put(UuidBinarySerde.name(), UuidBinarySerde.class)
        .put(ProtobufRawSerde.name(), ProtobufRawSerde.class)
        .build(),
    new CustomSerdeLoader()
);

@ -2,6 +2,7 @@ package com.provectus.kafka.ui.serdes.builtin;

import com.fasterxml.jackson.core.JsonGenerator;
import com.fasterxml.jackson.databind.JsonSerializer;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.SerializerProvider;
import com.fasterxml.jackson.databind.json.JsonMapper;
import com.fasterxml.jackson.databind.module.SimpleModule;

@ -0,0 +1,59 @@
package com.provectus.kafka.ui.serdes.builtin;

import com.google.protobuf.UnknownFieldSet;
import com.provectus.kafka.ui.exception.ValidationException;
import com.provectus.kafka.ui.serde.api.DeserializeResult;
import com.provectus.kafka.ui.serde.api.RecordHeaders;
import com.provectus.kafka.ui.serde.api.SchemaDescription;
import com.provectus.kafka.ui.serdes.BuiltInSerde;
import java.util.Map;
import java.util.Optional;
import lombok.SneakyThrows;

public class ProtobufRawSerde implements BuiltInSerde {

  public static String name() {
    return "ProtobufDecodeRaw";
  }

  @Override
  public Optional<String> getDescription() {
    return Optional.empty();
  }

  @Override
  public Optional<SchemaDescription> getSchema(String topic, Target type) {
    return Optional.empty();
  }

  @Override
  public boolean canSerialize(String topic, Target type) {
    return false;
  }

  @Override
  public boolean canDeserialize(String topic, Target type) {
    return true;
  }

  @Override
  public Serializer serializer(String topic, Target type) {
    throw new UnsupportedOperationException();
  }

  @Override
  public Deserializer deserializer(String topic, Target type) {
    return new Deserializer() {
      @SneakyThrows
      @Override
      public DeserializeResult deserialize(RecordHeaders headers, byte[] data) {
        try {
          UnknownFieldSet unknownFields = UnknownFieldSet.parseFrom(data);
          return new DeserializeResult(unknownFields.toString(), DeserializeResult.Type.STRING, Map.of());
        } catch (Exception e) {
          throw new ValidationException(e.getMessage());
        }
      }
    };
  }
}

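The deserializer above relies on protobuf's UnknownFieldSet, which can decode any valid protobuf payload without knowing its schema. A small self-contained sketch of that round trip (the field number and value are invented for the example, this is not code from the change):

  import com.google.protobuf.UnknownFieldSet;
  import com.google.protobuf.UnknownFieldSet.Field;

  // build a tiny payload (field 1 = varint 42), then decode it the same way the serde does
  byte[] payload = UnknownFieldSet.newBuilder()
      .addField(1, Field.newBuilder().addVarint(42).build())
      .build()
      .toByteArray();
  UnknownFieldSet decoded = UnknownFieldSet.parseFrom(payload);
  System.out.println(decoded); // prints "1: 42" -- field numbers and raw values, no schema required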
@ -102,7 +102,8 @@ public class DeserializationService implements Closeable {
        valueSerde.deserializer(topic, Serde.Target.VALUE),
        fallbackSerde.getName(),
        fallbackSerde.deserializer(topic, Serde.Target.KEY),
        fallbackSerde.deserializer(topic, Serde.Target.VALUE)
        fallbackSerde.deserializer(topic, Serde.Target.VALUE),
        cluster.getMasking().getMaskerForTopic(topic)
    );
  }

@ -2,10 +2,9 @@ package com.provectus.kafka.ui.service;
|
|||
|
||||
import com.google.common.util.concurrent.RateLimiter;
|
||||
import com.provectus.kafka.ui.config.ClustersProperties;
|
||||
import com.provectus.kafka.ui.emitter.BackwardRecordEmitter;
|
||||
import com.provectus.kafka.ui.emitter.ForwardRecordEmitter;
|
||||
import com.provectus.kafka.ui.emitter.BackwardEmitter;
|
||||
import com.provectus.kafka.ui.emitter.ForwardEmitter;
|
||||
import com.provectus.kafka.ui.emitter.MessageFilters;
|
||||
import com.provectus.kafka.ui.emitter.MessagesProcessing;
|
||||
import com.provectus.kafka.ui.emitter.TailingEmitter;
|
||||
import com.provectus.kafka.ui.exception.TopicNotFoundException;
|
||||
import com.provectus.kafka.ui.exception.ValidationException;
|
||||
|
@ -18,7 +17,6 @@ import com.provectus.kafka.ui.model.SmartFilterTestExecutionDTO;
|
|||
import com.provectus.kafka.ui.model.SmartFilterTestExecutionResultDTO;
|
||||
import com.provectus.kafka.ui.model.TopicMessageDTO;
|
||||
import com.provectus.kafka.ui.model.TopicMessageEventDTO;
|
||||
import com.provectus.kafka.ui.serde.api.Serde;
|
||||
import com.provectus.kafka.ui.serdes.ProducerRecordCreator;
|
||||
import com.provectus.kafka.ui.util.SslPropertiesUtil;
|
||||
import java.time.Instant;
|
||||
|
@ -45,7 +43,6 @@ import org.apache.kafka.common.TopicPartition;
|
|||
import org.apache.kafka.common.serialization.ByteArraySerializer;
|
||||
import org.springframework.stereotype.Service;
|
||||
import reactor.core.publisher.Flux;
|
||||
import reactor.core.publisher.FluxSink;
|
||||
import reactor.core.publisher.Mono;
|
||||
import reactor.core.scheduler.Schedulers;
|
||||
|
||||
|
@ -231,54 +228,24 @@ public class MessagesService {
|
|||
@Nullable String keySerde,
|
||||
@Nullable String valueSerde) {
|
||||
|
||||
java.util.function.Consumer<? super FluxSink<TopicMessageEventDTO>> emitter;
|
||||
|
||||
var processing = new MessagesProcessing(
|
||||
deserializationService.deserializerFor(cluster, topic, keySerde, valueSerde),
|
||||
getMsgFilter(query, filterQueryType),
|
||||
seekDirection == SeekDirectionDTO.TAILING ? null : limit
|
||||
);
|
||||
|
||||
if (seekDirection.equals(SeekDirectionDTO.FORWARD)) {
|
||||
emitter = new ForwardRecordEmitter(
|
||||
var deserializer = deserializationService.deserializerFor(cluster, topic, keySerde, valueSerde);
|
||||
var filter = getMsgFilter(query, filterQueryType);
|
||||
var emitter = switch (seekDirection) {
|
||||
case FORWARD -> new ForwardEmitter(
|
||||
() -> consumerGroupService.createConsumer(cluster),
|
||||
consumerPosition,
|
||||
processing,
|
||||
cluster.getPollingSettings()
|
||||
consumerPosition, limit, deserializer, filter, cluster.getPollingSettings()
|
||||
);
|
||||
} else if (seekDirection.equals(SeekDirectionDTO.BACKWARD)) {
|
||||
emitter = new BackwardRecordEmitter(
|
||||
case BACKWARD -> new BackwardEmitter(
|
||||
() -> consumerGroupService.createConsumer(cluster),
|
||||
consumerPosition,
|
||||
limit,
|
||||
processing,
|
||||
cluster.getPollingSettings()
|
||||
consumerPosition, limit, deserializer, filter, cluster.getPollingSettings()
|
||||
);
|
||||
} else {
|
||||
emitter = new TailingEmitter(
|
||||
case TAILING -> new TailingEmitter(
|
||||
() -> consumerGroupService.createConsumer(cluster),
|
||||
consumerPosition,
|
||||
processing,
|
||||
cluster.getPollingSettings()
|
||||
consumerPosition, deserializer, filter, cluster.getPollingSettings()
|
||||
);
|
||||
}
|
||||
return Flux.create(emitter)
|
||||
.map(getDataMasker(cluster, topic))
|
||||
.map(throttleUiPublish(seekDirection));
|
||||
}
|
||||
|
||||
private UnaryOperator<TopicMessageEventDTO> getDataMasker(KafkaCluster cluster, String topicName) {
|
||||
var keyMasker = cluster.getMasking().getMaskingFunction(topicName, Serde.Target.KEY);
|
||||
var valMasker = cluster.getMasking().getMaskingFunction(topicName, Serde.Target.VALUE);
|
||||
return evt -> {
|
||||
if (evt.getType() != TopicMessageEventDTO.TypeEnum.MESSAGE) {
|
||||
return evt;
|
||||
}
|
||||
return evt.message(
|
||||
evt.getMessage()
|
||||
.key(keyMasker.apply(evt.getMessage().getKey()))
|
||||
.content(valMasker.apply(evt.getMessage().getContent())));
|
||||
};
|
||||
return Flux.create(emitter)
|
||||
.map(throttleUiPublish(seekDirection));
|
||||
}
|
||||
|
||||
private Predicate<TopicMessageDTO> getMsgFilter(String query,
|
||||
|
|
|
@ -92,14 +92,12 @@ class AnalysisTasksStore {
        .result(completedState);
  }

  @Value
  @Builder(toBuilder = true)
  private static class RunningAnalysis {
    Instant startedAt;
    double completenessPercent;
    long msgsScanned;
    long bytesScanned;
    Closeable task;
  private record RunningAnalysis(Instant startedAt,
                                 double completenessPercent,
                                 long msgsScanned,
                                 long bytesScanned,
                                 Closeable task) {

    TopicAnalysisProgressDTO toDto() {
      return new TopicAnalysisProgressDTO()

@ -1,10 +1,11 @@
|
|||
package com.provectus.kafka.ui.service.analyze;
|
||||
|
||||
import com.provectus.kafka.ui.emitter.EmptyPollsCounter;
|
||||
import static com.provectus.kafka.ui.model.SeekTypeDTO.BEGINNING;
|
||||
|
||||
import com.provectus.kafka.ui.emitter.EnhancedConsumer;
|
||||
import com.provectus.kafka.ui.emitter.OffsetsInfo;
|
||||
import com.provectus.kafka.ui.emitter.PollingSettings;
|
||||
import com.provectus.kafka.ui.emitter.SeekOperations;
|
||||
import com.provectus.kafka.ui.exception.TopicAnalysisException;
|
||||
import com.provectus.kafka.ui.model.ConsumerPosition;
|
||||
import com.provectus.kafka.ui.model.KafkaCluster;
|
||||
import com.provectus.kafka.ui.model.TopicAnalysisDTO;
|
||||
import com.provectus.kafka.ui.service.ConsumerGroupService;
|
||||
|
@ -15,16 +16,14 @@ import java.time.Instant;
|
|||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
import java.util.Optional;
|
||||
import java.util.stream.Collectors;
|
||||
import java.util.stream.IntStream;
|
||||
import lombok.RequiredArgsConstructor;
|
||||
import lombok.extern.slf4j.Slf4j;
|
||||
import org.apache.kafka.clients.consumer.ConsumerConfig;
|
||||
import org.apache.kafka.common.TopicPartition;
|
||||
import org.apache.kafka.common.errors.InterruptException;
|
||||
import org.apache.kafka.common.errors.WakeupException;
|
||||
import org.springframework.stereotype.Component;
|
||||
import reactor.core.publisher.Mono;
|
||||
import reactor.core.scheduler.Scheduler;
|
||||
import reactor.core.scheduler.Schedulers;
|
||||
|
||||
|
||||
|
@ -33,6 +32,14 @@ import reactor.core.scheduler.Schedulers;
|
|||
@RequiredArgsConstructor
|
||||
public class TopicAnalysisService {
|
||||
|
||||
private static final Scheduler SCHEDULER = Schedulers.newBoundedElastic(
|
||||
Schedulers.DEFAULT_BOUNDED_ELASTIC_SIZE,
|
||||
Schedulers.DEFAULT_BOUNDED_ELASTIC_QUEUESIZE,
|
||||
"topic-analysis-tasks",
|
||||
10, //ttl for idle threads (in sec)
|
||||
true //daemon
|
||||
);
|
||||
|
||||
private final AnalysisTasksStore analysisTasksStore = new AnalysisTasksStore();
|
||||
|
||||
private final TopicsService topicsService;
|
||||
|
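For context on the scheduler block above: Reactor's Schedulers.newBoundedElastic(threadCap, queuedTaskCap, name, ttlSeconds, daemon) overload creates a capped, named pool, which is what the topic-analysis tasks are moved onto. A minimal usage sketch with made-up sizing values:

import reactor.core.scheduler.Scheduler;
import reactor.core.scheduler.Schedulers;

public class BoundedElasticExample {
  public static void main(String[] args) throws InterruptedException {
    // Up to 4 threads, 100 queued tasks, threads named "analysis-*",
    // idle threads retired after 10s, daemon so the JVM can still exit.
    Scheduler scheduler = Schedulers.newBoundedElastic(4, 100, "analysis", 10, true);
    scheduler.schedule(() -> System.out.println("running on " + Thread.currentThread().getName()));
    Thread.sleep(200); // give the daemon worker a moment before disposing
    scheduler.dispose();
  }
}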
@ -40,30 +47,18 @@ public class TopicAnalysisService {
|
|||
|
||||
public Mono<Void> analyze(KafkaCluster cluster, String topicName) {
|
||||
return topicsService.getTopicDetails(cluster, topicName)
|
||||
.doOnNext(topic ->
|
||||
startAnalysis(
|
||||
cluster,
|
||||
topicName,
|
||||
topic.getPartitionCount(),
|
||||
topic.getPartitions().values()
|
||||
.stream()
|
||||
.mapToLong(p -> p.getOffsetMax() - p.getOffsetMin())
|
||||
.sum()
|
||||
)
|
||||
).then();
|
||||
.doOnNext(topic -> startAnalysis(cluster, topicName))
|
||||
.then();
|
||||
}
|
||||
|
||||
private synchronized void startAnalysis(KafkaCluster cluster,
|
||||
String topic,
|
||||
int partitionsCnt,
|
||||
long approxNumberOfMsgs) {
|
||||
private synchronized void startAnalysis(KafkaCluster cluster, String topic) {
|
||||
var topicId = new TopicIdentity(cluster, topic);
|
||||
if (analysisTasksStore.isAnalysisInProgress(topicId)) {
|
||||
throw new TopicAnalysisException("Topic is already analyzing");
|
||||
}
|
||||
var task = new AnalysisTask(cluster, topicId, partitionsCnt, approxNumberOfMsgs, cluster.getPollingSettings());
|
||||
var task = new AnalysisTask(cluster, topicId);
|
||||
analysisTasksStore.registerNewTask(topicId, task);
|
||||
Schedulers.boundedElastic().schedule(task);
|
||||
SCHEDULER.schedule(task);
|
||||
}
|
||||
|
||||
public void cancelAnalysis(KafkaCluster cluster, String topicName) {
|
||||
|
@ -79,20 +74,14 @@ public class TopicAnalysisService {
|
|||
private final Instant startedAt = Instant.now();
|
||||
|
||||
private final TopicIdentity topicId;
|
||||
private final int partitionsCnt;
|
||||
private final long approxNumberOfMsgs;
|
||||
private final EmptyPollsCounter emptyPollsCounter;
|
||||
|
||||
private final TopicAnalysisStats totalStats = new TopicAnalysisStats();
|
||||
private final Map<Integer, TopicAnalysisStats> partitionStats = new HashMap<>();
|
||||
|
||||
private final EnhancedConsumer consumer;
|
||||
|
||||
AnalysisTask(KafkaCluster cluster, TopicIdentity topicId, int partitionsCnt,
|
||||
long approxNumberOfMsgs, PollingSettings pollingSettings) {
|
||||
AnalysisTask(KafkaCluster cluster, TopicIdentity topicId) {
|
||||
this.topicId = topicId;
|
||||
this.approxNumberOfMsgs = approxNumberOfMsgs;
|
||||
this.partitionsCnt = partitionsCnt;
|
||||
this.consumer = consumerGroupService.createConsumer(
|
||||
cluster,
|
||||
// to improve polling throughput
|
||||
|
@ -101,7 +90,6 @@ public class TopicAnalysisService {
|
|||
ConsumerConfig.MAX_POLL_RECORDS_CONFIG, "100000"
|
||||
)
|
||||
);
|
||||
this.emptyPollsCounter = pollingSettings.createEmptyPollsCounter();
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -113,23 +101,20 @@ public class TopicAnalysisService {
|
|||
public void run() {
|
||||
try {
|
||||
log.info("Starting {} topic analysis", topicId);
|
||||
var topicPartitions = IntStream.range(0, partitionsCnt)
|
||||
.peek(i -> partitionStats.put(i, new TopicAnalysisStats()))
|
||||
.mapToObj(i -> new TopicPartition(topicId.topicName, i))
|
||||
.collect(Collectors.toList());
|
||||
consumer.partitionsFor(topicId.topicName)
|
||||
.forEach(tp -> partitionStats.put(tp.partition(), new TopicAnalysisStats()));
|
||||
|
||||
consumer.assign(topicPartitions);
|
||||
consumer.seekToBeginning(topicPartitions);
|
||||
var seekOperations = SeekOperations.create(consumer, new ConsumerPosition(BEGINNING, topicId.topicName, null));
|
||||
long summaryOffsetsRange = seekOperations.summaryOffsetsRange();
|
||||
seekOperations.assignAndSeekNonEmptyPartitions();
|
||||
|
||||
var offsetsInfo = new OffsetsInfo(consumer, topicId.topicName);
|
||||
while (!offsetsInfo.assignedPartitionsFullyPolled() && !emptyPollsCounter.noDataEmptyPollsReached()) {
|
||||
while (!seekOperations.assignedPartitionsFullyPolled()) {
|
||||
var polled = consumer.pollEnhanced(Duration.ofSeconds(3));
|
||||
emptyPollsCounter.count(polled.count());
|
||||
polled.forEach(r -> {
|
||||
totalStats.apply(r);
|
||||
partitionStats.get(r.partition()).apply(r);
|
||||
});
|
||||
updateProgress();
|
||||
updateProgress(seekOperations.offsetsProcessedFromSeek(), summaryOffsetsRange);
|
||||
}
|
||||
analysisTasksStore.setAnalysisResult(topicId, startedAt, totalStats, partitionStats);
|
||||
log.info("{} topic analysis finished", topicId);
|
||||
|
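The rewritten run() above delegates assignment and seeking to SeekOperations and then polls until every assigned partition is fully read. A rough sketch of that control flow against the plain consumer API (the end-offset bookkeeping is simplified and this is not the project's SeekOperations implementation):

import java.time.Duration;
import java.util.List;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.utils.Bytes;

class AnalysisLoopSketch {

  static void scan(Consumer<Bytes, Bytes> consumer, List<TopicPartition> partitions) {
    consumer.assign(partitions);
    consumer.seekToBeginning(partitions);
    // End offsets captured up front define "fully polled" per partition.
    var endOffsets = consumer.endOffsets(partitions);

    while (partitions.stream().anyMatch(tp -> consumer.position(tp) < endOffsets.get(tp))) {
      var polled = consumer.poll(Duration.ofSeconds(3));
      polled.forEach(rec -> {
        // per-record stats (key/value sizes, counts) would be accumulated here
      });
    }
  }
}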
@ -145,13 +130,13 @@ public class TopicAnalysisService {
|
|||
}
|
||||
}
|
||||
|
||||
private void updateProgress() {
|
||||
if (totalStats.totalMsgs > 0 && approxNumberOfMsgs != 0) {
|
||||
private void updateProgress(long processedOffsets, long summaryOffsetsRange) {
|
||||
if (processedOffsets > 0 && summaryOffsetsRange != 0) {
|
||||
analysisTasksStore.updateProgress(
|
||||
topicId,
|
||||
totalStats.totalMsgs,
|
||||
totalStats.keysSize.sum + totalStats.valuesSize.sum,
|
||||
Math.min(100.0, (((double) totalStats.totalMsgs) / approxNumberOfMsgs) * 100)
|
||||
Math.min(100.0, (((double) processedOffsets) / summaryOffsetsRange) * 100)
|
||||
);
|
||||
}
|
||||
}
|
||||
|
|
|
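updateProgress now derives completeness from offsets instead of an estimated message count: offsets processed since the initial seek divided by the summary offsets range, capped at 100 because retention and compaction can make the range overshoot. The arithmetic in isolation:

class ProgressPercentExample {

  static double completenessPercent(long processedOffsets, long summaryOffsetsRange) {
    if (processedOffsets <= 0 || summaryOffsetsRange == 0) {
      return 0.0; // nothing processed yet, or nothing to scan
    }
    return Math.min(100.0, ((double) processedOffsets / summaryOffsetsRange) * 100);
  }

  public static void main(String[] args) {
    System.out.println(completenessPercent(250, 1_000));   // 25.0
    System.out.println(completenessPercent(1_200, 1_000)); // 100.0 (capped)
  }
}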
@ -6,6 +6,7 @@ import com.provectus.kafka.ui.exception.CustomBaseException;
|
|||
import com.provectus.kafka.ui.exception.ValidationException;
|
||||
import com.provectus.kafka.ui.model.rbac.AccessContext;
|
||||
import com.provectus.kafka.ui.model.rbac.Resource;
|
||||
import com.provectus.kafka.ui.model.rbac.permission.PermissibleAction;
|
||||
import java.util.ArrayList;
|
||||
import java.util.LinkedHashMap;
|
||||
import java.util.List;
|
||||
|
@ -33,16 +34,20 @@ record AuditRecord(String timestamp,
|
|||
return MAPPER.writeValueAsString(this);
|
||||
}
|
||||
|
||||
record AuditResource(String accessType, Resource type, @Nullable Object id) {
|
||||
record AuditResource(String accessType, boolean alter, Resource type, @Nullable Object id) {
|
||||
|
||||
private static AuditResource create(PermissibleAction action, Resource type, @Nullable Object id) {
|
||||
return new AuditResource(action.name(), action.isAlter(), type, id);
|
||||
}
|
||||
|
||||
static List<AuditResource> getAccessedResources(AccessContext ctx) {
|
||||
List<AuditResource> resources = new ArrayList<>();
|
||||
ctx.getClusterConfigActions()
|
||||
.forEach(a -> resources.add(new AuditResource(a.name(), Resource.CLUSTERCONFIG, null)));
|
||||
.forEach(a -> resources.add(create(a, Resource.CLUSTERCONFIG, null)));
|
||||
ctx.getTopicActions()
|
||||
.forEach(a -> resources.add(new AuditResource(a.name(), Resource.TOPIC, nameId(ctx.getTopic()))));
|
||||
.forEach(a -> resources.add(create(a, Resource.TOPIC, nameId(ctx.getTopic()))));
|
||||
ctx.getConsumerGroupActions()
|
||||
.forEach(a -> resources.add(new AuditResource(a.name(), Resource.CONSUMER, nameId(ctx.getConsumerGroup()))));
|
||||
.forEach(a -> resources.add(create(a, Resource.CONSUMER, nameId(ctx.getConsumerGroup()))));
|
||||
ctx.getConnectActions()
|
||||
.forEach(a -> {
|
||||
Map<String, String> resourceId = new LinkedHashMap<>();
|
||||
|
@ -50,16 +55,16 @@ record AuditRecord(String timestamp,
|
|||
if (ctx.getConnector() != null) {
|
||||
resourceId.put("connector", ctx.getConnector());
|
||||
}
|
||||
resources.add(new AuditResource(a.name(), Resource.CONNECT, resourceId));
|
||||
resources.add(create(a, Resource.CONNECT, resourceId));
|
||||
});
|
||||
ctx.getSchemaActions()
|
||||
.forEach(a -> resources.add(new AuditResource(a.name(), Resource.SCHEMA, nameId(ctx.getSchema()))));
|
||||
.forEach(a -> resources.add(create(a, Resource.SCHEMA, nameId(ctx.getSchema()))));
|
||||
ctx.getKsqlActions()
|
||||
.forEach(a -> resources.add(new AuditResource(a.name(), Resource.KSQL, null)));
|
||||
.forEach(a -> resources.add(create(a, Resource.KSQL, null)));
|
||||
ctx.getAclActions()
|
||||
.forEach(a -> resources.add(new AuditResource(a.name(), Resource.ACL, null)));
|
||||
.forEach(a -> resources.add(create(a, Resource.ACL, null)));
|
||||
ctx.getAuditAction()
|
||||
.forEach(a -> resources.add(new AuditResource(a.name(), Resource.AUDIT, null)));
|
||||
.forEach(a -> resources.add(create(a, Resource.AUDIT, null)));
|
||||
return resources;
|
||||
}
|
||||
|
||||
|
|
|
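Each accessed resource is now written with an alter flag taken from the action, so the writer can later drop purely read-only operations. A reduced sketch of the same idea; the SampleAction and AuditedResource types are invented stand-ins for PermissibleAction and AuditResource:

import java.util.List;

class AuditResourceSketch {

  enum SampleAction {
    VIEW(false), EDIT(true), DELETE(true);

    private final boolean alter;

    SampleAction(boolean alter) { this.alter = alter; }

    boolean isAlter() { return alter; }
  }

  record AuditedResource(String action, boolean alter, String type) {
    static AuditedResource of(SampleAction action, String type) {
      return new AuditedResource(action.name(), action.isAlter(), type);
    }
  }

  public static void main(String[] args) {
    var resources = List.of(
        AuditedResource.of(SampleAction.VIEW, "TOPIC"),
        AuditedResource.of(SampleAction.EDIT, "TOPIC"));
    // true: at least one accessed resource was altered, so the record stays loggable in ALTER_ONLY mode
    System.out.println(resources.stream().anyMatch(AuditedResource::alter));
  }
}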
@ -1,11 +1,11 @@
|
|||
package com.provectus.kafka.ui.service.audit;
|
||||
|
||||
import static com.provectus.kafka.ui.config.ClustersProperties.AuditProperties.LogLevel.ALTER_ONLY;
|
||||
import static com.provectus.kafka.ui.service.MessagesService.createProducer;
|
||||
|
||||
import com.google.common.annotations.VisibleForTesting;
|
||||
import com.provectus.kafka.ui.config.ClustersProperties;
|
||||
import com.provectus.kafka.ui.config.auth.AuthenticatedUser;
|
||||
import com.provectus.kafka.ui.config.auth.RbacUser;
|
||||
import com.provectus.kafka.ui.model.KafkaCluster;
|
||||
import com.provectus.kafka.ui.model.rbac.AccessContext;
|
||||
import com.provectus.kafka.ui.service.AdminClientService;
|
||||
|
@ -20,6 +20,7 @@ import java.util.Optional;
|
|||
import java.util.Set;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
import java.util.function.Supplier;
|
||||
import java.util.stream.Collectors;
|
||||
import javax.annotation.Nullable;
|
||||
import lombok.extern.slf4j.Slf4j;
|
||||
import org.apache.kafka.clients.producer.KafkaProducer;
|
||||
|
@ -27,7 +28,9 @@ import org.apache.kafka.clients.producer.ProducerConfig;
|
|||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
import org.springframework.beans.factory.annotation.Autowired;
|
||||
import org.springframework.security.core.GrantedAuthority;
|
||||
import org.springframework.security.core.context.SecurityContext;
|
||||
import org.springframework.security.core.userdetails.UserDetails;
|
||||
import org.springframework.stereotype.Service;
|
||||
import reactor.core.publisher.Mono;
|
||||
import reactor.core.publisher.Signal;
|
||||
|
@ -80,12 +83,13 @@ public class AuditService implements Closeable {
|
|||
}
|
||||
boolean topicAudit = Optional.ofNullable(auditProps.getTopicAuditEnabled()).orElse(false);
|
||||
boolean consoleAudit = Optional.ofNullable(auditProps.getConsoleAuditEnabled()).orElse(false);
|
||||
boolean alterLogOnly = Optional.ofNullable(auditProps.getLevel()).map(lvl -> lvl == ALTER_ONLY).orElse(true);
|
||||
if (!topicAudit && !consoleAudit) {
|
||||
return Optional.empty();
|
||||
}
|
||||
if (!topicAudit) {
|
||||
log.info("Audit initialization finished for cluster '{}' (console only)", cluster.getName());
|
||||
return Optional.of(consoleOnlyWriter(cluster));
|
||||
return Optional.of(consoleOnlyWriter(cluster, alterLogOnly));
|
||||
}
|
||||
String auditTopicName = Optional.ofNullable(auditProps.getTopic()).orElse(DEFAULT_AUDIT_TOPIC_NAME);
|
||||
boolean topicAuditCanBeDone = createTopicIfNeeded(cluster, acSupplier, auditTopicName, auditProps);
|
||||
|
@ -95,7 +99,7 @@ public class AuditService implements Closeable {
|
|||
"Audit initialization finished for cluster '{}' (console only, topic audit init failed)",
|
||||
cluster.getName()
|
||||
);
|
||||
return Optional.of(consoleOnlyWriter(cluster));
|
||||
return Optional.of(consoleOnlyWriter(cluster, alterLogOnly));
|
||||
}
|
||||
return Optional.empty();
|
||||
}
|
||||
|
@ -103,6 +107,7 @@ public class AuditService implements Closeable {
|
|||
return Optional.of(
|
||||
new AuditWriter(
|
||||
cluster.getName(),
|
||||
alterLogOnly,
|
||||
auditTopicName,
|
||||
producerFactory.get(),
|
||||
consoleAudit ? AUDIT_LOGGER : null
|
||||
|
@ -110,8 +115,8 @@ public class AuditService implements Closeable {
|
|||
);
|
||||
}
|
||||
|
||||
private static AuditWriter consoleOnlyWriter(KafkaCluster cluster) {
|
||||
return new AuditWriter(cluster.getName(), null, null, AUDIT_LOGGER);
|
||||
private static AuditWriter consoleOnlyWriter(KafkaCluster cluster, boolean alterLogOnly) {
|
||||
return new AuditWriter(cluster.getName(), alterLogOnly, null, null, AUDIT_LOGGER);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -192,8 +197,11 @@ public class AuditService implements Closeable {
|
|||
if (sig.getContextView().hasKey(key)) {
|
||||
return sig.getContextView().<Mono<SecurityContext>>get(key)
|
||||
.map(context -> context.getAuthentication().getPrincipal())
|
||||
.cast(RbacUser.class)
|
||||
.map(user -> new AuthenticatedUser(user.name(), user.groups()))
|
||||
.cast(UserDetails.class)
|
||||
.map(user -> {
|
||||
var roles = user.getAuthorities().stream().map(GrantedAuthority::getAuthority).collect(Collectors.toSet());
|
||||
return new AuthenticatedUser(user.getUsername(), roles);
|
||||
})
|
||||
.switchIfEmpty(NO_AUTH_USER);
|
||||
} else {
|
||||
return NO_AUTH_USER;
|
||||
|
|
|
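The new audit level collapses to a single boolean before it reaches the writer, with an unset property behaving like ALTER_ONLY. The defaulting logic in isolation (the enum is redeclared here just for the sketch):

import java.util.Optional;

class AuditLevelDefaultExample {

  enum LogLevel { ALL, ALTER_ONLY }

  static boolean alterLogOnly(LogLevel configured) {
    // null means "level not configured" and is treated as ALTER_ONLY
    return Optional.ofNullable(configured).map(lvl -> lvl == LogLevel.ALTER_ONLY).orElse(true);
  }

  public static void main(String[] args) {
    System.out.println(alterLogOnly(null));                 // true
    System.out.println(alterLogOnly(LogLevel.ALL));         // false
    System.out.println(alterLogOnly(LogLevel.ALTER_ONLY));  // true
  }
}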
@ -18,6 +18,7 @@ import org.slf4j.Logger;

@Slf4j
record AuditWriter(String clusterName,
                   boolean logAlterOperationsOnly,
                   @Nullable String targetTopic,
                   @Nullable KafkaProducer<byte[], byte[]> producer,
                   @Nullable Logger consoleLogger) implements Closeable {

@ -39,6 +40,10 @@ record AuditWriter(String clusterName,
  }

  private void write(AuditRecord rec) {
    if (logAlterOperationsOnly && rec.resources().stream().noneMatch(AuditResource::alter)) {
      //we should only log alter operations, but this is read-only op
      return;
    }
    String json = rec.toJson();
    if (consoleLogger != null) {
      consoleLogger.info(json);
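When topic audit is enabled, AuditWriter serializes each record to JSON and hands it to a byte-array KafkaProducer. A minimal, self-contained sketch of that send path; the bootstrap address, topic name and payload are illustrative, not the application's actual defaults:

import java.util.Map;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.ByteArraySerializer;

class AuditSendSketch {
  public static void main(String[] args) {
    Map<String, Object> props = Map.of(
        ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092",
        ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class,
        ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class);
    try (var producer = new KafkaProducer<byte[], byte[]>(props)) {
      String json = "{\"operation\":\"createTopic\",\"resources\":[]}";
      // fire-and-forget: audit writes are not awaited on the request path
      producer.send(new ProducerRecord<>("audit-log-topic", null, json.getBytes()));
    }
  }
}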
@ -1,7 +1,5 @@
|
|||
package com.provectus.kafka.ui.service.masking;
|
||||
|
||||
import static java.util.stream.Collectors.toList;
|
||||
|
||||
import com.fasterxml.jackson.core.JsonProcessingException;
|
||||
import com.fasterxml.jackson.databind.JsonNode;
|
||||
import com.fasterxml.jackson.databind.json.JsonMapper;
|
||||
|
@ -9,6 +7,7 @@ import com.fasterxml.jackson.databind.node.ContainerNode;
|
|||
import com.google.common.annotations.VisibleForTesting;
|
||||
import com.google.common.base.Preconditions;
|
||||
import com.provectus.kafka.ui.config.ClustersProperties;
|
||||
import com.provectus.kafka.ui.model.TopicMessageDTO;
|
||||
import com.provectus.kafka.ui.serde.api.Serde;
|
||||
import com.provectus.kafka.ui.service.masking.policies.MaskingPolicy;
|
||||
import java.util.List;
|
||||
|
@ -54,7 +53,8 @@ public class DataMasking {
|
|||
Optional.ofNullable(property.getTopicValuesPattern()).map(Pattern::compile).orElse(null),
|
||||
MaskingPolicy.create(property)
|
||||
);
|
||||
}).collect(toList()));
|
||||
}).toList()
|
||||
);
|
||||
}
|
||||
|
||||
@VisibleForTesting
|
||||
|
@ -62,8 +62,17 @@ public class DataMasking {
|
|||
this.masks = masks;
|
||||
}
|
||||
|
||||
public UnaryOperator<String> getMaskingFunction(String topic, Serde.Target target) {
|
||||
var targetMasks = masks.stream().filter(m -> m.shouldBeApplied(topic, target)).collect(toList());
|
||||
public UnaryOperator<TopicMessageDTO> getMaskerForTopic(String topic) {
|
||||
var keyMasker = getMaskingFunction(topic, Serde.Target.KEY);
|
||||
var valMasker = getMaskingFunction(topic, Serde.Target.VALUE);
|
||||
return msg -> msg
|
||||
.key(keyMasker.apply(msg.getKey()))
|
||||
.content(valMasker.apply(msg.getContent()));
|
||||
}
|
||||
|
||||
@VisibleForTesting
|
||||
UnaryOperator<String> getMaskingFunction(String topic, Serde.Target target) {
|
||||
var targetMasks = masks.stream().filter(m -> m.shouldBeApplied(topic, target)).toList();
|
||||
if (targetMasks.isEmpty()) {
|
||||
return UnaryOperator.identity();
|
||||
}
|
||||
|
|
|
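getMaskerForTopic composes the key and value masking functions into one operator applied to the whole message. A toy version of that composition, with a simple record standing in for TopicMessageDTO:

import java.util.function.UnaryOperator;

class MaskerCompositionSketch {

  record Message(String key, String content) {}

  static UnaryOperator<Message> maskerFor(UnaryOperator<String> keyMasker, UnaryOperator<String> valueMasker) {
    return msg -> new Message(keyMasker.apply(msg.key()), valueMasker.apply(msg.content()));
  }

  public static void main(String[] args) {
    var masker = maskerFor(key -> "***", value -> value.replaceAll("\\d", "*"));
    System.out.println(masker.apply(new Message("user-42", "card 1234")));
    // Message[key=***, content=card ****]
  }
}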
@ -5,6 +5,8 @@ import static com.provectus.kafka.ui.model.rbac.provider.Provider.Name.GITHUB;
|
|||
import com.provectus.kafka.ui.model.rbac.Role;
|
||||
import com.provectus.kafka.ui.model.rbac.provider.Provider;
|
||||
import com.provectus.kafka.ui.service.rbac.AccessControlService;
|
||||
import java.util.Collection;
|
||||
import java.util.Collections;
|
||||
import java.util.HashSet;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
@ -26,6 +28,8 @@ public class GithubAuthorityExtractor implements ProviderAuthorityExtractor {
|
|||
private static final String ORGANIZATION_ATTRIBUTE_NAME = "organizations_url";
|
||||
private static final String USERNAME_ATTRIBUTE_NAME = "login";
|
||||
private static final String ORGANIZATION_NAME = "login";
|
||||
private static final String ORGANIZATION = "organization";
|
||||
private static final String TEAM_NAME = "slug";
|
||||
private static final String GITHUB_ACCEPT_HEADER = "application/vnd.github+json";
|
||||
private static final String DUMMY = "dummy";
|
||||
// The number of results (max 100) per page of list organizations for authenticated user.
|
||||
|
@ -46,7 +50,7 @@ public class GithubAuthorityExtractor implements ProviderAuthorityExtractor {
|
|||
throw new RuntimeException();
|
||||
}
|
||||
|
||||
Set<String> groupsByUsername = new HashSet<>();
|
||||
Set<String> rolesByUsername = new HashSet<>();
|
||||
String username = principal.getAttribute(USERNAME_ATTRIBUTE_NAME);
|
||||
if (username == null) {
|
||||
log.debug("Github username param is not present");
|
||||
|
@ -59,13 +63,7 @@ public class GithubAuthorityExtractor implements ProviderAuthorityExtractor {
|
|||
.filter(s -> s.getType().equals("user"))
|
||||
.anyMatch(s -> s.getValue().equals(username)))
|
||||
.map(Role::getName)
|
||||
.forEach(groupsByUsername::add);
|
||||
}
|
||||
|
||||
String organization = principal.getAttribute(ORGANIZATION_ATTRIBUTE_NAME);
|
||||
if (organization == null) {
|
||||
log.debug("Github organization param is not present");
|
||||
return Mono.just(groupsByUsername);
|
||||
.forEach(rolesByUsername::add);
|
||||
}
|
||||
|
||||
OAuth2UserRequest req = (OAuth2UserRequest) additionalParams.get("request");
|
||||
|
@ -80,8 +78,24 @@ public class GithubAuthorityExtractor implements ProviderAuthorityExtractor {
|
|||
.getUserInfoEndpoint()
|
||||
.getUri();
|
||||
}
|
||||
var webClient = WebClient.create(infoEndpoint);
|
||||
|
||||
WebClient webClient = WebClient.create(infoEndpoint);
|
||||
Mono<Set<String>> rolesByOrganization = getOrganizationRoles(principal, additionalParams, acs, webClient);
|
||||
Mono<Set<String>> rolesByTeams = getTeamRoles(webClient, additionalParams, acs);
|
||||
|
||||
return Mono.zip(rolesByOrganization, rolesByTeams)
|
||||
.map((t) -> Stream.of(t.getT1(), t.getT2(), rolesByUsername)
|
||||
.flatMap(Collection::stream)
|
||||
.collect(Collectors.toSet()));
|
||||
}
|
||||
|
||||
private Mono<Set<String>> getOrganizationRoles(DefaultOAuth2User principal, Map<String, Object> additionalParams,
|
||||
AccessControlService acs, WebClient webClient) {
|
||||
String organization = principal.getAttribute(ORGANIZATION_ATTRIBUTE_NAME);
|
||||
if (organization == null) {
|
||||
log.debug("Github organization param is not present");
|
||||
return Mono.just(Collections.emptySet());
|
||||
}
|
||||
|
||||
final Mono<List<Map<String, Object>>> userOrganizations = webClient
|
||||
.get()
|
||||
|
@ -99,22 +113,76 @@ public class GithubAuthorityExtractor implements ProviderAuthorityExtractor {
|
|||
//@formatter:on
|
||||
|
||||
return userOrganizations
|
||||
.map(orgsMap -> {
|
||||
var groupsByOrg = acs.getRoles()
|
||||
.stream()
|
||||
.filter(role -> role.getSubjects()
|
||||
.stream()
|
||||
.filter(s -> s.getProvider().equals(Provider.OAUTH_GITHUB))
|
||||
.filter(s -> s.getType().equals("organization"))
|
||||
.anyMatch(subject -> orgsMap.stream()
|
||||
.map(org -> org.get(ORGANIZATION_NAME).toString())
|
||||
.distinct()
|
||||
.anyMatch(orgName -> orgName.equalsIgnoreCase(subject.getValue()))
|
||||
))
|
||||
.map(Role::getName);
|
||||
.map(orgsMap -> acs.getRoles()
|
||||
.stream()
|
||||
.filter(role -> role.getSubjects()
|
||||
.stream()
|
||||
.filter(s -> s.getProvider().equals(Provider.OAUTH_GITHUB))
|
||||
.filter(s -> s.getType().equals(ORGANIZATION))
|
||||
.anyMatch(subject -> orgsMap.stream()
|
||||
.map(org -> org.get(ORGANIZATION_NAME).toString())
|
||||
.anyMatch(orgName -> orgName.equalsIgnoreCase(subject.getValue()))
|
||||
))
|
||||
.map(Role::getName)
|
||||
.collect(Collectors.toSet()));
|
||||
}
|
||||
|
||||
return Stream.concat(groupsByOrg, groupsByUsername.stream()).collect(Collectors.toSet());
|
||||
});
|
||||
@SuppressWarnings("unchecked")
|
||||
private Mono<Set<String>> getTeamRoles(WebClient webClient, Map<String, Object> additionalParams,
|
||||
AccessControlService acs) {
|
||||
|
||||
var requestedTeams = acs.getRoles()
|
||||
.stream()
|
||||
.filter(r -> r.getSubjects()
|
||||
.stream()
|
||||
.filter(s -> s.getProvider().equals(Provider.OAUTH_GITHUB))
|
||||
.anyMatch(s -> s.getType().equals("team")))
|
||||
.collect(Collectors.toSet());
|
||||
|
||||
if (requestedTeams.isEmpty()) {
|
||||
log.debug("No roles with github teams found, skipping");
|
||||
return Mono.just(Collections.emptySet());
|
||||
}
|
||||
|
||||
final Mono<List<Map<String, Object>>> rawTeams = webClient
|
||||
.get()
|
||||
.uri(uriBuilder -> uriBuilder.path("/teams")
|
||||
.queryParam("per_page", ORGANIZATIONS_PER_PAGE)
|
||||
.build())
|
||||
.headers(headers -> {
|
||||
headers.set(HttpHeaders.ACCEPT, GITHUB_ACCEPT_HEADER);
|
||||
OAuth2UserRequest request = (OAuth2UserRequest) additionalParams.get("request");
|
||||
headers.setBearerAuth(request.getAccessToken().getTokenValue());
|
||||
})
|
||||
.retrieve()
|
||||
//@formatter:off
|
||||
.bodyToMono(new ParameterizedTypeReference<>() {});
|
||||
//@formatter:on
|
||||
|
||||
final Mono<List<String>> mappedTeams = rawTeams
|
||||
.map(teams -> teams.stream()
|
||||
.map(teamInfo -> {
|
||||
var name = teamInfo.get(TEAM_NAME);
|
||||
var orgInfo = (Map<String, Object>) teamInfo.get(ORGANIZATION);
|
||||
var orgName = orgInfo.get(ORGANIZATION_NAME);
|
||||
return orgName + "/" + name;
|
||||
})
|
||||
.map(Object::toString)
|
||||
.collect(Collectors.toList())
|
||||
);
|
||||
|
||||
return mappedTeams
|
||||
.map(teams -> acs.getRoles()
|
||||
.stream()
|
||||
.filter(role -> role.getSubjects()
|
||||
.stream()
|
||||
.filter(s -> s.getProvider().equals(Provider.OAUTH_GITHUB))
|
||||
.filter(s -> s.getType().equals("team"))
|
||||
.anyMatch(subject -> teams.stream()
|
||||
.anyMatch(teamName -> teamName.equalsIgnoreCase(subject.getValue()))
|
||||
))
|
||||
.map(Role::getName)
|
||||
.collect(Collectors.toSet()));
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
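The extractor above resolves organization-, team- and username-based roles independently and merges them with Mono.zip. A compact sketch of that merge, with the three role sources stubbed as constants:

import java.util.Set;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import reactor.core.publisher.Mono;

class RoleMergeSketch {
  public static void main(String[] args) {
    Mono<Set<String>> rolesByOrganization = Mono.just(Set.of("org-admins"));
    Mono<Set<String>> rolesByTeams = Mono.just(Set.of("team-devs"));
    Set<String> rolesByUsername = Set.of("viewer");

    Set<String> merged = Mono.zip(rolesByOrganization, rolesByTeams)
        .map(t -> Stream.of(t.getT1(), t.getT2(), rolesByUsername)
            .flatMap(Set::stream)
            .collect(Collectors.toSet()))
        .block();

    System.out.println(merged); // the three role sets combined into one
  }
}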
@ -0,0 +1,69 @@
|
|||
package com.provectus.kafka.ui.emitter;
|
||||
|
||||
import static org.assertj.core.api.Assertions.assertThat;
|
||||
|
||||
import java.time.OffsetDateTime;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collections;
|
||||
import java.util.List;
|
||||
import java.util.Optional;
|
||||
import org.apache.kafka.clients.consumer.ConsumerRecord;
|
||||
import org.apache.kafka.common.header.internals.RecordHeaders;
|
||||
import org.apache.kafka.common.record.TimestampType;
|
||||
import org.apache.kafka.common.utils.Bytes;
|
||||
import org.junit.jupiter.api.RepeatedTest;
|
||||
|
||||
class MessagesProcessingTest {
|
||||
|
||||
|
||||
@RepeatedTest(5)
|
||||
void testSortingAsc() {
|
||||
var messagesInOrder = List.of(
|
||||
consumerRecord(1, 100L, "1999-01-01T00:00:00+00:00"),
|
||||
consumerRecord(0, 0L, "2000-01-01T00:00:00+00:00"),
|
||||
consumerRecord(1, 200L, "2000-01-05T00:00:00+00:00"),
|
||||
consumerRecord(0, 10L, "2000-01-10T00:00:00+00:00"),
|
||||
consumerRecord(0, 20L, "2000-01-20T00:00:00+00:00"),
|
||||
consumerRecord(1, 300L, "3000-01-01T00:00:00+00:00"),
|
||||
consumerRecord(2, 1000L, "4000-01-01T00:00:00+00:00"),
|
||||
consumerRecord(2, 1001L, "2000-01-01T00:00:00+00:00"),
|
||||
consumerRecord(2, 1003L, "3000-01-01T00:00:00+00:00")
|
||||
);
|
||||
|
||||
var shuffled = new ArrayList<>(messagesInOrder);
|
||||
Collections.shuffle(shuffled);
|
||||
|
||||
var sortedList = MessagesProcessing.sortForSending(shuffled, true);
|
||||
assertThat(sortedList).containsExactlyElementsOf(messagesInOrder);
|
||||
}
|
||||
|
||||
@RepeatedTest(5)
|
||||
void testSortingDesc() {
|
||||
var messagesInOrder = List.of(
|
||||
consumerRecord(1, 300L, "3000-01-01T00:00:00+00:00"),
|
||||
consumerRecord(2, 1003L, "3000-01-01T00:00:00+00:00"),
|
||||
consumerRecord(0, 20L, "2000-01-20T00:00:00+00:00"),
|
||||
consumerRecord(0, 10L, "2000-01-10T00:00:00+00:00"),
|
||||
consumerRecord(1, 200L, "2000-01-05T00:00:00+00:00"),
|
||||
consumerRecord(0, 0L, "2000-01-01T00:00:00+00:00"),
|
||||
consumerRecord(2, 1001L, "2000-01-01T00:00:00+00:00"),
|
||||
consumerRecord(2, 1000L, "4000-01-01T00:00:00+00:00"),
|
||||
consumerRecord(1, 100L, "1999-01-01T00:00:00+00:00")
|
||||
);
|
||||
|
||||
var shuffled = new ArrayList<>(messagesInOrder);
|
||||
Collections.shuffle(shuffled);
|
||||
|
||||
var sortedList = MessagesProcessing.sortForSending(shuffled, false);
|
||||
assertThat(sortedList).containsExactlyElementsOf(messagesInOrder);
|
||||
}
|
||||
|
||||
private ConsumerRecord<Bytes, Bytes> consumerRecord(int partition, long offset, String ts) {
|
||||
return new ConsumerRecord<>(
|
||||
"topic", partition, offset, OffsetDateTime.parse(ts).toInstant().toEpochMilli(),
|
||||
TimestampType.CREATE_TIME,
|
||||
0, 0, null, null, new RecordHeaders(), Optional.empty()
|
||||
);
|
||||
}
|
||||
|
||||
}
|
|
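The ascending test above encodes the ordering contract sortForSending is expected to honour: offset order is preserved inside a partition, and the next record across partitions is chosen by the timestamp of each partition's current head. A hedged sketch of one way to implement that merge (the project's actual implementation may differ):

import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;
import java.util.PriorityQueue;
import java.util.stream.Collectors;

class TimestampMergeSketch {

  record Rec(int partition, long offset, long timestamp) {}

  static List<Rec> sortForSendingAsc(List<Rec> records) {
    // Group by partition and restore offset order inside each group.
    var byPartition = records.stream().collect(Collectors.groupingBy(Rec::partition));
    byPartition.values().forEach(group -> group.sort(Comparator.comparingLong(Rec::offset)));

    // Repeatedly emit the head with the smallest timestamp across partitions.
    var heads = new PriorityQueue<List<Rec>>(
        Comparator.comparingLong((List<Rec> group) -> group.get(0).timestamp()));
    heads.addAll(byPartition.values());

    var result = new ArrayList<Rec>();
    while (!heads.isEmpty()) {
      var group = heads.poll();
      result.add(group.remove(0));
      if (!group.isEmpty()) {
        heads.add(group); // re-queue with its new head timestamp
      }
    }
    return result;
  }
}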
@ -0,0 +1,30 @@
|
|||
package com.provectus.kafka.ui.serdes;
|
||||
|
||||
import static com.provectus.kafka.ui.serde.api.DeserializeResult.Type.STRING;
|
||||
import static org.mockito.Mockito.any;
|
||||
import static org.mockito.Mockito.mock;
|
||||
import static org.mockito.Mockito.verify;
|
||||
|
||||
import com.provectus.kafka.ui.model.TopicMessageDTO;
|
||||
import com.provectus.kafka.ui.serde.api.DeserializeResult;
|
||||
import com.provectus.kafka.ui.serde.api.Serde;
|
||||
import java.util.Map;
|
||||
import java.util.function.UnaryOperator;
|
||||
import org.apache.kafka.clients.consumer.ConsumerRecord;
|
||||
import org.apache.kafka.common.utils.Bytes;
|
||||
import org.junit.jupiter.api.Test;
|
||||
|
||||
class ConsumerRecordDeserializerTest {
|
||||
|
||||
@Test
|
||||
void dataMaskingAppliedOnDeserializedMessage() {
|
||||
UnaryOperator<TopicMessageDTO> maskerMock = mock();
|
||||
Serde.Deserializer deser = (headers, data) -> new DeserializeResult("test", STRING, Map.of());
|
||||
|
||||
var recordDeser = new ConsumerRecordDeserializer("test", deser, "test", deser, "test", deser, deser, maskerMock);
|
||||
recordDeser.deserialize(new ConsumerRecord<>("t", 1, 1L, Bytes.wrap("t".getBytes()), Bytes.wrap("t".getBytes())));
|
||||
|
||||
verify(maskerMock).apply(any(TopicMessageDTO.class));
|
||||
}
|
||||
|
||||
}
|
|
@ -0,0 +1,108 @@
|
|||
package com.provectus.kafka.ui.serdes.builtin;
|
||||
|
||||
import static org.assertj.core.api.Assertions.assertThat;
|
||||
import static org.assertj.core.api.Assertions.assertThatThrownBy;
|
||||
|
||||
import com.google.protobuf.DescriptorProtos;
|
||||
import com.google.protobuf.Descriptors;
|
||||
import com.google.protobuf.DynamicMessage;
|
||||
import com.provectus.kafka.ui.exception.ValidationException;
|
||||
import com.provectus.kafka.ui.serde.api.Serde;
|
||||
import io.confluent.kafka.schemaregistry.protobuf.ProtobufSchema;
|
||||
import lombok.SneakyThrows;
|
||||
import org.junit.jupiter.api.BeforeEach;
|
||||
import org.junit.jupiter.api.Test;
|
||||
|
||||
class ProtobufRawSerdeTest {
|
||||
|
||||
private static final String DUMMY_TOPIC = "dummy-topic";
|
||||
|
||||
private ProtobufRawSerde serde;
|
||||
|
||||
@BeforeEach
|
||||
void init() {
|
||||
serde = new ProtobufRawSerde();
|
||||
}
|
||||
|
||||
@SneakyThrows
|
||||
ProtobufSchema getSampleSchema() {
|
||||
return new ProtobufSchema(
|
||||
"""
|
||||
syntax = "proto3";
|
||||
message Message1 {
|
||||
int32 my_field = 1;
|
||||
}
|
||||
"""
|
||||
);
|
||||
}
|
||||
|
||||
@SneakyThrows
|
||||
private byte[] getProtobufMessage() {
|
||||
DynamicMessage.Builder builder = DynamicMessage.newBuilder(getSampleSchema().toDescriptor("Message1"));
|
||||
builder.setField(builder.getDescriptorForType().findFieldByName("my_field"), 5);
|
||||
return builder.build().toByteArray();
|
||||
}
|
||||
|
||||
@Test
|
||||
void deserializeSimpleMessage() {
|
||||
var deserialized = serde.deserializer(DUMMY_TOPIC, Serde.Target.VALUE)
|
||||
.deserialize(null, getProtobufMessage());
|
||||
assertThat(deserialized.getResult()).isEqualTo("1: 5\n");
|
||||
}
|
||||
|
||||
@Test
|
||||
void deserializeEmptyMessage() {
|
||||
var deserialized = serde.deserializer(DUMMY_TOPIC, Serde.Target.VALUE)
|
||||
.deserialize(null, new byte[0]);
|
||||
assertThat(deserialized.getResult()).isEqualTo("");
|
||||
}
|
||||
|
||||
@Test
|
||||
void deserializeInvalidMessage() {
|
||||
var deserializer = serde.deserializer(DUMMY_TOPIC, Serde.Target.VALUE);
|
||||
assertThatThrownBy(() -> deserializer.deserialize(null, new byte[] { 1, 2, 3 }))
|
||||
.isInstanceOf(ValidationException.class)
|
||||
.hasMessageContaining("Protocol message contained an invalid tag");
|
||||
}
|
||||
|
||||
@Test
|
||||
void deserializeNullMessage() {
|
||||
var deserializer = serde.deserializer(DUMMY_TOPIC, Serde.Target.VALUE);
|
||||
assertThatThrownBy(() -> deserializer.deserialize(null, null))
|
||||
.isInstanceOf(ValidationException.class)
|
||||
.hasMessageContaining("Cannot read the array length");
|
||||
}
|
||||
|
||||
ProtobufSchema getSampleNestedSchema() {
|
||||
return new ProtobufSchema(
|
||||
"""
|
||||
syntax = "proto3";
|
||||
message Message2 {
|
||||
int32 my_nested_field = 1;
|
||||
}
|
||||
message Message1 {
|
||||
int32 my_field = 1;
|
||||
Message2 my_nested_message = 2;
|
||||
}
|
||||
"""
|
||||
);
|
||||
}
|
||||
|
||||
@SneakyThrows
|
||||
private byte[] getComplexProtobufMessage() {
|
||||
DynamicMessage.Builder builder = DynamicMessage.newBuilder(getSampleNestedSchema().toDescriptor("Message1"));
|
||||
builder.setField(builder.getDescriptorForType().findFieldByName("my_field"), 5);
|
||||
DynamicMessage.Builder nestedBuilder = DynamicMessage.newBuilder(getSampleNestedSchema().toDescriptor("Message2"));
|
||||
nestedBuilder.setField(nestedBuilder.getDescriptorForType().findFieldByName("my_nested_field"), 10);
|
||||
builder.setField(builder.getDescriptorForType().findFieldByName("my_nested_message"), nestedBuilder.build());
|
||||
|
||||
return builder.build().toByteArray();
|
||||
}
|
||||
|
||||
@Test
|
||||
void deserializeNestedMessage() {
|
||||
var deserialized = serde.deserializer(DUMMY_TOPIC, Serde.Target.VALUE)
|
||||
.deserialize(null, getComplexProtobufMessage());
|
||||
assertThat(deserialized.getResult()).isEqualTo("1: 5\n2: {\n 1: 10\n}\n");
|
||||
}
|
||||
}
|
|
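ProtobufRawSerde, exercised by the new test above, renders protobuf payloads without any schema, which is why the assertions expect the raw field-number form "1: 5". The protobuf runtime supports this directly through UnknownFieldSet; a minimal sketch assuming protobuf-java on the classpath (not necessarily how the serde itself is implemented):

import com.google.protobuf.InvalidProtocolBufferException;
import com.google.protobuf.UnknownFieldSet;

class RawProtobufSketch {
  public static void main(String[] args) throws InvalidProtocolBufferException {
    // Field #1 as a varint with value 5, encoded by hand: tag 0x08, value 0x05.
    byte[] payload = {0x08, 0x05};
    // No descriptor needed: unknown-field parsing keeps field numbers and wire values.
    UnknownFieldSet fields = UnknownFieldSet.parseFrom(payload);
    System.out.print(fields); // prints "1: 5"
  }
}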
@ -7,13 +7,13 @@ import static com.provectus.kafka.ui.model.SeekTypeDTO.TIMESTAMP;
|
|||
import static org.assertj.core.api.Assertions.assertThat;
|
||||
|
||||
import com.provectus.kafka.ui.AbstractIntegrationTest;
|
||||
import com.provectus.kafka.ui.emitter.BackwardRecordEmitter;
|
||||
import com.provectus.kafka.ui.emitter.BackwardEmitter;
|
||||
import com.provectus.kafka.ui.emitter.EnhancedConsumer;
|
||||
import com.provectus.kafka.ui.emitter.ForwardRecordEmitter;
|
||||
import com.provectus.kafka.ui.emitter.MessagesProcessing;
|
||||
import com.provectus.kafka.ui.emitter.ForwardEmitter;
|
||||
import com.provectus.kafka.ui.emitter.PollingSettings;
|
||||
import com.provectus.kafka.ui.emitter.PollingThrottler;
|
||||
import com.provectus.kafka.ui.model.ConsumerPosition;
|
||||
import com.provectus.kafka.ui.model.TopicMessageDTO;
|
||||
import com.provectus.kafka.ui.model.TopicMessageEventDTO;
|
||||
import com.provectus.kafka.ui.producer.KafkaTestProducer;
|
||||
import com.provectus.kafka.ui.serde.api.Serde;
|
||||
|
@ -31,16 +31,15 @@ import java.util.UUID;
|
|||
import java.util.concurrent.ThreadLocalRandom;
|
||||
import java.util.function.Consumer;
|
||||
import java.util.function.Function;
|
||||
import java.util.function.Predicate;
|
||||
import java.util.stream.Collectors;
|
||||
import lombok.Value;
|
||||
import lombok.extern.slf4j.Slf4j;
|
||||
import org.apache.kafka.clients.admin.NewTopic;
|
||||
import org.apache.kafka.clients.consumer.ConsumerConfig;
|
||||
import org.apache.kafka.clients.consumer.KafkaConsumer;
|
||||
import org.apache.kafka.clients.producer.ProducerRecord;
|
||||
import org.apache.kafka.common.TopicPartition;
|
||||
import org.apache.kafka.common.header.internals.RecordHeader;
|
||||
import org.apache.kafka.common.serialization.BytesDeserializer;
|
||||
import org.junit.jupiter.api.AfterAll;
|
||||
import org.junit.jupiter.api.BeforeAll;
|
||||
import org.junit.jupiter.api.Test;
|
||||
|
@ -58,6 +57,7 @@ class RecordEmitterTest extends AbstractIntegrationTest {
|
|||
static final String EMPTY_TOPIC = TOPIC + "_empty";
|
||||
static final List<Record> SENT_RECORDS = new ArrayList<>();
|
||||
static final ConsumerRecordDeserializer RECORD_DESERIALIZER = createRecordsDeserializer();
|
||||
static final Predicate<TopicMessageDTO> NOOP_FILTER = m -> true;
|
||||
|
||||
@BeforeAll
|
||||
static void generateMsgs() throws Exception {
|
||||
|
@ -93,6 +93,7 @@ class RecordEmitterTest extends AbstractIntegrationTest {
|
|||
static void cleanup() {
|
||||
deleteTopic(TOPIC);
|
||||
deleteTopic(EMPTY_TOPIC);
|
||||
SENT_RECORDS.clear();
|
||||
}
|
||||
|
||||
private static ConsumerRecordDeserializer createRecordsDeserializer() {
|
||||
|
@ -105,28 +106,28 @@ class RecordEmitterTest extends AbstractIntegrationTest {
|
|||
s.deserializer(null, Serde.Target.VALUE),
|
||||
StringSerde.name(),
|
||||
s.deserializer(null, Serde.Target.KEY),
|
||||
s.deserializer(null, Serde.Target.VALUE)
|
||||
s.deserializer(null, Serde.Target.VALUE),
|
||||
msg -> msg
|
||||
);
|
||||
}
|
||||
|
||||
private MessagesProcessing createMessagesProcessing() {
|
||||
return new MessagesProcessing(RECORD_DESERIALIZER, msg -> true, null);
|
||||
}
|
||||
|
||||
@Test
|
||||
void pollNothingOnEmptyTopic() {
|
||||
var forwardEmitter = new ForwardRecordEmitter(
|
||||
this::createConsumer,
|
||||
new ConsumerPosition(BEGINNING, EMPTY_TOPIC, null),
|
||||
createMessagesProcessing(),
|
||||
PollingSettings.createDefault()
|
||||
);
|
||||
|
||||
var backwardEmitter = new BackwardRecordEmitter(
|
||||
var forwardEmitter = new ForwardEmitter(
|
||||
this::createConsumer,
|
||||
new ConsumerPosition(BEGINNING, EMPTY_TOPIC, null),
|
||||
100,
|
||||
createMessagesProcessing(),
|
||||
RECORD_DESERIALIZER,
|
||||
NOOP_FILTER,
|
||||
PollingSettings.createDefault()
|
||||
);
|
||||
|
||||
var backwardEmitter = new BackwardEmitter(
|
||||
this::createConsumer,
|
||||
new ConsumerPosition(BEGINNING, EMPTY_TOPIC, null),
|
||||
100,
|
||||
RECORD_DESERIALIZER,
|
||||
NOOP_FILTER,
|
||||
PollingSettings.createDefault()
|
||||
);
|
||||
|
||||
|
@ -145,18 +146,21 @@ class RecordEmitterTest extends AbstractIntegrationTest {
|
|||
|
||||
@Test
|
||||
void pollFullTopicFromBeginning() {
|
||||
var forwardEmitter = new ForwardRecordEmitter(
|
||||
var forwardEmitter = new ForwardEmitter(
|
||||
this::createConsumer,
|
||||
new ConsumerPosition(BEGINNING, TOPIC, null),
|
||||
createMessagesProcessing(),
|
||||
PARTITIONS * MSGS_PER_PARTITION,
|
||||
RECORD_DESERIALIZER,
|
||||
NOOP_FILTER,
|
||||
PollingSettings.createDefault()
|
||||
);
|
||||
|
||||
var backwardEmitter = new BackwardRecordEmitter(
|
||||
var backwardEmitter = new BackwardEmitter(
|
||||
this::createConsumer,
|
||||
new ConsumerPosition(LATEST, TOPIC, null),
|
||||
PARTITIONS * MSGS_PER_PARTITION,
|
||||
createMessagesProcessing(),
|
||||
RECORD_DESERIALIZER,
|
||||
NOOP_FILTER,
|
||||
PollingSettings.createDefault()
|
||||
);
|
||||
|
||||
|
@ -174,18 +178,21 @@ class RecordEmitterTest extends AbstractIntegrationTest {
|
|||
targetOffsets.put(new TopicPartition(TOPIC, i), offset);
|
||||
}
|
||||
|
||||
var forwardEmitter = new ForwardRecordEmitter(
|
||||
this::createConsumer,
|
||||
new ConsumerPosition(OFFSET, TOPIC, targetOffsets),
|
||||
createMessagesProcessing(),
|
||||
PollingSettings.createDefault()
|
||||
);
|
||||
|
||||
var backwardEmitter = new BackwardRecordEmitter(
|
||||
var forwardEmitter = new ForwardEmitter(
|
||||
this::createConsumer,
|
||||
new ConsumerPosition(OFFSET, TOPIC, targetOffsets),
|
||||
PARTITIONS * MSGS_PER_PARTITION,
|
||||
createMessagesProcessing(),
|
||||
RECORD_DESERIALIZER,
|
||||
NOOP_FILTER,
|
||||
PollingSettings.createDefault()
|
||||
);
|
||||
|
||||
var backwardEmitter = new BackwardEmitter(
|
||||
this::createConsumer,
|
||||
new ConsumerPosition(OFFSET, TOPIC, targetOffsets),
|
||||
PARTITIONS * MSGS_PER_PARTITION,
|
||||
RECORD_DESERIALIZER,
|
||||
NOOP_FILTER,
|
||||
PollingSettings.createDefault()
|
||||
);
|
||||
|
||||
|
@ -219,18 +226,21 @@ class RecordEmitterTest extends AbstractIntegrationTest {
|
|||
);
|
||||
}
|
||||
|
||||
var forwardEmitter = new ForwardRecordEmitter(
|
||||
this::createConsumer,
|
||||
new ConsumerPosition(TIMESTAMP, TOPIC, targetTimestamps),
|
||||
createMessagesProcessing(),
|
||||
PollingSettings.createDefault()
|
||||
);
|
||||
|
||||
var backwardEmitter = new BackwardRecordEmitter(
|
||||
var forwardEmitter = new ForwardEmitter(
|
||||
this::createConsumer,
|
||||
new ConsumerPosition(TIMESTAMP, TOPIC, targetTimestamps),
|
||||
PARTITIONS * MSGS_PER_PARTITION,
|
||||
createMessagesProcessing(),
|
||||
RECORD_DESERIALIZER,
|
||||
NOOP_FILTER,
|
||||
PollingSettings.createDefault()
|
||||
);
|
||||
|
||||
var backwardEmitter = new BackwardEmitter(
|
||||
this::createConsumer,
|
||||
new ConsumerPosition(TIMESTAMP, TOPIC, targetTimestamps),
|
||||
PARTITIONS * MSGS_PER_PARTITION,
|
||||
RECORD_DESERIALIZER,
|
||||
NOOP_FILTER,
|
||||
PollingSettings.createDefault()
|
||||
);
|
||||
|
||||
|
@ -257,11 +267,12 @@ class RecordEmitterTest extends AbstractIntegrationTest {
|
|||
targetOffsets.put(new TopicPartition(TOPIC, i), (long) MSGS_PER_PARTITION);
|
||||
}
|
||||
|
||||
var backwardEmitter = new BackwardRecordEmitter(
|
||||
var backwardEmitter = new BackwardEmitter(
|
||||
this::createConsumer,
|
||||
new ConsumerPosition(OFFSET, TOPIC, targetOffsets),
|
||||
numMessages,
|
||||
createMessagesProcessing(),
|
||||
RECORD_DESERIALIZER,
|
||||
NOOP_FILTER,
|
||||
PollingSettings.createDefault()
|
||||
);
|
||||
|
||||
|
@ -283,11 +294,12 @@ class RecordEmitterTest extends AbstractIntegrationTest {
|
|||
offsets.put(new TopicPartition(TOPIC, i), 0L);
|
||||
}
|
||||
|
||||
var backwardEmitter = new BackwardRecordEmitter(
|
||||
var backwardEmitter = new BackwardEmitter(
|
||||
this::createConsumer,
|
||||
new ConsumerPosition(OFFSET, TOPIC, offsets),
|
||||
100,
|
||||
createMessagesProcessing(),
|
||||
RECORD_DESERIALIZER,
|
||||
NOOP_FILTER,
|
||||
PollingSettings.createDefault()
|
||||
);
|
||||
|
||||
|
|
|
@ -42,8 +42,9 @@ public class SchemaRegistryPaginationTest {
|
|||
new SchemaRegistryService.SubjectWithCompatibilityLevel(
|
||||
new SchemaSubject().subject(a.getArgument(1)), Compatibility.FULL)));
|
||||
|
||||
this.controller = new SchemasController(schemaRegistryService, new AccessControlServiceMock().getMock(),
|
||||
mock(AuditService.class));
|
||||
this.controller = new SchemasController(schemaRegistryService);
|
||||
this.controller.setAccessControlService(new AccessControlServiceMock().getMock());
|
||||
this.controller.setAuditService(mock(AuditService.class));
|
||||
this.controller.setClustersStorage(clustersStorage);
|
||||
}
|
||||
|
||||
|
|
|
@ -45,8 +45,8 @@ class TopicsServicePaginationTest {
|
|||
private final ClusterMapper clusterMapper = new ClusterMapperImpl();
|
||||
private final AccessControlService accessControlService = new AccessControlServiceMock().getMock();
|
||||
|
||||
private final TopicsController topicsController = new TopicsController(
|
||||
topicsService, mock(TopicAnalysisService.class), clusterMapper, accessControlService, mock(AuditService.class));
|
||||
private final TopicsController topicsController =
|
||||
new TopicsController(topicsService, mock(TopicAnalysisService.class), clusterMapper);
|
||||
|
||||
private void init(Map<String, InternalTopic> topicsInCache) {
|
||||
|
||||
|
@ -59,6 +59,8 @@ class TopicsServicePaginationTest {
|
|||
List<String> lst = a.getArgument(1);
|
||||
return Mono.just(lst.stream().map(topicsInCache::get).collect(Collectors.toList()));
|
||||
});
|
||||
topicsController.setAccessControlService(accessControlService);
|
||||
topicsController.setAuditService(mock(AuditService.class));
|
||||
topicsController.setClustersStorage(clustersStorage);
|
||||
}
|
||||
|
||||
|
|
|
@ -30,8 +30,8 @@ class AuditServiceTest {
|
|||
@Test
|
||||
void isAuditTopicChecksIfAuditIsEnabledForCluster() {
|
||||
Map<String, AuditWriter> writers = Map.of(
|
||||
"c1", new AuditWriter("с1", "c1topic", null, null),
|
||||
"c2", new AuditWriter("c2", "c2topic", mock(KafkaProducer.class), null)
|
||||
"c1", new AuditWriter("с1", true, "c1topic", null, null),
|
||||
"c2", new AuditWriter("c2", false, "c2topic", mock(KafkaProducer.class), null)
|
||||
);
|
||||
|
||||
var auditService = new AuditService(writers);
|
||||
|
@ -79,6 +79,17 @@ class AuditServiceTest {
|
|||
.thenReturn(mock(KafkaProducer.class));
|
||||
}
|
||||
|
||||
@Test
|
||||
void logOnlyAlterOpsByDefault() {
|
||||
var auditProps = new ClustersProperties.AuditProperties();
|
||||
auditProps.setConsoleAuditEnabled(true);
|
||||
clustersProperties.setAudit(auditProps);
|
||||
|
||||
var maybeWriter = createAuditWriter(cluster, () -> adminClientMock, producerSupplierMock);
|
||||
assertThat(maybeWriter)
|
||||
.hasValueSatisfying(w -> assertThat(w.logAlterOperationsOnly()).isTrue());
|
||||
}
|
||||
|
||||
@Test
|
||||
void noWriterIfNoAuditPropsSet() {
|
||||
var maybeWriter = createAuditWriter(cluster, () -> adminClientMock, producerSupplierMock);
|
||||
|
|
|
@ -0,0 +1,86 @@
|
|||
package com.provectus.kafka.ui.service.audit;
|
||||
|
||||
import static org.mockito.ArgumentMatchers.any;
|
||||
import static org.mockito.Mockito.verify;
|
||||
import static org.mockito.Mockito.verifyNoInteractions;
|
||||
|
||||
import com.provectus.kafka.ui.config.auth.AuthenticatedUser;
|
||||
import com.provectus.kafka.ui.model.rbac.AccessContext;
|
||||
import com.provectus.kafka.ui.model.rbac.AccessContext.AccessContextBuilder;
|
||||
import com.provectus.kafka.ui.model.rbac.permission.AclAction;
|
||||
import com.provectus.kafka.ui.model.rbac.permission.ClusterConfigAction;
|
||||
import com.provectus.kafka.ui.model.rbac.permission.ConnectAction;
|
||||
import com.provectus.kafka.ui.model.rbac.permission.ConsumerGroupAction;
|
||||
import com.provectus.kafka.ui.model.rbac.permission.SchemaAction;
|
||||
import com.provectus.kafka.ui.model.rbac.permission.TopicAction;
|
||||
import java.util.List;
|
||||
import java.util.function.UnaryOperator;
|
||||
import java.util.stream.Stream;
|
||||
import org.apache.kafka.clients.producer.KafkaProducer;
|
||||
import org.junit.jupiter.api.Nested;
|
||||
import org.junit.jupiter.params.ParameterizedTest;
|
||||
import org.junit.jupiter.params.provider.MethodSource;
|
||||
import org.mockito.Mockito;
|
||||
import org.slf4j.Logger;
|
||||
|
||||
class AuditWriterTest {
|
||||
|
||||
final KafkaProducer<byte[], byte[]> producerMock = Mockito.mock(KafkaProducer.class);
|
||||
final Logger loggerMock = Mockito.mock(Logger.class);
|
||||
final AuthenticatedUser user = new AuthenticatedUser("someone", List.of());
|
||||
|
||||
@Nested
|
||||
class AlterOperationsOnlyWriter {
|
||||
|
||||
final AuditWriter alterOnlyWriter = new AuditWriter("test", true, "test-topic", producerMock, loggerMock);
|
||||
|
||||
@ParameterizedTest
|
||||
@MethodSource
|
||||
void onlyLogsWhenAlterOperationIsPresentForOneOfResources(AccessContext ctxWithAlterOperation) {
|
||||
alterOnlyWriter.write(ctxWithAlterOperation, user, null);
|
||||
verify(producerMock).send(any(), any());
|
||||
verify(loggerMock).info(any());
|
||||
}
|
||||
|
||||
static Stream<AccessContext> onlyLogsWhenAlterOperationIsPresentForOneOfResources() {
|
||||
Stream<UnaryOperator<AccessContextBuilder>> topicEditActions =
|
||||
TopicAction.ALTER_ACTIONS.stream().map(a -> c -> c.topic("test").topicActions(a));
|
||||
Stream<UnaryOperator<AccessContextBuilder>> clusterConfigEditActions =
|
||||
ClusterConfigAction.ALTER_ACTIONS.stream().map(a -> c -> c.clusterConfigActions(a));
|
||||
Stream<UnaryOperator<AccessContextBuilder>> aclEditActions =
|
||||
AclAction.ALTER_ACTIONS.stream().map(a -> c -> c.aclActions(a));
|
||||
Stream<UnaryOperator<AccessContextBuilder>> cgEditActions =
|
||||
ConsumerGroupAction.ALTER_ACTIONS.stream().map(a -> c -> c.consumerGroup("cg").consumerGroupActions(a));
|
||||
Stream<UnaryOperator<AccessContextBuilder>> schemaEditActions =
|
||||
SchemaAction.ALTER_ACTIONS.stream().map(a -> c -> c.schema("sc").schemaActions(a));
|
||||
Stream<UnaryOperator<AccessContextBuilder>> connEditActions =
|
||||
ConnectAction.ALTER_ACTIONS.stream().map(a -> c -> c.connect("conn").connectActions(a));
|
||||
return Stream.of(
|
||||
topicEditActions, clusterConfigEditActions, aclEditActions,
|
||||
cgEditActions, connEditActions, schemaEditActions
|
||||
)
|
||||
.flatMap(c -> c)
|
||||
.map(setter -> setter.apply(AccessContext.builder().cluster("test").operationName("test")).build());
|
||||
}
|
||||
|
||||
@ParameterizedTest
|
||||
@MethodSource
|
||||
void doesNothingIfNoResourceHasAlterAction(AccessContext readOnlyCxt) {
|
||||
alterOnlyWriter.write(readOnlyCxt, user, null);
|
||||
verifyNoInteractions(producerMock);
|
||||
verifyNoInteractions(loggerMock);
|
||||
}
|
||||
|
||||
static Stream<AccessContext> doesNothingIfNoResourceHasAlterAction() {
|
||||
return Stream.<UnaryOperator<AccessContextBuilder>>of(
|
||||
c -> c.topic("test").topicActions(TopicAction.VIEW),
|
||||
c -> c.clusterConfigActions(ClusterConfigAction.VIEW),
|
||||
c -> c.aclActions(AclAction.VIEW),
|
||||
c -> c.consumerGroup("cg").consumerGroupActions(ConsumerGroupAction.VIEW),
|
||||
c -> c.schema("sc").schemaActions(SchemaAction.VIEW),
|
||||
c -> c.connect("conn").connectActions(ConnectAction.VIEW)
|
||||
).map(setter -> setter.apply(AccessContext.builder().cluster("test").operationName("test")).build());
|
||||
}
|
||||
}
|
||||
|
||||
}
|
|
@ -3868,10 +3868,6 @@ components:
      properties:
        pollTimeoutMs:
          type: integer
        partitionPollTimeout:
          type: integer
        noDataEmptyPolls:
          type: integer
        maxPageSize:
          type: integer
        defaultPageSize:

@ -4026,6 +4022,9 @@ components:
        audit:
          type: object
          properties:
            level:
              type: string
              enum: [ "ALL", "ALTER_ONLY" ]
            topic:
              type: string
            auditTopicsPartitions:
@ -19,6 +19,8 @@ import BrokerLogdir from 'components/Brokers/Broker/BrokerLogdir/BrokerLogdir';
|
|||
import BrokerMetrics from 'components/Brokers/Broker/BrokerMetrics/BrokerMetrics';
|
||||
import Navbar from 'components/common/Navigation/Navbar.styled';
|
||||
import PageLoader from 'components/common/PageLoader/PageLoader';
|
||||
import { ActionNavLink } from 'components/common/ActionComponent';
|
||||
import { Action, ResourceType } from 'generated-sources';
|
||||
|
||||
import Configs from './Configs/Configs';
|
||||
|
||||
|
@ -71,12 +73,16 @@ const Broker: React.FC = () => {
|
|||
>
|
||||
Configs
|
||||
</NavLink>
|
||||
<NavLink
|
||||
<ActionNavLink
|
||||
to={clusterBrokerMetricsPath(clusterName, brokerId)}
|
||||
className={({ isActive }) => (isActive ? 'is-active' : '')}
|
||||
permission={{
|
||||
resource: ResourceType.CLUSTERCONFIG,
|
||||
action: Action.VIEW,
|
||||
}}
|
||||
>
|
||||
Metrics
|
||||
</NavLink>
|
||||
</ActionNavLink>
|
||||
</Navbar>
|
||||
<Suspense fallback={<PageLoader />}>
|
||||
<Routes>
|
||||
|
|
|
@ -186,7 +186,7 @@ const Form: React.FC<FormProps> = ({ defaultValues, partitions, topics }) => {
|
|||
type="submit"
|
||||
disabled={partitionsValue.length === 0}
|
||||
>
|
||||
Submit
|
||||
Reset Offsets
|
||||
</Button>
|
||||
</div>
|
||||
</StyledForm>
|
||||
|
|
|
@ -15,6 +15,7 @@ import Form from './Form';
|
|||
const ResetOffsets: React.FC = () => {
|
||||
const routerParams = useAppParams<ClusterGroupParam>();
|
||||
|
||||
const { consumerGroupID } = routerParams;
|
||||
const consumerGroup = useConsumerGroupDetails(routerParams);
|
||||
|
||||
if (consumerGroup.isLoading || !consumerGroup.isSuccess)
|
||||
|
@ -37,7 +38,7 @@ const ResetOffsets: React.FC = () => {
|
|||
return (
|
||||
<>
|
||||
<PageHeading
|
||||
text="Reset offsets"
|
||||
text={consumerGroupID}
|
||||
backTo={clusterConsumerGroupsPath(routerParams.clusterName)}
|
||||
backText="Consumers"
|
||||
/>
|
||||
|
|
|
@ -86,9 +86,9 @@ const ActionsCell: React.FC<CellContext<Topic, unknown>> = ({ row }) => {
|
|||
Remove Topic
|
||||
{!isTopicDeletionAllowed && (
|
||||
<DropdownItemHint>
|
||||
The topic deletion is restricted at the application
|
||||
The topic deletion is restricted at the broker
|
||||
<br />
|
||||
configuration level
|
||||
configuration level (delete.topic.enable = false)
|
||||
</DropdownItemHint>
|
||||
)}
|
||||
</ActionDropdownItem>
|
||||
|
|
|
@ -15,7 +15,7 @@ enum Filters {
|
|||
PARTITION_COUNT = 'partitionCount',
|
||||
REPLICATION_FACTOR = 'replicationFactor',
|
||||
INSYNC_REPLICAS = 'inSyncReplicas',
|
||||
CLEANUP_POLICY = 'Delete',
|
||||
CLEANUP_POLICY = 'cleanUpPolicy',
|
||||
}
|
||||
|
||||
const New: React.FC = () => {
|
||||
|
|
|
@ -162,9 +162,9 @@ const Topic: React.FC = () => {
|
|||
Remove Topic
|
||||
{!isTopicDeletionAllowed && (
|
||||
<DropdownItemHint>
|
||||
The topic deletion is restricted at the application
|
||||
The topic deletion is restricted at the broker
|
||||
<br />
|
||||
configuration level
|
||||
configuration level (delete.topic.enable = false)
|
||||
</DropdownItemHint>
|
||||
)}
|
||||
</ActionDropdownItem>
|
||||
|
|
|
@ -7,11 +7,13 @@ export const clusterName = 'local';
|
|||
export const validPermission = {
|
||||
resource: ResourceType.TOPIC,
|
||||
action: Action.CREATE,
|
||||
value: 'topic',
|
||||
};
|
||||
|
||||
export const invalidPermission = {
|
||||
resource: ResourceType.SCHEMA,
|
||||
action: Action.DELETE,
|
||||
value: 'test',
|
||||
};
|
||||
|
||||
const roles = [
|
||||
|
|
|
@ -14,6 +14,7 @@ describe('Permission Helpers', () => {
|
|||
clusters: [clusterName1],
|
||||
resource: ResourceType.TOPIC,
|
||||
actions: [Action.VIEW, Action.CREATE],
|
||||
value: '.*',
|
||||
},
|
||||
{
|
||||
clusters: [clusterName1],
|
||||
|
@ -24,11 +25,18 @@ describe('Permission Helpers', () => {
|
|||
clusters: [clusterName1, clusterName2],
|
||||
resource: ResourceType.SCHEMA,
|
||||
actions: [Action.VIEW],
|
||||
value: '.*',
|
||||
},
|
||||
{
|
||||
clusters: [clusterName1, clusterName2],
|
||||
resource: ResourceType.CONNECT,
|
||||
actions: [Action.VIEW],
|
||||
value: '.*',
|
||||
},
|
||||
{
|
||||
clusters: [clusterName1],
|
||||
resource: ResourceType.APPLICATIONCONFIG,
|
||||
actions: [Action.EDIT],
|
||||
},
|
||||
{
|
||||
clusters: [clusterName1],
|
||||
|
@ -39,6 +47,7 @@ describe('Permission Helpers', () => {
|
|||
clusters: [clusterName1],
|
||||
resource: ResourceType.CONSUMER,
|
||||
actions: [Action.DELETE],
|
||||
value: '.*',
|
||||
},
|
||||
{
|
||||
clusters: [clusterName1],
|
||||
|
@ -46,6 +55,16 @@ describe('Permission Helpers', () => {
|
|||
actions: [Action.EDIT, Action.DELETE, Action.CREATE],
|
||||
value: '123.*',
|
||||
},
|
||||
{
|
||||
clusters: [clusterName1],
|
||||
resource: ResourceType.ACL,
|
||||
actions: [Action.VIEW],
|
||||
},
|
||||
{
|
||||
clusters: [clusterName1],
|
||||
resource: ResourceType.AUDIT,
|
||||
actions: [Action.VIEW],
|
||||
},
|
||||
{
|
||||
clusters: [clusterName1, clusterName2],
|
||||
resource: ResourceType.TOPIC,
|
||||
|
@ -58,6 +77,12 @@ describe('Permission Helpers', () => {
|
|||
value: '.*',
|
||||
actions: [Action.EDIT, Action.DELETE],
|
||||
},
|
||||
{
|
||||
clusters: [clusterName1, clusterName2],
|
||||
resource: ResourceType.TOPIC,
|
||||
value: 'bobross.*',
|
||||
actions: [Action.VIEW, Action.MESSAGES_READ],
|
||||
},
|
||||
];
|
||||
|
||||
const roles = modifyRolesData(userPermissionsMock);
|
||||
|
@ -100,11 +125,11 @@ describe('Permission Helpers', () => {
|
|||
|
||||
expect(result.size).toBe(2);
|
||||
|
||||
expect(cluster1Map?.size).toBe(6);
|
||||
expect(cluster1Map?.size).toBe(9);
|
||||
expect(cluster2Map?.size).toBe(3);
|
||||
|
||||
// clusterMap1
|
||||
expect(cluster1Map?.get(ResourceType.TOPIC)).toHaveLength(3);
|
||||
expect(cluster1Map?.get(ResourceType.TOPIC)).toHaveLength(4);
|
||||
expect(cluster1Map?.get(ResourceType.SCHEMA)).toHaveLength(2);
|
||||
expect(cluster1Map?.get(ResourceType.CONSUMER)).toHaveLength(1);
|
||||
expect(cluster1Map?.get(ResourceType.CLUSTERCONFIG)).toHaveLength(1);
|
||||
|
@ -177,33 +202,13 @@ describe('Permission Helpers', () => {
|
|||
).toBeFalsy();
|
||||
});
|
||||
|
||||
it('should check if the isPermitted returns the correct value without name values', () => {
|
||||
it('should check if the isPermitted returns the correct value without resource values (exempt list)', () => {
|
||||
expect(
|
||||
isPermitted({
|
||||
roles,
|
||||
clusterName: clusterName1,
|
||||
resource: ResourceType.TOPIC,
|
||||
action: Action.VIEW,
|
||||
rbacFlag: true,
|
||||
})
|
||||
).toBeTruthy();
|
||||
|
||||
expect(
|
||||
isPermitted({
|
||||
roles,
|
||||
clusterName: clusterName2,
|
||||
resource: ResourceType.TOPIC,
|
||||
action: Action.VIEW,
|
||||
rbacFlag: true,
|
||||
})
|
||||
).toBeFalsy();
|
||||
|
||||
expect(
|
||||
isPermitted({
|
||||
roles,
|
||||
clusterName: clusterName1,
|
||||
resource: ResourceType.SCHEMA,
|
||||
action: Action.VIEW,
|
||||
resource: ResourceType.KSQL,
|
||||
action: Action.EXECUTE,
|
||||
rbacFlag: true,
|
||||
})
|
||||
).toBeTruthy();
|
||||
|
@ -222,8 +227,8 @@ describe('Permission Helpers', () => {
|
|||
isPermitted({
|
||||
roles,
|
||||
clusterName: clusterName1,
|
||||
resource: ResourceType.KSQL,
|
||||
action: Action.EXECUTE,
|
||||
resource: ResourceType.APPLICATIONCONFIG,
|
||||
action: Action.EDIT,
|
||||
rbacFlag: true,
|
||||
})
|
||||
).toBeTruthy();
|
||||
|
@ -231,23 +236,33 @@ describe('Permission Helpers', () => {
|
|||
expect(
|
||||
isPermitted({
|
||||
roles,
|
||||
clusterName: clusterName2,
|
||||
resource: ResourceType.KSQL,
|
||||
action: Action.EXECUTE,
|
||||
clusterName: clusterName1,
|
||||
resource: ResourceType.ACL,
|
||||
action: Action.VIEW,
|
||||
rbacFlag: true,
|
||||
})
|
||||
).toBeTruthy();
|
||||
|
||||
expect(
|
||||
isPermitted({
|
||||
roles,
|
||||
clusterName: clusterName1,
|
||||
resource: ResourceType.AUDIT,
|
||||
action: Action.VIEW,
|
||||
rbacFlag: true,
|
||||
})
|
||||
).toBeTruthy();
|
||||
|
||||
expect(
|
||||
isPermitted({
|
||||
roles,
|
||||
clusterName: clusterName1,
|
||||
resource: ResourceType.TOPIC,
|
||||
action: Action.VIEW,
|
||||
rbacFlag: true,
|
||||
})
|
||||
).toBeFalsy();
|
||||
|
||||
expect(
|
||||
isPermitted({
|
||||
roles,
|
||||
clusterName: clusterName2,
|
||||
resource: ResourceType.SCHEMA,
|
||||
action: Action.VIEW,
|
||||
rbacFlag: true,
|
||||
})
|
||||
).toBeTruthy();
|
||||
|
||||
expect(
|
||||
isPermitted({
|
||||
roles,
|
||||
|
@@ -256,17 +271,17 @@ describe('Permission Helpers', () => {
        action: Action.VIEW,
        rbacFlag: true,
      })
    ).toBeTruthy();
    ).toBeFalsy();

    expect(
      isPermitted({
        roles,
        clusterName: clusterName2,
        resource: ResourceType.CONNECT,
        clusterName: clusterName1,
        resource: ResourceType.CONSUMER,
        action: Action.VIEW,
        rbacFlag: true,
      })
    ).toBeTruthy();
    ).toBeFalsy();

    expect(
      isPermitted({
@@ -276,7 +291,7 @@ describe('Permission Helpers', () => {
        action: Action.VIEW,
        rbacFlag: true,
      })
    ).toBeTruthy();
    ).toBeFalsy();
  });

  it('should check if the isPermitted returns the correct value with name values', () => {
@@ -445,7 +460,7 @@ describe('Permission Helpers', () => {
        value: '123456',
        rbacFlag: true,
      })
    ).toBeFalsy();
    ).toBeTruthy();

    expect(
      isPermitted({
@@ -468,6 +483,17 @@ describe('Permission Helpers', () => {
        rbacFlag: true,
      })
    ).toBeTruthy();

    expect(
      isPermitted({
        roles,
        clusterName: clusterName1,
        resource: ResourceType.TOPIC,
        action: [Action.MESSAGES_READ],
        value: 'bobross-test',
        rbacFlag: true,
      })
    ).toBeTruthy();
  });

  it('should check the rbac flag and works with permissions accordingly', () => {
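The raised counts and the new ACL/AUDIT assertions above imply that userPermissionsMock gained entries for the resource types introduced further down in permissions.ts. A minimal, purely illustrative sketch of what such entries could look like, assuming UserPermission carries clusters, resource, actions and an optional value (which is how the helpers below consume it); the real fixture lives in the test setup:

import { Action, ResourceType, UserPermission } from 'generated-sources';

// Hypothetical additions to userPermissionsMock; cluster names are illustrative only.
const extraPermissions: UserPermission[] = [
  { clusters: ['local'], resource: ResourceType.ACL, actions: [Action.VIEW] },
  { clusters: ['local'], resource: ResourceType.AUDIT, actions: [Action.VIEW] },
];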
@@ -76,7 +76,6 @@ const formatTopicCreation = (form: TopicFormData): TopicCreation => {
    partitions,
    replicationFactor,
    cleanupPolicy,
    retentionBytes,
    retentionMs,
    maxMessageBytes,
    minInSyncReplicas,
@@ -86,7 +85,6 @@ const formatTopicCreation = (form: TopicFormData): TopicCreation => {
  const configs = {
    'cleanup.policy': cleanupPolicy,
    'retention.ms': retentionMs.toString(),
    'retention.bytes': retentionBytes.toString(),
    'max.message.bytes': maxMessageBytes.toString(),
    'min.insync.replicas': minInSyncReplicas.toString(),
    ...Object.values(customParams || {}).reduce(topicReducer, {}),
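For context, formatTopicCreation above maps camel-cased form fields onto dotted Kafka config keys and then folds the user-supplied custom params on top. A rough, self-contained sketch of that mechanism (the exact key set follows the hunk above; foldParams is only an assumed stand-in for the project's topicReducer):

type CustomParam = { name: string; value: string };

// Assumed stand-in for topicReducer: folds { name, value } pairs into a config record.
const foldParams = (
  acc: Record<string, string>,
  { name, value }: CustomParam
): Record<string, string> => ({ ...acc, [name]: value });

const buildConfigs = (form: {
  cleanupPolicy: string;
  retentionMs: number;
  minInSyncReplicas: number;
  customParams?: Record<string, CustomParam>;
}): Record<string, string> => ({
  'cleanup.policy': form.cleanupPolicy,
  'retention.ms': form.retentionMs.toString(),
  'min.insync.replicas': form.minInSyncReplicas.toString(),
  // custom params are spread last, so they can override the derived keys
  ...Object.values(form.customParams || {}).reduce(foldParams, {}),
});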
@@ -1,9 +1,17 @@
import { Action, UserPermission, ResourceType } from 'generated-sources';
import { Action, ResourceType, UserPermission } from 'generated-sources';

export type RolesType = UserPermission[];

export type RolesModifiedTypes = Map<string, Map<ResourceType, RolesType>>;

const ResourceExemptList: ResourceType[] = [
  ResourceType.KSQL,
  ResourceType.CLUSTERCONFIG,
  ResourceType.APPLICATIONCONFIG,
  ResourceType.ACL,
  ResourceType.AUDIT,
];

export function modifyRolesData(
  data?: RolesType
): Map<string, Map<ResourceType, RolesType>> {
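To make the grouping concrete: modifyRolesData indexes permissions by cluster name first and by resource type second, which is the two-level lookup the tests above perform via cluster1Map/cluster2Map. An illustrative call with a single hypothetical permission (cluster name and value pattern are made up; import path assumed from how the app references this module):

import { Action, ResourceType, UserPermission } from 'generated-sources';
import { modifyRolesData } from 'lib/permissions'; // assumed path

const perms: UserPermission[] = [
  { clusters: ['local'], resource: ResourceType.TOPIC, actions: [Action.VIEW], value: '.*' },
];

const byCluster = modifyRolesData(perms);
// Two-level lookup: cluster name -> resource type -> matching permissions.
byCluster.get('local')?.get(ResourceType.TOPIC); // => [perms[0]]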
@@ -39,6 +47,12 @@ interface IsPermittedConfig {
  rbacFlag: boolean;
}

const valueMatches = (regexp: string | undefined, val: string | undefined) => {
  if (!val) return false;
  if (!regexp) return true;
  return new RegExp(regexp).test(val);
};

/**
 * @description it the logic behind depending on the roles whether a certain action
 * is permitted or not the philosophy is inspired from Headless UI libraries where
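valueMatches above encodes two conventions worth spelling out: a permission without a value pattern matches any concrete name, while a check made without a concrete name fails as soon as a name would be needed. Traced directly from the implementation:

valueMatches(undefined, 'orders');    // true  – no pattern on the permission, any name is fine
valueMatches('^orders', 'orders-v1'); // true  – the pattern is applied as a RegExp to the name
valueMatches('^orders', undefined);   // false – no name to test the pattern against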
@@ -83,32 +97,18 @@ export function isPermitted({
  if (!clusterMap) return false;

  // short circuit
  const resourceData = clusterMap.get(resource);
  if (!resourceData) return false;

  return (
    resourceData.findIndex((item) => {
      let valueCheck = true;
      if (item.value) {
        valueCheck = false;

        if (value) valueCheck = new RegExp(item.value).test(value);
      }

      // short circuit
      if (!valueCheck) return false;

      if (!Array.isArray(action)) {
        return item.actions.includes(action);
      }

      // every given action should be found in that resource
      return action.every(
        (currentAction) =>
          item.actions.findIndex((element) => element === currentAction) !== -1
      );
    }) !== -1
  );
  const resourcePermissions = clusterMap.get(resource);
  if (!resourcePermissions) return false;

  const actions = Array.isArray(action) ? action : [action];

  return actions.every((a) => {
    return resourcePermissions.some((item) => {
      if (!item.actions.includes(a)) return false;
      if (ResourceExemptList.includes(resource)) return true;
      return valueMatches(item.value, value);
    });
  });
}

/**
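Net effect of the rewrite: the requested action is always normalised to an array and every listed action must be granted by some permission on that resource; for resources in ResourceExemptList the permission's value pattern is ignored, while for the remaining resources a concrete name has to be supplied and, when the permission carries a pattern, it has to match. Two illustrative calls, assuming roles was built with modifyRolesData as sketched above and the cluster name is made up:

// Exempt resource: granting KSQL EXECUTE is enough, value patterns are not consulted.
isPermitted({
  roles,
  clusterName: 'local',
  resource: ResourceType.KSQL,
  action: Action.EXECUTE,
  rbacFlag: true,
});

// Named resource: every action must be granted and the permission's
// value regexp (if any) must match the supplied topic name.
isPermitted({
  roles,
  clusterName: 'local',
  resource: ResourceType.TOPIC,
  action: [Action.VIEW, Action.MESSAGES_READ],
  value: 'orders',
  rbacFlag: true,
});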
@@ -44,7 +44,6 @@ export interface TopicFormData {
  minInSyncReplicas: number;
  cleanupPolicy: string;
  retentionMs: number;
  retentionBytes: number;
  maxMessageBytes: number;
  customParams: {
    name: string;