Compare commits
No commits in common. "master" and "fix/issues-in-workflows" have entirely different histories.
master...fix/issues-in-workflows
191 changed files with 3578 additions and 5776 deletions

.github/workflows/aws_publisher.yaml (2 changes)
@@ -31,7 +31,7 @@ jobs:
 echo "Packer will be triggered in this dir $WORK_DIR"

 - name: Configure AWS credentials for Kafka-UI account
-uses: aws-actions/configure-aws-credentials@v3
+uses: aws-actions/configure-aws-credentials@v2
 with:
 aws-access-key-id: ${{ secrets.AWS_AMI_PUBLISH_KEY_ID }}
 aws-secret-access-key: ${{ secrets.AWS_AMI_PUBLISH_KEY_SECRET }}

.github/workflows/backend.yml (5 changes)
@@ -8,9 +8,6 @@ on:
 paths:
 - "kafka-ui-api/**"
 - "pom.xml"
-permissions:
-checks: write
-pull-requests: write
 jobs:
 build-and-test:
 runs-on: ubuntu-latest
@@ -32,7 +29,7 @@ jobs:
 key: ${{ runner.os }}-sonar
 restore-keys: ${{ runner.os }}-sonar
 - name: Build and analyze pull request target
-if: ${{ github.event_name == 'pull_request' }}
+if: ${{ github.event_name == 'pull_request_target' }}
 env:
 GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
 SONAR_TOKEN: ${{ secrets.SONAR_TOKEN_BACKEND }}

.github/workflows/branch-deploy.yml (2 changes)
@@ -45,7 +45,7 @@ jobs:
 restore-keys: |
 ${{ runner.os }}-buildx-
 - name: Configure AWS credentials for Kafka-UI account
-uses: aws-actions/configure-aws-credentials@v3
+uses: aws-actions/configure-aws-credentials@v2
 with:
 aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
 aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}

.github/workflows/build-public-image.yml (2 changes)
@@ -42,7 +42,7 @@ jobs:
 restore-keys: |
 ${{ runner.os }}-buildx-
 - name: Configure AWS credentials for Kafka-UI account
-uses: aws-actions/configure-aws-credentials@v3
+uses: aws-actions/configure-aws-credentials@v2
 with:
 aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
 aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}

.github/workflows/cve.yaml (2 changes)
@@ -55,7 +55,7 @@ jobs:
 cache-to: type=local,dest=/tmp/.buildx-cache

 - name: Run CVE checks
-uses: aquasecurity/trivy-action@0.12.0
+uses: aquasecurity/trivy-action@0.11.2
 with:
 image-ref: "provectuslabs/kafka-ui:${{ steps.build.outputs.version }}"
 format: "table"

.github/workflows/delete-public-image.yml (2 changes)
@@ -15,7 +15,7 @@ jobs:
 tag='${{ github.event.pull_request.number }}'
 echo "tag=${tag}" >> $GITHUB_OUTPUT
 - name: Configure AWS credentials for Kafka-UI account
-uses: aws-actions/configure-aws-credentials@v3
+uses: aws-actions/configure-aws-credentials@v2
 with:
 aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
 aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}

.github/workflows/e2e-automation.yml (2 changes)
@@ -24,7 +24,7 @@ jobs:
 with:
 ref: ${{ github.sha }}
 - name: Configure AWS credentials
-uses: aws-actions/configure-aws-credentials@v3
+uses: aws-actions/configure-aws-credentials@v2
 with:
 aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
 aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}

.github/workflows/e2e-checks.yaml (8 changes)
@@ -8,8 +8,6 @@ on:
 - "kafka-ui-react-app/**"
 - "kafka-ui-e2e-checks/**"
 - "pom.xml"
-permissions:
-statuses: write
 jobs:
 build-and-test:
 runs-on: ubuntu-latest
@@ -18,10 +16,10 @@ jobs:
 with:
 ref: ${{ github.event.pull_request.head.sha }}
 - name: Configure AWS credentials
-uses: aws-actions/configure-aws-credentials@v3
+uses: aws-actions/configure-aws-credentials@v2
 with:
-aws-access-key-id: ${{ secrets.S3_AWS_ACCESS_KEY_ID }}
-aws-secret-access-key: ${{ secrets.S3_AWS_SECRET_ACCESS_KEY }}
+aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
+aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
 aws-region: eu-central-1
 - name: Set up environment
 id: set_env_values

.github/workflows/e2e-weekly.yml (2 changes)
@@ -11,7 +11,7 @@ jobs:
 with:
 ref: ${{ github.sha }}
 - name: Configure AWS credentials
-uses: aws-actions/configure-aws-credentials@v3
+uses: aws-actions/configure-aws-credentials@v2
 with:
 aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
 aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}

.github/workflows/frontend.yaml (13 changes)
@@ -8,9 +8,6 @@ on:
 paths:
 - "kafka-ui-contract/**"
 - "kafka-ui-react-app/**"
-permissions:
-checks: write
-pull-requests: write
 jobs:
 build-and-test:
 env:
@@ -23,13 +20,13 @@ jobs:
 # Disabling shallow clone is recommended for improving relevancy of reporting
 fetch-depth: 0
 ref: ${{ github.event.pull_request.head.sha }}
-- uses: pnpm/action-setup@v2.4.0
+- uses: pnpm/action-setup@v2.2.4
 with:
-version: 8.6.12
+version: 7.4.0
 - name: Install node
-uses: actions/setup-node@v3.8.1
+uses: actions/setup-node@v3.6.0
 with:
-node-version: "18.17.1"
+node-version: "16.15.0"
 cache: "pnpm"
 cache-dependency-path: "./kafka-ui-react-app/pnpm-lock.yaml"
 - name: Install Node dependencies
@@ -49,7 +46,7 @@ jobs:
 cd kafka-ui-react-app/
 pnpm test:CI
 - name: SonarCloud Scan
-uses: sonarsource/sonarcloud-github-action@master
+uses: workshur/sonarcloud-github-action@improved_basedir
 with:
 projectBaseDir: ./kafka-ui-react-app
 args: -Dsonar.pullrequest.key=${{ github.event.pull_request.number }} -Dsonar.pullrequest.branch=${{ github.head_ref }} -Dsonar.pullrequest.base=${{ github.base_ref }}

.github/workflows/pr-checks.yaml (3 changes)
@@ -2,8 +2,7 @@ name: "PR: Checklist linter"
 on:
 pull_request_target:
 types: [opened, edited, synchronize, reopened]
-permissions:
-checks: write
+
 jobs:
 task-check:
 runs-on: ubuntu-latest

.github/workflows/release.yaml (2 changes)
@@ -34,7 +34,7 @@ jobs:
 echo "version=${VERSION}" >> $GITHUB_OUTPUT

 - name: Upload files to a GitHub release
-uses: svenstaro/upload-release-action@2.7.0
+uses: svenstaro/upload-release-action@2.6.1
 with:
 repo_token: ${{ secrets.GITHUB_TOKEN }}
 file: kafka-ui-api/target/kafka-ui-api-${{ steps.build.outputs.version }}.jar

@@ -47,7 +47,7 @@ jobs:
 restore-keys: |
 ${{ runner.os }}-buildx-
 - name: Configure AWS credentials for Kafka-UI account
-uses: aws-actions/configure-aws-credentials@v3
+uses: aws-actions/configure-aws-credentials@v2
 with:
 aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
 aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}

.github/workflows/terraform-deploy.yml (2 changes)
@@ -26,7 +26,7 @@ jobs:
 echo "Terraform will be triggered in this dir $TF_DIR"

 - name: Configure AWS credentials for Kafka-UI account
-uses: aws-actions/configure-aws-credentials@v3
+uses: aws-actions/configure-aws-credentials@v2
 with:
 aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
 aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}

@@ -7,9 +7,7 @@ on:
 issues:
 types:
 - opened
-permissions:
-issues: write
-pull-requests: write
+
 jobs:
 welcome:
 runs-on: ubuntu-latest

@@ -18,10 +18,6 @@
 <a href="https://www.producthunt.com/products/ui-for-apache-kafka/reviews/new">ProductHunt</a>
 </p>

-<p align="center">
-<img src="https://repobeats.axiom.co/api/embed/2e8a7c2d711af9daddd34f9791143e7554c35d0f.svg" />
-</p>
-
 #### UI for Apache Kafka is a free, open-source web UI to monitor and manage Apache Kafka clusters.

 UI for Apache Kafka is a simple tool that makes your data flows observable, helps find and troubleshoot issues faster and deliver optimal performance. Its lightweight dashboard makes it easy to track key metrics of your Kafka clusters - Brokers, Topics, Partitions, Production, and Consumption.
@@ -91,7 +87,7 @@ docker run -it -p 8080:8080 -e DYNAMIC_CONFIG_ENABLED=true provectuslabs/kafka-u

 Then access the web UI at [http://localhost:8080](http://localhost:8080)

-The command is sufficient to try things out. When you're done trying things out, you can proceed with a [persistent installation](https://docs.kafka-ui.provectus.io/quick-start/persistent-start)
+The command is sufficient to try things out. When you're done trying things out, you can proceed with a [persistent installation](https://docs.kafka-ui.provectus.io/configuration/quick-start#persistent-start)

 ## Persistent installation


@@ -1,11 +1,7 @@
 #FROM azul/zulu-openjdk-alpine:17-jre-headless
 FROM azul/zulu-openjdk-alpine@sha256:a36679ac0d28cb835e2a8c00e1e0d95509c6c51c5081c7782b85edb1f37a771a

-RUN apk add --no-cache \
-# snappy codec
-gcompat \
-# configuring timezones
-tzdata
+RUN apk add --no-cache gcompat # need to make snappy codec work
 RUN addgroup -S kafkaui && adduser -S kafkaui -G kafkaui

 # creating folder for dynamic config usage (certificates uploads, etc)

@@ -81,12 +81,6 @@
 <groupId>io.confluent</groupId>
 <artifactId>kafka-json-schema-serializer</artifactId>
 <version>${confluent.version}</version>
-<exclusions>
-<exclusion>
-<groupId>commons-collections</groupId>
-<artifactId>commons-collections</artifactId>
-</exclusion>
-</exclusions>
 </dependency>
 <dependency>
 <groupId>io.confluent</groupId>
@@ -97,7 +91,7 @@
 <dependency>
 <groupId>software.amazon.msk</groupId>
 <artifactId>aws-msk-iam-auth</artifactId>
-<version>1.1.7</version>
+<version>1.1.6</version>
 </dependency>

 <dependency>
@@ -120,11 +114,6 @@
 <artifactId>json</artifactId>
 <version>${org.json.version}</version>
 </dependency>
-<dependency>
-<groupId>io.micrometer</groupId>
-<artifactId>micrometer-registry-prometheus</artifactId>
-<scope>runtime</scope>
-</dependency>

 <dependency>
 <groupId>org.springframework.boot</groupId>
@@ -141,11 +130,6 @@
 <artifactId>commons-pool2</artifactId>
 <version>${apache.commons.version}</version>
 </dependency>
-<dependency>
-<groupId>org.apache.commons</groupId>
-<artifactId>commons-collections4</artifactId>
-<version>4.4</version>
-</dependency>
 <dependency>
 <groupId>org.testcontainers</groupId>
 <artifactId>testcontainers</artifactId>
@@ -249,6 +233,8 @@
 <groupId>org.springframework.security</groupId>
 <artifactId>spring-security-ldap</artifactId>
 </dependency>
+
+
 <dependency>
 <groupId>org.codehaus.groovy</groupId>
 <artifactId>groovy-jsr223</artifactId>
@@ -403,7 +389,7 @@
 <plugin>
 <groupId>pl.project13.maven</groupId>
 <artifactId>git-commit-id-plugin</artifactId>
-<version>4.9.10</version>
+<version>4.0.0</version>
 <executions>
 <execution>
 <id>get-the-git-infos</id>

@@ -57,6 +57,8 @@ public class ClustersProperties {
 @Data
 public static class PollingProperties {
 Integer pollTimeoutMs;
+Integer partitionPollTimeout;
+Integer noDataEmptyPolls;
 Integer maxPageSize;
 Integer defaultPageSize;
 }
@@ -150,13 +152,7 @@ public class ClustersProperties {
 Integer auditTopicsPartitions;
 Boolean topicAuditEnabled;
 Boolean consoleAuditEnabled;
-LogLevel level;
 Map<String, String> auditTopicProperties;
-
-public enum LogLevel {
-ALL,
-ALTER_ONLY //default
-}
 }

 @PostConstruct

@@ -7,6 +7,8 @@ import org.springframework.http.HttpMethod;
 import org.springframework.http.HttpStatus;
+import org.springframework.http.server.reactive.ServerHttpRequest;
+import org.springframework.http.server.reactive.ServerHttpResponse;
 import org.springframework.web.reactive.config.CorsRegistry;
 import org.springframework.web.reactive.config.WebFluxConfigurer;
 import org.springframework.web.server.ServerWebExchange;
 import org.springframework.web.server.WebFilter;
 import org.springframework.web.server.WebFilterChain;

@@ -1,6 +1,7 @@
 package com.provectus.kafka.ui.config;

 import com.provectus.kafka.ui.exception.ValidationException;
 import java.beans.Transient;
 import javax.annotation.PostConstruct;
 import lombok.Data;
 import org.springframework.boot.context.properties.ConfigurationProperties;

@@ -13,7 +13,6 @@ abstract class AbstractAuthSecurityConfig {
 "/resources/**",
 "/actuator/health/**",
 "/actuator/info",
-"/actuator/prometheus",
 "/auth",
 "/login",
 "/logout",

@@ -1,6 +1,7 @@
 package com.provectus.kafka.ui.config.auth;

 import java.util.Collection;
+import lombok.Value;

 public record AuthenticatedUser(String principal, Collection<String> groups) {


@@ -6,13 +6,13 @@ import lombok.extern.slf4j.Slf4j;
 import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
 import org.springframework.context.annotation.Bean;
 import org.springframework.context.annotation.Configuration;
-import org.springframework.http.HttpMethod;
 import org.springframework.security.config.annotation.web.reactive.EnableWebFluxSecurity;
+import org.springframework.security.config.web.server.SecurityWebFiltersOrder;
 import org.springframework.security.config.web.server.ServerHttpSecurity;
 import org.springframework.security.web.server.SecurityWebFilterChain;
 import org.springframework.security.web.server.authentication.RedirectServerAuthenticationSuccessHandler;
 import org.springframework.security.web.server.authentication.logout.RedirectServerLogoutSuccessHandler;
-import org.springframework.security.web.server.util.matcher.ServerWebExchangeMatchers;
+import org.springframework.security.web.server.ui.LogoutPageGeneratingWebFilter;

 @Configuration
 @EnableWebFluxSecurity
@@ -33,19 +33,15 @@ public class BasicAuthSecurityConfig extends AbstractAuthSecurityConfig {
 final var logoutSuccessHandler = new RedirectServerLogoutSuccessHandler();
 logoutSuccessHandler.setLogoutSuccessUrl(URI.create(LOGOUT_URL));


-return http.authorizeExchange(spec -> spec
-.pathMatchers(AUTH_WHITELIST)
-.permitAll()
-.anyExchange()
-.authenticated()
-)
-.formLogin(spec -> spec.loginPage(LOGIN_URL).authenticationSuccessHandler(authHandler))
-.logout(spec -> spec
-.logoutSuccessHandler(logoutSuccessHandler)
-.requiresLogout(ServerWebExchangeMatchers.pathMatchers(HttpMethod.GET, "/logout")))
-.csrf(ServerHttpSecurity.CsrfSpec::disable)
-.build();
+return http
+.addFilterAfter(new LogoutPageGeneratingWebFilter(), SecurityWebFiltersOrder.REACTOR_CONTEXT)
+.csrf().disable()
+.authorizeExchange()
+.pathMatchers(AUTH_WHITELIST).permitAll()
+.anyExchange().authenticated()
+.and().formLogin().loginPage(LOGIN_URL).authenticationSuccessHandler(authHandler)
+.and().logout().logoutSuccessHandler(logoutSuccessHandler)
+.and().build();
 }

 }

@@ -27,12 +27,10 @@ public class DisabledAuthSecurityConfig extends AbstractAuthSecurityConfig {
 System.exit(1);
 }
 log.warn("Authentication is disabled. Access will be unrestricted.");
-
-return http.authorizeExchange(spec -> spec
-.anyExchange()
-.permitAll()
-)
-.csrf(ServerHttpSecurity.CsrfSpec::disable)
+return http.authorizeExchange()
+.anyExchange().permitAll()
+.and()
+.csrf().disable()
 .build();
 }


@@ -24,7 +24,6 @@ import org.springframework.security.authentication.AuthenticationManager;
 import org.springframework.security.authentication.ProviderManager;
 import org.springframework.security.authentication.ReactiveAuthenticationManager;
 import org.springframework.security.authentication.ReactiveAuthenticationManagerAdapter;
-import org.springframework.security.config.Customizer;
 import org.springframework.security.config.annotation.web.reactive.EnableWebFluxSecurity;
 import org.springframework.security.config.web.server.ServerHttpSecurity;
 import org.springframework.security.core.GrantedAuthority;
@@ -127,15 +126,21 @@ public class LdapSecurityConfig {
 log.info("Active Directory support for LDAP has been enabled.");
 }

-return http.authorizeExchange(spec -> spec
-.pathMatchers(AUTH_WHITELIST)
-.permitAll()
-.anyExchange()
-.authenticated()
-)
-.formLogin(Customizer.withDefaults())
-.logout(Customizer.withDefaults())
-.csrf(ServerHttpSecurity.CsrfSpec::disable)
+return http
+.authorizeExchange()
+.pathMatchers(AUTH_WHITELIST)
+.permitAll()
+.anyExchange()
+.authenticated()
+
+.and()
+.formLogin()
+
+.and()
+.logout()
+
+.and()
+.csrf().disable()
 .build();
 }


@@ -12,11 +12,10 @@ import lombok.extern.log4j.Log4j2;
 import org.jetbrains.annotations.Nullable;
 import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
 import org.springframework.boot.autoconfigure.security.oauth2.client.OAuth2ClientProperties;
-import org.springframework.boot.autoconfigure.security.oauth2.client.OAuth2ClientPropertiesMapper;
+import org.springframework.boot.autoconfigure.security.oauth2.client.OAuth2ClientPropertiesRegistrationAdapter;
 import org.springframework.boot.context.properties.EnableConfigurationProperties;
 import org.springframework.context.annotation.Bean;
 import org.springframework.context.annotation.Configuration;
-import org.springframework.security.config.Customizer;
 import org.springframework.security.config.annotation.method.configuration.EnableReactiveMethodSecurity;
 import org.springframework.security.config.annotation.web.reactive.EnableWebFluxSecurity;
 import org.springframework.security.config.web.server.ServerHttpSecurity;
@@ -50,15 +49,21 @@ public class OAuthSecurityConfig extends AbstractAuthSecurityConfig {
 public SecurityWebFilterChain configure(ServerHttpSecurity http, OAuthLogoutSuccessHandler logoutHandler) {
 log.info("Configuring OAUTH2 authentication.");

-return http.authorizeExchange(spec -> spec
-.pathMatchers(AUTH_WHITELIST)
-.permitAll()
-.anyExchange()
-.authenticated()
-)
-.oauth2Login(Customizer.withDefaults())
-.logout(spec -> spec.logoutSuccessHandler(logoutHandler))
-.csrf(ServerHttpSecurity.CsrfSpec::disable)
+return http.authorizeExchange()
+.pathMatchers(AUTH_WHITELIST)
+.permitAll()
+.anyExchange()
+.authenticated()
+
+.and()
+.oauth2Login()
+
+.and()
+.logout()
+.logoutSuccessHandler(logoutHandler)
+
+.and()
+.csrf().disable()
 .build();
 }

@@ -98,10 +103,7 @@ public class OAuthSecurityConfig extends AbstractAuthSecurityConfig {
 public InMemoryReactiveClientRegistrationRepository clientRegistrationRepository() {
 final OAuth2ClientProperties props = OAuthPropertiesConverter.convertProperties(properties);
 final List<ClientRegistration> registrations =
-new ArrayList<>(new OAuth2ClientPropertiesMapper(props).asClientRegistrations().values());
-if (registrations.isEmpty()) {
-throw new IllegalArgumentException("OAuth2 authentication is enabled but no providers specified.");
-}
+new ArrayList<>(OAuth2ClientPropertiesRegistrationAdapter.getClientRegistrations(props).values());
 return new InMemoryReactiveClientRegistrationRepository(registrations);
 }


@@ -2,6 +2,7 @@ package com.provectus.kafka.ui.config.auth;

 import java.util.Collection;
 import java.util.Map;
+import lombok.Value;
 import org.springframework.security.core.GrantedAuthority;
 import org.springframework.security.oauth2.core.user.OAuth2User;


@@ -2,6 +2,7 @@ package com.provectus.kafka.ui.config.auth;

 import java.util.Collection;
 import java.util.Map;
+import lombok.Value;
 import org.springframework.security.core.GrantedAuthority;
 import org.springframework.security.oauth2.core.oidc.OidcIdToken;
 import org.springframework.security.oauth2.core.oidc.OidcUserInfo;

@@ -1,14 +1,13 @@
 package com.provectus.kafka.ui.config.auth.condition;

 import com.provectus.kafka.ui.service.rbac.AbstractProviderCondition;
-import org.jetbrains.annotations.NotNull;
 import org.springframework.context.annotation.Condition;
 import org.springframework.context.annotation.ConditionContext;
 import org.springframework.core.type.AnnotatedTypeMetadata;

 public class CognitoCondition extends AbstractProviderCondition implements Condition {
 @Override
-public boolean matches(final ConditionContext context, final @NotNull AnnotatedTypeMetadata metadata) {
+public boolean matches(final ConditionContext context, final AnnotatedTypeMetadata metadata) {
 return getRegisteredProvidersTypes(context.getEnvironment()).stream().anyMatch(a -> a.equalsIgnoreCase("cognito"));
 }
 }
 }

@@ -2,19 +2,12 @@ package com.provectus.kafka.ui.controller;

 import com.provectus.kafka.ui.exception.ClusterNotFoundException;
 import com.provectus.kafka.ui.model.KafkaCluster;
-import com.provectus.kafka.ui.model.rbac.AccessContext;
 import com.provectus.kafka.ui.service.ClustersStorage;
-import com.provectus.kafka.ui.service.audit.AuditService;
-import com.provectus.kafka.ui.service.rbac.AccessControlService;
 import org.springframework.beans.factory.annotation.Autowired;
-import reactor.core.publisher.Mono;
-import reactor.core.publisher.Signal;

 public abstract class AbstractController {

-protected ClustersStorage clustersStorage;
-protected AccessControlService accessControlService;
-protected AuditService auditService;
+private ClustersStorage clustersStorage;

 protected KafkaCluster getCluster(String name) {
 return clustersStorage.getClusterByName(name)
@@ -22,26 +15,8 @@ public abstract class AbstractController {
 String.format("Cluster with name '%s' not found", name)));
 }

-protected Mono<Void> validateAccess(AccessContext context) {
-return accessControlService.validateAccess(context);
-}
-
-protected void audit(AccessContext acxt, Signal<?> sig) {
-auditService.audit(acxt, sig);
-}
-
 @Autowired
 public void setClustersStorage(ClustersStorage clustersStorage) {
 this.clustersStorage = clustersStorage;
 }
-
-@Autowired
-public void setAccessControlService(AccessControlService accessControlService) {
-this.accessControlService = accessControlService;
-}
-
-@Autowired
-public void setAuditService(AuditService auditService) {
-this.auditService = auditService;
-}
 }

@@ -13,6 +13,7 @@ import java.util.Collection;
 import java.util.Collections;
 import java.util.List;
 import java.util.Objects;
+import java.util.stream.Collectors;
 import javax.annotation.Nullable;
 import lombok.RequiredArgsConstructor;
 import lombok.extern.slf4j.Slf4j;
@@ -37,7 +38,7 @@ public class AccessController implements AuthorizationApi {
 .filter(role -> user.groups().contains(role.getName()))
 .map(role -> mapPermissions(role.getPermissions(), role.getClusters()))
 .flatMap(Collection::stream)
-.toList()
+.collect(Collectors.toList())
 )
 .switchIfEmpty(Mono.just(Collections.emptyList()));

@@ -69,10 +70,10 @@ public class AccessController implements AuthorizationApi {
 .map(String::toUpperCase)
 .map(this::mapAction)
 .filter(Objects::nonNull)
-.toList());
+.collect(Collectors.toList()));
 return dto;
 })
-.toList();
+.collect(Collectors.toList());
 }

 @Nullable

@@ -2,15 +2,14 @@ package com.provectus.kafka.ui.controller;

 import com.provectus.kafka.ui.api.AclsApi;
 import com.provectus.kafka.ui.mapper.ClusterMapper;
-import com.provectus.kafka.ui.model.CreateConsumerAclDTO;
-import com.provectus.kafka.ui.model.CreateProducerAclDTO;
-import com.provectus.kafka.ui.model.CreateStreamAppAclDTO;
 import com.provectus.kafka.ui.model.KafkaAclDTO;
 import com.provectus.kafka.ui.model.KafkaAclNamePatternTypeDTO;
 import com.provectus.kafka.ui.model.KafkaAclResourceTypeDTO;
 import com.provectus.kafka.ui.model.rbac.AccessContext;
 import com.provectus.kafka.ui.model.rbac.permission.AclAction;
 import com.provectus.kafka.ui.service.acl.AclsService;
+import com.provectus.kafka.ui.service.audit.AuditService;
+import com.provectus.kafka.ui.service.rbac.AccessControlService;
 import java.util.Optional;
 import lombok.RequiredArgsConstructor;
 import org.apache.kafka.common.resource.PatternType;
@@ -27,6 +26,8 @@ import reactor.core.publisher.Mono;
 public class AclsController extends AbstractController implements AclsApi {

 private final AclsService aclsService;
+private final AccessControlService accessControlService;
+private final AuditService auditService;

 @Override
 public Mono<ResponseEntity<Void>> createAcl(String clusterName, Mono<KafkaAclDTO> kafkaAclDto,
@@ -37,11 +38,11 @@ public class AclsController extends AbstractController implements AclsApi {
 .operationName("createAcl")
 .build();

-return validateAccess(context)
+return accessControlService.validateAccess(context)
 .then(kafkaAclDto)
 .map(ClusterMapper::toAclBinding)
 .flatMap(binding -> aclsService.createAcl(getCluster(clusterName), binding))
-.doOnEach(sig -> audit(context, sig))
+.doOnEach(sig -> auditService.audit(context, sig))
 .thenReturn(ResponseEntity.ok().build());
 }

@@ -54,11 +55,11 @@ public class AclsController extends AbstractController implements AclsApi {
 .operationName("deleteAcl")
 .build();

-return validateAccess(context)
+return accessControlService.validateAccess(context)
 .then(kafkaAclDto)
 .map(ClusterMapper::toAclBinding)
 .flatMap(binding -> aclsService.deleteAcl(getCluster(clusterName), binding))
-.doOnEach(sig -> audit(context, sig))
+.doOnEach(sig -> auditService.audit(context, sig))
 .thenReturn(ResponseEntity.ok().build());
 }

@@ -84,12 +85,12 @@ public class AclsController extends AbstractController implements AclsApi {

 var filter = new ResourcePatternFilter(resourceType, resourceName, namePatternType);

-return validateAccess(context).then(
+return accessControlService.validateAccess(context).then(
 Mono.just(
 ResponseEntity.ok(
 aclsService.listAcls(getCluster(clusterName), filter)
 .map(ClusterMapper::toKafkaAclDto)))
-).doOnEach(sig -> audit(context, sig));
+).doOnEach(sig -> auditService.audit(context, sig));
 }

 @Override
@@ -100,11 +101,11 @@ public class AclsController extends AbstractController implements AclsApi {
 .operationName("getAclAsCsv")
 .build();

-return validateAccess(context).then(
+return accessControlService.validateAccess(context).then(
 aclsService.getAclAsCsvString(getCluster(clusterName))
 .map(ResponseEntity::ok)
 .flatMap(Mono::just)
-.doOnEach(sig -> audit(context, sig))
+.doOnEach(sig -> auditService.audit(context, sig))
 );
 }

@@ -116,61 +117,10 @@ public class AclsController extends AbstractController implements AclsApi {
 .operationName("syncAclsCsv")
 .build();

-return validateAccess(context)
+return accessControlService.validateAccess(context)
 .then(csvMono)
 .flatMap(csv -> aclsService.syncAclWithAclCsv(getCluster(clusterName), csv))
-.doOnEach(sig -> audit(context, sig))
-.thenReturn(ResponseEntity.ok().build());
-}
-
-@Override
-public Mono<ResponseEntity<Void>> createConsumerAcl(String clusterName,
-Mono<CreateConsumerAclDTO> createConsumerAclDto,
-ServerWebExchange exchange) {
-AccessContext context = AccessContext.builder()
-.cluster(clusterName)
-.aclActions(AclAction.EDIT)
-.operationName("createConsumerAcl")
-.build();
-
-return validateAccess(context)
-.then(createConsumerAclDto)
-.flatMap(req -> aclsService.createConsumerAcl(getCluster(clusterName), req))
-.doOnEach(sig -> audit(context, sig))
-.thenReturn(ResponseEntity.ok().build());
-}
-
-@Override
-public Mono<ResponseEntity<Void>> createProducerAcl(String clusterName,
-Mono<CreateProducerAclDTO> createProducerAclDto,
-ServerWebExchange exchange) {
-AccessContext context = AccessContext.builder()
-.cluster(clusterName)
-.aclActions(AclAction.EDIT)
-.operationName("createProducerAcl")
-.build();
-
-return validateAccess(context)
-.then(createProducerAclDto)
-.flatMap(req -> aclsService.createProducerAcl(getCluster(clusterName), req))
-.doOnEach(sig -> audit(context, sig))
-.thenReturn(ResponseEntity.ok().build());
-}
-
-@Override
-public Mono<ResponseEntity<Void>> createStreamAppAcl(String clusterName,
-Mono<CreateStreamAppAclDTO> createStreamAppAclDto,
-ServerWebExchange exchange) {
-AccessContext context = AccessContext.builder()
-.cluster(clusterName)
-.aclActions(AclAction.EDIT)
-.operationName("createStreamAppAcl")
-.build();
-
-return validateAccess(context)
-.then(createStreamAppAclDto)
-.flatMap(req -> aclsService.createStreamAppAcl(getCluster(clusterName), req))
-.doOnEach(sig -> audit(context, sig))
+.doOnEach(sig -> auditService.audit(context, sig))
 .thenReturn(ResponseEntity.ok().build());
 }
 }

@@ -15,6 +15,8 @@ import com.provectus.kafka.ui.model.UploadedFileInfoDTO;
 import com.provectus.kafka.ui.model.rbac.AccessContext;
 import com.provectus.kafka.ui.service.ApplicationInfoService;
 import com.provectus.kafka.ui.service.KafkaClusterFactory;
+import com.provectus.kafka.ui.service.audit.AuditService;
+import com.provectus.kafka.ui.service.rbac.AccessControlService;
 import com.provectus.kafka.ui.util.ApplicationRestarter;
 import com.provectus.kafka.ui.util.DynamicConfigOperations;
 import com.provectus.kafka.ui.util.DynamicConfigOperations.PropertiesStructure;
@@ -37,7 +39,7 @@ import reactor.util.function.Tuples;
 @Slf4j
 @RestController
 @RequiredArgsConstructor
-public class ApplicationConfigController extends AbstractController implements ApplicationConfigApi {
+public class ApplicationConfigController implements ApplicationConfigApi {

 private static final PropertiesMapper MAPPER = Mappers.getMapper(PropertiesMapper.class);

@@ -49,10 +51,12 @@ public class ApplicationConfigController extends AbstractController implements A
 ApplicationConfigPropertiesDTO toDto(PropertiesStructure propertiesStructure);
 }

+private final AccessControlService accessControlService;
 private final DynamicConfigOperations dynamicConfigOperations;
 private final ApplicationRestarter restarter;
 private final KafkaClusterFactory kafkaClusterFactory;
 private final ApplicationInfoService applicationInfoService;
+private final AuditService auditService;

 @Override
 public Mono<ResponseEntity<ApplicationInfoDTO>> getApplicationInfo(ServerWebExchange exchange) {
@@ -65,12 +69,12 @@ public class ApplicationConfigController extends AbstractController implements A
 .applicationConfigActions(VIEW)
 .operationName("getCurrentConfig")
 .build();
-return validateAccess(context)
+return accessControlService.validateAccess(context)
 .then(Mono.fromSupplier(() -> ResponseEntity.ok(
 new ApplicationConfigDTO()
 .properties(MAPPER.toDto(dynamicConfigOperations.getCurrentProperties()))
 )))
-.doOnEach(sig -> audit(context, sig));
+.doOnEach(sig -> auditService.audit(context, sig));
 }

 @Override
@@ -80,15 +84,14 @@ public class ApplicationConfigController extends AbstractController implements A
 .applicationConfigActions(EDIT)
 .operationName("restartWithConfig")
 .build();
-return validateAccess(context)
+return accessControlService.validateAccess(context)
 .then(restartRequestDto)
-.doOnNext(restartDto -> {
-var newConfig = MAPPER.fromDto(restartDto.getConfig().getProperties());
-dynamicConfigOperations.persist(newConfig);
+.<ResponseEntity<Void>>map(dto -> {
+dynamicConfigOperations.persist(MAPPER.fromDto(dto.getConfig().getProperties()));
+restarter.requestRestart();
+return ResponseEntity.ok().build();
 })
-.doOnEach(sig -> audit(context, sig))
-.doOnSuccess(dto -> restarter.requestRestart())
-.map(dto -> ResponseEntity.ok().build());
+.doOnEach(sig -> auditService.audit(context, sig));
 }

 @Override
@@ -98,13 +101,13 @@ public class ApplicationConfigController extends AbstractController implements A
 .applicationConfigActions(EDIT)
 .operationName("uploadConfigRelatedFile")
 .build();
-return validateAccess(context)
+return accessControlService.validateAccess(context)
 .then(fileFlux.single())
 .flatMap(file ->
 dynamicConfigOperations.uploadConfigRelatedFile((FilePart) file)
 .map(path -> new UploadedFileInfoDTO().location(path.toString()))
 .map(ResponseEntity::ok))
-.doOnEach(sig -> audit(context, sig));
+.doOnEach(sig -> auditService.audit(context, sig));
 }

 @Override
@@ -114,16 +117,16 @@ public class ApplicationConfigController extends AbstractController implements A
 .applicationConfigActions(EDIT)
 .operationName("validateConfig")
 .build();
-return validateAccess(context)
+return accessControlService.validateAccess(context)
 .then(configDto)
 .flatMap(config -> {
-PropertiesStructure newConfig = MAPPER.fromDto(config.getProperties());
-ClustersProperties clustersProperties = newConfig.getKafka();
+PropertiesStructure propertiesStructure = MAPPER.fromDto(config.getProperties());
+ClustersProperties clustersProperties = propertiesStructure.getKafka();
 return validateClustersConfig(clustersProperties)
 .map(validations -> new ApplicationConfigValidationDTO().clusters(validations));
 })
 .map(ResponseEntity::ok)
-.doOnEach(sig -> audit(context, sig));
+.doOnEach(sig -> auditService.audit(context, sig));
 }

 private Mono<Map<String, ClusterConfigValidationDTO>> validateClustersConfig(
|
|||
+ " <meta name=\"description\" content=\"\">\n"
|
||||
+ " <meta name=\"author\" content=\"\">\n"
|
||||
+ " <title>Please sign in</title>\n"
|
||||
+ " <link href=\"" + contextPath + "/static/css/bootstrap.min.css\" rel=\"stylesheet\" "
|
||||
+ " <link href=\"/static/css/bootstrap.min.css\" rel=\"stylesheet\" "
|
||||
+ "integrity=\"sha384-/Y6pD6FV/Vv2HJnA6t+vslU6fwYXjCFtcEpHbNJ0lyAFsXTsjBbfaDjzALeQsN6M\" "
|
||||
+ "crossorigin=\"anonymous\">\n"
|
||||
+ " <link href=\"" + contextPath + "/static/css/signin.css\" "
|
||||
+ " <link href=\"/static/css/signin.css\" "
|
||||
+ "rel=\"stylesheet\" crossorigin=\"anonymous\"/>\n"
|
||||
+ " </head>\n"
|
||||
+ " <body>\n"
|
||||
|
|
|

@@ -11,6 +11,8 @@ import com.provectus.kafka.ui.model.BrokersLogdirsDTO;
 import com.provectus.kafka.ui.model.rbac.AccessContext;
 import com.provectus.kafka.ui.model.rbac.permission.ClusterConfigAction;
 import com.provectus.kafka.ui.service.BrokerService;
+import com.provectus.kafka.ui.service.audit.AuditService;
+import com.provectus.kafka.ui.service.rbac.AccessControlService;
 import java.util.List;
 import java.util.Map;
 import javax.annotation.Nullable;
@@ -26,11 +28,12 @@ import reactor.core.publisher.Mono;
 @RequiredArgsConstructor
 @Slf4j
 public class BrokersController extends AbstractController implements BrokersApi {
-private static final String BROKER_ID = "brokerId";
-
 private final BrokerService brokerService;
 private final ClusterMapper clusterMapper;

+private final AuditService auditService;
+private final AccessControlService accessControlService;
+
 @Override
 public Mono<ResponseEntity<Flux<BrokerDTO>>> getBrokers(String clusterName,
 ServerWebExchange exchange) {
@@ -40,9 +43,9 @@ public class BrokersController extends AbstractController implements BrokersApi
 .build();

 var job = brokerService.getBrokers(getCluster(clusterName)).map(clusterMapper::toBrokerDto);
-return validateAccess(context)
+return accessControlService.validateAccess(context)
 .thenReturn(ResponseEntity.ok(job))
-.doOnEach(sig -> audit(context, sig));
+.doOnEach(sig -> auditService.audit(context, sig));
 }

 @Override
@@ -54,14 +57,14 @@ public class BrokersController extends AbstractController implements BrokersApi
 .operationParams(Map.of("id", id))
 .build();

-return validateAccess(context)
+return accessControlService.validateAccess(context)
 .then(
 brokerService.getBrokerMetrics(getCluster(clusterName), id)
 .map(clusterMapper::toBrokerMetrics)
 .map(ResponseEntity::ok)
 .onErrorReturn(ResponseEntity.notFound().build())
 )
-.doOnEach(sig -> audit(context, sig));
+.doOnEach(sig -> auditService.audit(context, sig));
 }

 @Override
@@ -77,10 +80,10 @@ public class BrokersController extends AbstractController implements BrokersApi
 .operationParams(Map.of("brokerIds", brokerIds))
 .build();

-return validateAccess(context)
+return accessControlService.validateAccess(context)
 .thenReturn(ResponseEntity.ok(
 brokerService.getAllBrokersLogdirs(getCluster(clusterName), brokerIds)))
-.doOnEach(sig -> audit(context, sig));
+.doOnEach(sig -> auditService.audit(context, sig));
 }

 @Override
@@ -91,14 +94,14 @@ public class BrokersController extends AbstractController implements BrokersApi
 .cluster(clusterName)
 .clusterConfigActions(ClusterConfigAction.VIEW)
 .operationName("getBrokerConfig")
-.operationParams(Map.of(BROKER_ID, id))
+.operationParams(Map.of("brokerId", id))
 .build();

-return validateAccess(context).thenReturn(
+return accessControlService.validateAccess(context).thenReturn(
 ResponseEntity.ok(
 brokerService.getBrokerConfig(getCluster(clusterName), id)
 .map(clusterMapper::toBrokerConfig))
-).doOnEach(sig -> audit(context, sig));
+).doOnEach(sig -> auditService.audit(context, sig));
 }

 @Override
@@ -110,14 +113,14 @@ public class BrokersController extends AbstractController implements BrokersApi
 .cluster(clusterName)
 .clusterConfigActions(ClusterConfigAction.VIEW, ClusterConfigAction.EDIT)
 .operationName("updateBrokerTopicPartitionLogDir")
-.operationParams(Map.of(BROKER_ID, id))
+.operationParams(Map.of("brokerId", id))
 .build();

-return validateAccess(context).then(
+return accessControlService.validateAccess(context).then(
 brokerLogdir
 .flatMap(bld -> brokerService.updateBrokerLogDir(getCluster(clusterName), id, bld))
 .map(ResponseEntity::ok)
-).doOnEach(sig -> audit(context, sig));
+).doOnEach(sig -> auditService.audit(context, sig));
 }

 @Override
@@ -130,14 +133,14 @@ public class BrokersController extends AbstractController implements BrokersApi
 .cluster(clusterName)
 .clusterConfigActions(ClusterConfigAction.VIEW, ClusterConfigAction.EDIT)
 .operationName("updateBrokerConfigByName")
-.operationParams(Map.of(BROKER_ID, id))
+.operationParams(Map.of("brokerId", id))
 .build();

-return validateAccess(context).then(
+return accessControlService.validateAccess(context).then(
 brokerConfig
 .flatMap(bci -> brokerService.updateBrokerConfigByName(
 getCluster(clusterName), id, name, bci.getValue()))
 .map(ResponseEntity::ok)
-).doOnEach(sig -> audit(context, sig));
+).doOnEach(sig -> auditService.audit(context, sig));
 }
 }

@@ -6,6 +6,8 @@ import com.provectus.kafka.ui.model.ClusterMetricsDTO;
 import com.provectus.kafka.ui.model.ClusterStatsDTO;
 import com.provectus.kafka.ui.model.rbac.AccessContext;
 import com.provectus.kafka.ui.service.ClusterService;
+import com.provectus.kafka.ui.service.audit.AuditService;
+import com.provectus.kafka.ui.service.rbac.AccessControlService;
 import lombok.RequiredArgsConstructor;
 import lombok.extern.slf4j.Slf4j;
 import org.springframework.http.ResponseEntity;
@@ -19,6 +21,8 @@ import reactor.core.publisher.Mono;
 @Slf4j
 public class ClustersController extends AbstractController implements ClustersApi {
 private final ClusterService clusterService;
+private final AccessControlService accessControlService;
+private final AuditService auditService;

 @Override
 public Mono<ResponseEntity<Flux<ClusterDTO>>> getClusters(ServerWebExchange exchange) {
@@ -36,13 +40,13 @@ public class ClustersController extends AbstractController implements ClustersAp
 .operationName("getClusterMetrics")
 .build();

-return validateAccess(context)
+return accessControlService.validateAccess(context)
 .then(
 clusterService.getClusterMetrics(getCluster(clusterName))
 .map(ResponseEntity::ok)
 .onErrorReturn(ResponseEntity.notFound().build())
 )
-.doOnEach(sig -> audit(context, sig));
+.doOnEach(sig -> auditService.audit(context, sig));
 }

 @Override
@@ -53,13 +57,13 @@ public class ClustersController extends AbstractController implements ClustersAp
 .operationName("getClusterStats")
 .build();

-return validateAccess(context)
+return accessControlService.validateAccess(context)
 .then(
 clusterService.getClusterStats(getCluster(clusterName))
 .map(ResponseEntity::ok)
 .onErrorReturn(ResponseEntity.notFound().build())
 )
-.doOnEach(sig -> audit(context, sig));
+.doOnEach(sig -> auditService.audit(context, sig));
 }

 @Override
@@ -71,8 +75,8 @@ public class ClustersController extends AbstractController implements ClustersAp
 .operationName("updateClusterInfo")
 .build();

-return validateAccess(context)
+return accessControlService.validateAccess(context)
 .then(clusterService.updateCluster(getCluster(clusterName)).map(ResponseEntity::ok))
-.doOnEach(sig -> audit(context, sig));
+.doOnEach(sig -> auditService.audit(context, sig));
 }
 }

@@ -19,9 +19,12 @@ import com.provectus.kafka.ui.model.rbac.AccessContext;
 import com.provectus.kafka.ui.model.rbac.permission.TopicAction;
 import com.provectus.kafka.ui.service.ConsumerGroupService;
 import com.provectus.kafka.ui.service.OffsetsResetService;
+import com.provectus.kafka.ui.service.audit.AuditService;
+import com.provectus.kafka.ui.service.rbac.AccessControlService;
 import java.util.Map;
 import java.util.Optional;
 import java.util.function.Supplier;
+import java.util.stream.Collectors;
 import lombok.RequiredArgsConstructor;
 import lombok.extern.slf4j.Slf4j;
 import org.springframework.beans.factory.annotation.Value;
@@ -39,6 +42,8 @@ public class ConsumerGroupsController extends AbstractController implements Cons

 private final ConsumerGroupService consumerGroupService;
 private final OffsetsResetService offsetsResetService;
+private final AccessControlService accessControlService;
+private final AuditService auditService;

 @Value("${consumer.groups.page.size:25}")
 private int defaultConsumerGroupsPageSize;
@@ -54,9 +59,9 @@ public class ConsumerGroupsController extends AbstractController implements Cons
 .operationName("deleteConsumerGroup")
 .build();

-return validateAccess(context)
+return accessControlService.validateAccess(context)
 .then(consumerGroupService.deleteConsumerGroupById(getCluster(clusterName), id))
-.doOnEach(sig -> audit(context, sig))
+.doOnEach(sig -> auditService.audit(context, sig))
 .thenReturn(ResponseEntity.ok().build());
 }

@@ -71,11 +76,11 @@ public class ConsumerGroupsController extends AbstractController implements Cons
 .operationName("getConsumerGroup")
 .build();

-return validateAccess(context)
+return accessControlService.validateAccess(context)
 .then(consumerGroupService.getConsumerGroupDetail(getCluster(clusterName), consumerGroupId)
 .map(ConsumerGroupMapper::toDetailsDto)
 .map(ResponseEntity::ok))
-.doOnEach(sig -> audit(context, sig));
+.doOnEach(sig -> auditService.audit(context, sig));
 }

 @Override
@@ -99,9 +104,9 @@ public class ConsumerGroupsController extends AbstractController implements Cons
 .map(ResponseEntity::ok)
 .switchIfEmpty(Mono.just(ResponseEntity.notFound().build()));

-return validateAccess(context)
+return accessControlService.validateAccess(context)
 .then(job)
-.doOnEach(sig -> audit(context, sig));
+.doOnEach(sig -> auditService.audit(context, sig));
 }

 @Override
@@ -120,7 +125,7 @@ public class ConsumerGroupsController extends AbstractController implements Cons
 .operationName("getConsumerGroupsPage")
 .build();

-return validateAccess(context).then(
+return accessControlService.validateAccess(context).then(
 consumerGroupService.getConsumerGroupsPage(
 getCluster(clusterName),
 Optional.ofNullable(page).filter(i -> i > 0).orElse(1),
@@ -131,7 +136,7 @@ public class ConsumerGroupsController extends AbstractController implements Cons
 )
 .map(this::convertPage)
 .map(ResponseEntity::ok)
-).doOnEach(sig -> audit(context, sig));
+).doOnEach(sig -> auditService.audit(context, sig));
 }

 @Override
@@ -186,9 +191,9 @@ public class ConsumerGroupsController extends AbstractController implements Cons
 }
 };

-return validateAccess(context)
+return accessControlService.validateAccess(context)
 .then(mono.get())
-.doOnEach(sig -> audit(context, sig));
+.doOnEach(sig -> auditService.audit(context, sig));
 }).thenReturn(ResponseEntity.ok().build());
 }

@@ -199,7 +204,7 @@ public class ConsumerGroupsController extends AbstractController implements Cons
 .consumerGroups(consumerGroupConsumerGroupsPage.consumerGroups()
 .stream()
 .map(ConsumerGroupMapper::toDto)
-.toList());
+.collect(Collectors.toList()));
 }

 }

@@ -18,6 +18,8 @@ import com.provectus.kafka.ui.model.TaskDTO;
 import com.provectus.kafka.ui.model.rbac.AccessContext;
 import com.provectus.kafka.ui.model.rbac.permission.ConnectAction;
 import com.provectus.kafka.ui.service.KafkaConnectService;
+import com.provectus.kafka.ui.service.audit.AuditService;
+import com.provectus.kafka.ui.service.rbac.AccessControlService;
 import java.util.Comparator;
 import java.util.Map;
 import java.util.Set;
@@ -36,9 +38,10 @@ import reactor.core.publisher.Mono;
 public class KafkaConnectController extends AbstractController implements KafkaConnectApi {
 private static final Set<ConnectorActionDTO> RESTART_ACTIONS
 = Set.of(RESTART, RESTART_FAILED_TASKS, RESTART_ALL_TASKS);
-private static final String CONNECTOR_NAME = "connectorName";

 private final KafkaConnectService kafkaConnectService;
+private final AccessControlService accessControlService;
+private final AuditService auditService;

 @Override
 public Mono<ResponseEntity<Flux<ConnectDTO>>> getConnects(String clusterName,
@@ -61,9 +64,9 @@ public class KafkaConnectController extends AbstractController implements KafkaC
 .operationName("getConnectors")
 .build();

-return validateAccess(context)
+return accessControlService.validateAccess(context)
 .thenReturn(ResponseEntity.ok(kafkaConnectService.getConnectorNames(getCluster(clusterName), connectName)))
-.doOnEach(sig -> audit(context, sig));
+.doOnEach(sig -> auditService.audit(context, sig));
 }

 @Override
@@ -78,10 +81,10 @@ public class KafkaConnectController extends AbstractController implements KafkaC
 .operationName("createConnector")
 .build();

-return validateAccess(context).then(
+return accessControlService.validateAccess(context).then(
 kafkaConnectService.createConnector(getCluster(clusterName), connectName, connector)
 .map(ResponseEntity::ok)
-).doOnEach(sig -> audit(context, sig));
+).doOnEach(sig -> auditService.audit(context, sig));
 }

 @Override
@@ -97,10 +100,10 @@ public class KafkaConnectController extends AbstractController implements KafkaC
 .operationName("getConnector")
 .build();

-return validateAccess(context).then(
+return accessControlService.validateAccess(context).then(
 kafkaConnectService.getConnector(getCluster(clusterName), connectName, connectorName)
 .map(ResponseEntity::ok)
-).doOnEach(sig -> audit(context, sig));
+).doOnEach(sig -> auditService.audit(context, sig));
 }

 @Override
@@ -113,13 +116,13 @@ public class KafkaConnectController extends AbstractController implements KafkaC
 .connect(connectName)
 .connectActions(ConnectAction.VIEW, ConnectAction.EDIT)
 .operationName("deleteConnector")
-.operationParams(Map.of(CONNECTOR_NAME, connectName))
+.operationParams(Map.of("connectorName", connectName))
 .build();

-return validateAccess(context).then(
+return accessControlService.validateAccess(context).then(
 kafkaConnectService.deleteConnector(getCluster(clusterName), connectName, connectorName)
 .map(ResponseEntity::ok)
-).doOnEach(sig -> audit(context, sig));
+).doOnEach(sig -> auditService.audit(context, sig));
 }

@@ -147,7 +150,7 @@ public class KafkaConnectController extends AbstractController implements KafkaC
 .sort(comparator);

 return Mono.just(ResponseEntity.ok(job))
-.doOnEach(sig -> audit(context, sig));
+.doOnEach(sig -> auditService.audit(context, sig));
 }

 @Override
@@ -163,11 +166,11 @@ public class KafkaConnectController extends AbstractController implements KafkaC
 .operationName("getConnectorConfig")
 .build();

-return validateAccess(context).then(
+return accessControlService.validateAccess(context).then(
 kafkaConnectService
 .getConnectorConfig(getCluster(clusterName), connectName, connectorName)
 .map(ResponseEntity::ok)
-).doOnEach(sig -> audit(context, sig));
+).doOnEach(sig -> auditService.audit(context, sig));
 }

 @Override
@@ -181,14 +184,14 @@ public class KafkaConnectController extends AbstractController implements KafkaC
 .connect(connectName)
 .connectActions(ConnectAction.VIEW, ConnectAction.EDIT)
 .operationName("setConnectorConfig")
-.operationParams(Map.of(CONNECTOR_NAME, connectorName))
+.operationParams(Map.of("connectorName", connectorName))
 .build();

-return validateAccess(context).then(
+return accessControlService.validateAccess(context).then(
 kafkaConnectService
 .setConnectorConfig(getCluster(clusterName), connectName, connectorName, requestBody)
 .map(ResponseEntity::ok))
-.doOnEach(sig -> audit(context, sig));
+.doOnEach(sig -> auditService.audit(context, sig));
 }

 @Override
@@ -208,14 +211,14 @@ public class KafkaConnectController extends AbstractController implements KafkaC
 .connect(connectName)
 .connectActions(connectActions)
 .operationName("updateConnectorState")
-.operationParams(Map.of(CONNECTOR_NAME, connectorName))
+.operationParams(Map.of("connectorName", connectorName))
 .build();

-return validateAccess(context).then(
+return accessControlService.validateAccess(context).then(
 kafkaConnectService
 .updateConnectorState(getCluster(clusterName), connectName, connectorName, action)
 .map(ResponseEntity::ok)
-).doOnEach(sig -> audit(context, sig));
+).doOnEach(sig -> auditService.audit(context, sig));
 }

 @Override
@@ -228,14 +231,14 @@ public class KafkaConnectController extends AbstractController implements KafkaC
 .connect(connectName)
 .connectActions(ConnectAction.VIEW)
 .operationName("getConnectorTasks")
-.operationParams(Map.of(CONNECTOR_NAME, connectorName))
+.operationParams(Map.of("connectorName", connectorName))
 .build();

-return validateAccess(context).thenReturn(
+return accessControlService.validateAccess(context).thenReturn(
 ResponseEntity
 .ok(kafkaConnectService
 .getConnectorTasks(getCluster(clusterName), connectName, connectorName))
-).doOnEach(sig -> audit(context, sig));
+).doOnEach(sig -> auditService.audit(context, sig));
 }

 @Override
@@ -248,14 +251,14 @@ public class KafkaConnectController extends AbstractController implements KafkaC
 .connect(connectName)
 .connectActions(ConnectAction.VIEW, ConnectAction.RESTART)
 .operationName("restartConnectorTask")
-.operationParams(Map.of(CONNECTOR_NAME, connectorName))
+.operationParams(Map.of("connectorName", connectorName))
 .build();

-return validateAccess(context).then(
+return accessControlService.validateAccess(context).then(
 kafkaConnectService
 .restartConnectorTask(getCluster(clusterName), connectName, connectorName, taskId)
 .map(ResponseEntity::ok)
-).doOnEach(sig -> audit(context, sig));
+).doOnEach(sig -> auditService.audit(context, sig));
 }

 @Override
@@ -269,11 +272,11 @@ public class KafkaConnectController extends AbstractController implements KafkaC
 .operationName("getConnectorPlugins")
 .build();

-return validateAccess(context).then(
+return accessControlService.validateAccess(context).then(
 Mono.just(
 ResponseEntity.ok(
 kafkaConnectService.getConnectorPlugins(getCluster(clusterName), connectName)))
-).doOnEach(sig -> audit(context, sig));
+).doOnEach(sig -> auditService.audit(context, sig));
 }

 @Override
@ -9,7 +9,9 @@ import com.provectus.kafka.ui.model.KsqlTableDescriptionDTO;
import com.provectus.kafka.ui.model.KsqlTableResponseDTO;
import com.provectus.kafka.ui.model.rbac.AccessContext;
import com.provectus.kafka.ui.model.rbac.permission.KsqlAction;
import com.provectus.kafka.ui.service.audit.AuditService;
import com.provectus.kafka.ui.service.ksql.KsqlServiceV2;
import com.provectus.kafka.ui.service.rbac.AccessControlService;
import java.util.List;
import java.util.Map;
import java.util.Optional;

@ -27,6 +29,8 @@ import reactor.core.publisher.Mono;
public class KsqlController extends AbstractController implements KsqlApi {

private final KsqlServiceV2 ksqlServiceV2;
private final AccessControlService accessControlService;
private final AuditService auditService;

@Override
public Mono<ResponseEntity<KsqlCommandV2ResponseDTO>> executeKsql(String clusterName,

@ -40,13 +44,13 @@ public class KsqlController extends AbstractController implements KsqlApi {
|
|||
.operationName("executeKsql")
|
||||
.operationParams(command)
|
||||
.build();
|
||||
return validateAccess(context).thenReturn(
|
||||
return accessControlService.validateAccess(context).thenReturn(
|
||||
new KsqlCommandV2ResponseDTO().pipeId(
|
||||
ksqlServiceV2.registerCommand(
|
||||
getCluster(clusterName),
|
||||
command.getKsql(),
|
||||
Optional.ofNullable(command.getStreamsProperties()).orElse(Map.of()))))
|
||||
.doOnEach(sig -> audit(context, sig));
|
||||
.doOnEach(sig -> auditService.audit(context, sig));
|
||||
}
|
||||
)
|
||||
.map(ResponseEntity::ok);
|
||||
|
@ -62,7 +66,7 @@ public class KsqlController extends AbstractController implements KsqlApi {
|
|||
.operationName("openKsqlResponsePipe")
|
||||
.build();
|
||||
|
||||
return validateAccess(context).thenReturn(
|
||||
return accessControlService.validateAccess(context).thenReturn(
|
||||
ResponseEntity.ok(ksqlServiceV2.execute(pipeId)
|
||||
.map(table -> new KsqlResponseDTO()
|
||||
.table(
|
||||
|
@ -82,9 +86,9 @@ public class KsqlController extends AbstractController implements KsqlApi {
|
|||
.operationName("listStreams")
|
||||
.build();
|
||||
|
||||
return validateAccess(context)
|
||||
return accessControlService.validateAccess(context)
|
||||
.thenReturn(ResponseEntity.ok(ksqlServiceV2.listStreams(getCluster(clusterName))))
|
||||
.doOnEach(sig -> audit(context, sig));
|
||||
.doOnEach(sig -> auditService.audit(context, sig));
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -96,8 +100,8 @@ public class KsqlController extends AbstractController implements KsqlApi {
|
|||
.operationName("listTables")
|
||||
.build();
|
||||
|
||||
return validateAccess(context)
|
||||
return accessControlService.validateAccess(context)
|
||||
.thenReturn(ResponseEntity.ok(ksqlServiceV2.listTables(getCluster(clusterName))))
|
||||
.doOnEach(sig -> audit(context, sig));
|
||||
.doOnEach(sig -> auditService.audit(context, sig));
|
||||
}
|
||||
}
|
||||
|
|
|
@ -24,7 +24,8 @@ import com.provectus.kafka.ui.model.rbac.permission.AuditAction;
|
|||
import com.provectus.kafka.ui.model.rbac.permission.TopicAction;
|
||||
import com.provectus.kafka.ui.service.DeserializationService;
|
||||
import com.provectus.kafka.ui.service.MessagesService;
|
||||
import com.provectus.kafka.ui.util.DynamicConfigOperations;
|
||||
import com.provectus.kafka.ui.service.audit.AuditService;
|
||||
import com.provectus.kafka.ui.service.rbac.AccessControlService;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Optional;
|
||||
|
@ -32,7 +33,6 @@ import javax.annotation.Nullable;
|
|||
import javax.validation.Valid;
|
||||
import lombok.RequiredArgsConstructor;
|
||||
import lombok.extern.slf4j.Slf4j;
|
||||
import org.apache.commons.lang3.StringUtils;
|
||||
import org.apache.commons.lang3.tuple.Pair;
|
||||
import org.apache.kafka.common.TopicPartition;
|
||||
import org.springframework.http.ResponseEntity;
|
||||
|
@ -49,7 +49,8 @@ public class MessagesController extends AbstractController implements MessagesAp
|
|||
|
||||
private final MessagesService messagesService;
|
||||
private final DeserializationService deserializationService;
|
||||
private final DynamicConfigOperations dynamicConfigOperations;
|
||||
private final AccessControlService accessControlService;
|
||||
private final AuditService auditService;
|
||||
|
||||
@Override
|
||||
public Mono<ResponseEntity<Void>> deleteTopicMessages(
|
||||
|
@ -62,13 +63,13 @@ public class MessagesController extends AbstractController implements MessagesAp
|
|||
.topicActions(MESSAGES_DELETE)
|
||||
.build();
|
||||
|
||||
return validateAccess(context).<ResponseEntity<Void>>then(
|
||||
return accessControlService.validateAccess(context).<ResponseEntity<Void>>then(
|
||||
messagesService.deleteTopicMessages(
|
||||
getCluster(clusterName),
|
||||
topicName,
|
||||
Optional.ofNullable(partitions).orElse(List.of())
|
||||
).thenReturn(ResponseEntity.ok().build())
|
||||
).doOnEach(sig -> audit(context, sig));
|
||||
).doOnEach(sig -> auditService.audit(context, sig));
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -97,10 +98,6 @@ public class MessagesController extends AbstractController implements MessagesAp
|
|||
.topicActions(MESSAGES_READ)
|
||||
.operationName("getTopicMessages");
|
||||
|
||||
if (StringUtils.isNoneEmpty(q) && MessageFilterTypeDTO.GROOVY_SCRIPT == filterQueryType) {
|
||||
dynamicConfigOperations.checkIfFilteringGroovyEnabled();
|
||||
}
|
||||
|
||||
if (auditService.isAuditTopic(getCluster(clusterName), topicName)) {
|
||||
contextBuilder.auditActions(AuditAction.VIEW);
|
||||
}
|
||||
|
@ -123,9 +120,9 @@ public class MessagesController extends AbstractController implements MessagesAp
|
|||
);
|
||||
|
||||
var context = contextBuilder.build();
|
||||
return validateAccess(context)
|
||||
return accessControlService.validateAccess(context)
|
||||
.then(job)
|
||||
.doOnEach(sig -> audit(context, sig));
|
||||
.doOnEach(sig -> auditService.audit(context, sig));
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -140,11 +137,11 @@ public class MessagesController extends AbstractController implements MessagesAp
|
|||
.operationName("sendTopicMessages")
|
||||
.build();
|
||||
|
||||
return validateAccess(context).then(
|
||||
return accessControlService.validateAccess(context).then(
|
||||
createTopicMessage.flatMap(msg ->
|
||||
messagesService.sendMessage(getCluster(clusterName), topicName, msg).then()
|
||||
).map(ResponseEntity::ok)
|
||||
).doOnEach(sig -> audit(context, sig));
|
||||
).doOnEach(sig -> auditService.audit(context, sig));
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -195,7 +192,7 @@ public class MessagesController extends AbstractController implements MessagesAp
|
|||
? deserializationService.getSerdesForSerialize(getCluster(clusterName), topicName, VALUE)
|
||||
: deserializationService.getSerdesForDeserialize(getCluster(clusterName), topicName, VALUE));
|
||||
|
||||
return validateAccess(context).then(
|
||||
return accessControlService.validateAccess(context).then(
|
||||
Mono.just(dto)
|
||||
.subscribeOn(Schedulers.boundedElastic())
|
||||
.map(ResponseEntity::ok)
|
||||
|
|
|
@ -13,8 +13,11 @@ import com.provectus.kafka.ui.model.SchemaSubjectsResponseDTO;
|
|||
import com.provectus.kafka.ui.model.rbac.AccessContext;
|
||||
import com.provectus.kafka.ui.model.rbac.permission.SchemaAction;
|
||||
import com.provectus.kafka.ui.service.SchemaRegistryService;
|
||||
import com.provectus.kafka.ui.service.audit.AuditService;
|
||||
import com.provectus.kafka.ui.service.rbac.AccessControlService;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.stream.Collectors;
|
||||
import javax.validation.Valid;
|
||||
import lombok.RequiredArgsConstructor;
|
||||
import lombok.extern.slf4j.Slf4j;
|
||||
|
@ -35,6 +38,8 @@ public class SchemasController extends AbstractController implements SchemasApi
|
|||
private final KafkaSrMapper kafkaSrMapper = new KafkaSrMapperImpl();
|
||||
|
||||
private final SchemaRegistryService schemaRegistryService;
|
||||
private final AccessControlService accessControlService;
|
||||
private final AuditService auditService;
|
||||
|
||||
@Override
|
||||
protected KafkaCluster getCluster(String clusterName) {
|
||||
|
@ -56,7 +61,7 @@ public class SchemasController extends AbstractController implements SchemasApi
|
|||
.operationName("checkSchemaCompatibility")
|
||||
.build();
|
||||
|
||||
return validateAccess(context).then(
|
||||
return accessControlService.validateAccess(context).then(
|
||||
newSchemaSubjectMono.flatMap(subjectDTO ->
|
||||
schemaRegistryService.checksSchemaCompatibility(
|
||||
getCluster(clusterName),
|
||||
|
@ -65,7 +70,7 @@ public class SchemasController extends AbstractController implements SchemasApi
|
|||
))
|
||||
.map(kafkaSrMapper::toDto)
|
||||
.map(ResponseEntity::ok)
|
||||
).doOnEach(sig -> audit(context, sig));
|
||||
).doOnEach(sig -> auditService.audit(context, sig));
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -78,7 +83,7 @@ public class SchemasController extends AbstractController implements SchemasApi
|
|||
.operationName("createNewSchema")
|
||||
.build();
|
||||
|
||||
return validateAccess(context).then(
|
||||
return accessControlService.validateAccess(context).then(
|
||||
newSchemaSubjectMono.flatMap(newSubject ->
|
||||
schemaRegistryService.registerNewSchema(
|
||||
getCluster(clusterName),
|
||||
|
@ -87,7 +92,7 @@ public class SchemasController extends AbstractController implements SchemasApi
|
|||
)
|
||||
).map(kafkaSrMapper::toDto)
|
||||
.map(ResponseEntity::ok)
|
||||
).doOnEach(sig -> audit(context, sig));
|
||||
).doOnEach(sig -> auditService.audit(context, sig));
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -100,9 +105,9 @@ public class SchemasController extends AbstractController implements SchemasApi
|
|||
.operationName("deleteLatestSchema")
|
||||
.build();
|
||||
|
||||
return validateAccess(context).then(
|
||||
return accessControlService.validateAccess(context).then(
|
||||
schemaRegistryService.deleteLatestSchemaSubject(getCluster(clusterName), subject)
|
||||
.doOnEach(sig -> audit(context, sig))
|
||||
.doOnEach(sig -> auditService.audit(context, sig))
|
||||
.thenReturn(ResponseEntity.ok().build())
|
||||
);
|
||||
}
|
||||
|
@ -117,9 +122,9 @@ public class SchemasController extends AbstractController implements SchemasApi
|
|||
.operationName("deleteSchema")
|
||||
.build();
|
||||
|
||||
return validateAccess(context).then(
|
||||
return accessControlService.validateAccess(context).then(
|
||||
schemaRegistryService.deleteSchemaSubjectEntirely(getCluster(clusterName), subject)
|
||||
.doOnEach(sig -> audit(context, sig))
|
||||
.doOnEach(sig -> auditService.audit(context, sig))
|
||||
.thenReturn(ResponseEntity.ok().build())
|
||||
);
|
||||
}
|
||||
|
@ -134,9 +139,9 @@ public class SchemasController extends AbstractController implements SchemasApi
|
|||
.operationName("deleteSchemaByVersion")
|
||||
.build();
|
||||
|
||||
return validateAccess(context).then(
|
||||
return accessControlService.validateAccess(context).then(
|
||||
schemaRegistryService.deleteSchemaSubjectByVersion(getCluster(clusterName), subjectName, version)
|
||||
.doOnEach(sig -> audit(context, sig))
|
||||
.doOnEach(sig -> auditService.audit(context, sig))
|
||||
.thenReturn(ResponseEntity.ok().build())
|
||||
);
|
||||
}
|
||||
|
@ -155,9 +160,9 @@ public class SchemasController extends AbstractController implements SchemasApi
|
|||
schemaRegistryService.getAllVersionsBySubject(getCluster(clusterName), subjectName)
|
||||
.map(kafkaSrMapper::toDto);
|
||||
|
||||
return validateAccess(context)
|
||||
return accessControlService.validateAccess(context)
|
||||
.thenReturn(ResponseEntity.ok(schemas))
|
||||
.doOnEach(sig -> audit(context, sig));
|
||||
.doOnEach(sig -> auditService.audit(context, sig));
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -180,11 +185,11 @@ public class SchemasController extends AbstractController implements SchemasApi
|
|||
.operationName("getLatestSchema")
|
||||
.build();
|
||||
|
||||
return validateAccess(context).then(
|
||||
return accessControlService.validateAccess(context).then(
|
||||
schemaRegistryService.getLatestSchemaVersionBySubject(getCluster(clusterName), subject)
|
||||
.map(kafkaSrMapper::toDto)
|
||||
.map(ResponseEntity::ok)
|
||||
).doOnEach(sig -> audit(context, sig));
|
||||
).doOnEach(sig -> auditService.audit(context, sig));
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -198,12 +203,12 @@ public class SchemasController extends AbstractController implements SchemasApi
|
|||
.operationParams(Map.of("subject", subject, "version", version))
|
||||
.build();
|
||||
|
||||
return validateAccess(context).then(
|
||||
return accessControlService.validateAccess(context).then(
|
||||
schemaRegistryService.getSchemaSubjectByVersion(
|
||||
getCluster(clusterName), subject, version)
|
||||
.map(kafkaSrMapper::toDto)
|
||||
.map(ResponseEntity::ok)
|
||||
).doOnEach(sig -> audit(context, sig));
|
||||
).doOnEach(sig -> auditService.audit(context, sig));
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -234,12 +239,12 @@ public class SchemasController extends AbstractController implements SchemasApi
|
|||
List<String> subjectsToRender = filteredSubjects.stream()
|
||||
.skip(subjectToSkip)
|
||||
.limit(pageSize)
|
||||
.toList();
|
||||
.collect(Collectors.toList());
|
||||
return schemaRegistryService.getAllLatestVersionSchemas(getCluster(clusterName), subjectsToRender)
|
||||
.map(subjs -> subjs.stream().map(kafkaSrMapper::toDto).toList())
|
||||
.map(subjs -> new SchemaSubjectsResponseDTO().pageCount(totalPages).schemas(subjs));
|
||||
}).map(ResponseEntity::ok)
|
||||
.doOnEach(sig -> audit(context, sig));
|
||||
.doOnEach(sig -> auditService.audit(context, sig));
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -252,14 +257,14 @@ public class SchemasController extends AbstractController implements SchemasApi
|
|||
.operationName("updateGlobalSchemaCompatibilityLevel")
|
||||
.build();
|
||||
|
||||
return validateAccess(context).then(
|
||||
return accessControlService.validateAccess(context).then(
|
||||
compatibilityLevelMono
|
||||
.flatMap(compatibilityLevelDTO ->
|
||||
schemaRegistryService.updateGlobalSchemaCompatibility(
|
||||
getCluster(clusterName),
|
||||
kafkaSrMapper.fromDto(compatibilityLevelDTO.getCompatibility())
|
||||
))
|
||||
.doOnEach(sig -> audit(context, sig))
|
||||
.doOnEach(sig -> auditService.audit(context, sig))
|
||||
.thenReturn(ResponseEntity.ok().build())
|
||||
);
|
||||
}
|
||||
|
@ -275,7 +280,7 @@ public class SchemasController extends AbstractController implements SchemasApi
|
|||
.operationParams(Map.of("subject", subject))
|
||||
.build();
|
||||
|
||||
return validateAccess(context).then(
|
||||
return accessControlService.validateAccess(context).then(
|
||||
compatibilityLevelMono
|
||||
.flatMap(compatibilityLevelDTO ->
|
||||
schemaRegistryService.updateSchemaCompatibility(
|
||||
|
@ -283,7 +288,7 @@ public class SchemasController extends AbstractController implements SchemasApi
|
|||
subject,
|
||||
kafkaSrMapper.fromDto(compatibilityLevelDTO.getCompatibility())
|
||||
))
|
||||
.doOnEach(sig -> audit(context, sig))
|
||||
.doOnEach(sig -> auditService.audit(context, sig))
|
||||
.thenReturn(ResponseEntity.ok().build())
|
||||
);
|
||||
}
|
||||
|
|
|
@ -22,12 +22,13 @@ import com.provectus.kafka.ui.model.TopicConfigDTO;
|
|||
import com.provectus.kafka.ui.model.TopicCreationDTO;
|
||||
import com.provectus.kafka.ui.model.TopicDTO;
|
||||
import com.provectus.kafka.ui.model.TopicDetailsDTO;
|
||||
import com.provectus.kafka.ui.model.TopicProducerStateDTO;
|
||||
import com.provectus.kafka.ui.model.TopicUpdateDTO;
|
||||
import com.provectus.kafka.ui.model.TopicsResponseDTO;
|
||||
import com.provectus.kafka.ui.model.rbac.AccessContext;
|
||||
import com.provectus.kafka.ui.service.TopicsService;
|
||||
import com.provectus.kafka.ui.service.analyze.TopicAnalysisService;
|
||||
import com.provectus.kafka.ui.service.audit.AuditService;
|
||||
import com.provectus.kafka.ui.service.rbac.AccessControlService;
|
||||
import java.util.Comparator;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
@ -52,6 +53,8 @@ public class TopicsController extends AbstractController implements TopicsApi {
|
|||
private final TopicsService topicsService;
|
||||
private final TopicAnalysisService topicAnalysisService;
|
||||
private final ClusterMapper clusterMapper;
|
||||
private final AccessControlService accessControlService;
|
||||
private final AuditService auditService;
|
||||
|
||||
@Override
|
||||
public Mono<ResponseEntity<TopicDTO>> createTopic(
|
||||
|
@ -64,12 +67,12 @@ public class TopicsController extends AbstractController implements TopicsApi {
|
|||
.operationParams(topicCreation)
|
||||
.build();
|
||||
|
||||
return validateAccess(context)
|
||||
return accessControlService.validateAccess(context)
|
||||
.then(topicsService.createTopic(getCluster(clusterName), topicCreation))
|
||||
.map(clusterMapper::toTopic)
|
||||
.map(s -> new ResponseEntity<>(s, HttpStatus.OK))
|
||||
.switchIfEmpty(Mono.just(ResponseEntity.notFound().build()))
|
||||
.doOnEach(sig -> audit(context, sig));
|
||||
.doOnEach(sig -> auditService.audit(context, sig));
|
||||
});
|
||||
}
|
||||
|
||||
|
@ -83,11 +86,11 @@ public class TopicsController extends AbstractController implements TopicsApi {
|
|||
.operationName("recreateTopic")
|
||||
.build();
|
||||
|
||||
return validateAccess(context).then(
|
||||
return accessControlService.validateAccess(context).then(
|
||||
topicsService.recreateTopic(getCluster(clusterName), topicName)
|
||||
.map(clusterMapper::toTopic)
|
||||
.map(s -> new ResponseEntity<>(s, HttpStatus.CREATED))
|
||||
).doOnEach(sig -> audit(context, sig));
|
||||
).doOnEach(sig -> auditService.audit(context, sig));
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -102,11 +105,11 @@ public class TopicsController extends AbstractController implements TopicsApi {
|
|||
.operationParams(Map.of("newTopicName", newTopicName))
|
||||
.build();
|
||||
|
||||
return validateAccess(context)
|
||||
return accessControlService.validateAccess(context)
|
||||
.then(topicsService.cloneTopic(getCluster(clusterName), topicName, newTopicName)
|
||||
.map(clusterMapper::toTopic)
|
||||
.map(s -> new ResponseEntity<>(s, HttpStatus.CREATED))
|
||||
).doOnEach(sig -> audit(context, sig));
|
||||
).doOnEach(sig -> auditService.audit(context, sig));
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -120,11 +123,9 @@ public class TopicsController extends AbstractController implements TopicsApi {
.operationName("deleteTopic")
.build();

return validateAccess(context)
.then(
topicsService.deleteTopic(getCluster(clusterName), topicName)
.thenReturn(ResponseEntity.ok().<Void>build())
).doOnEach(sig -> audit(context, sig));
return accessControlService.validateAccess(context).then(
topicsService.deleteTopic(getCluster(clusterName), topicName).map(ResponseEntity::ok)
).doOnEach(sig -> auditService.audit(context, sig));
}

@ -139,15 +140,15 @@ public class TopicsController extends AbstractController implements TopicsApi {
|
|||
.operationName("getTopicConfigs")
|
||||
.build();
|
||||
|
||||
return validateAccess(context).then(
|
||||
return accessControlService.validateAccess(context).then(
|
||||
topicsService.getTopicConfigs(getCluster(clusterName), topicName)
|
||||
.map(lst -> lst.stream()
|
||||
.map(InternalTopicConfig::from)
|
||||
.map(clusterMapper::toTopicConfig)
|
||||
.toList())
|
||||
.collect(toList()))
|
||||
.map(Flux::fromIterable)
|
||||
.map(ResponseEntity::ok)
|
||||
).doOnEach(sig -> audit(context, sig));
|
||||
).doOnEach(sig -> auditService.audit(context, sig));
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -161,11 +162,11 @@ public class TopicsController extends AbstractController implements TopicsApi {
|
|||
.operationName("getTopicDetails")
|
||||
.build();
|
||||
|
||||
return validateAccess(context).then(
|
||||
return accessControlService.validateAccess(context).then(
|
||||
topicsService.getTopicDetails(getCluster(clusterName), topicName)
|
||||
.map(clusterMapper::toTopicDetails)
|
||||
.map(ResponseEntity::ok)
|
||||
).doOnEach(sig -> audit(context, sig));
|
||||
).doOnEach(sig -> auditService.audit(context, sig));
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -208,11 +209,11 @@ public class TopicsController extends AbstractController implements TopicsApi {
|
|||
return topicsService.loadTopics(getCluster(clusterName), topicsPage)
|
||||
.map(topicsToRender ->
|
||||
new TopicsResponseDTO()
|
||||
.topics(topicsToRender.stream().map(clusterMapper::toTopic).toList())
|
||||
.topics(topicsToRender.stream().map(clusterMapper::toTopic).collect(toList()))
|
||||
.pageCount(totalPages));
|
||||
})
|
||||
.map(ResponseEntity::ok)
|
||||
.doOnEach(sig -> audit(context, sig));
|
||||
.doOnEach(sig -> auditService.audit(context, sig));
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -227,12 +228,12 @@ public class TopicsController extends AbstractController implements TopicsApi {
|
|||
.operationName("updateTopic")
|
||||
.build();
|
||||
|
||||
return validateAccess(context).then(
|
||||
return accessControlService.validateAccess(context).then(
|
||||
topicsService
|
||||
.updateTopic(getCluster(clusterName), topicName, topicUpdate)
|
||||
.map(clusterMapper::toTopic)
|
||||
.map(ResponseEntity::ok)
|
||||
).doOnEach(sig -> audit(context, sig));
|
||||
).doOnEach(sig -> auditService.audit(context, sig));
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -247,11 +248,11 @@ public class TopicsController extends AbstractController implements TopicsApi {
|
|||
.topicActions(VIEW, EDIT)
|
||||
.build();
|
||||
|
||||
return validateAccess(context).then(
|
||||
return accessControlService.validateAccess(context).then(
|
||||
partitionsIncrease.flatMap(partitions ->
|
||||
topicsService.increaseTopicPartitions(getCluster(clusterName), topicName, partitions)
|
||||
).map(ResponseEntity::ok)
|
||||
).doOnEach(sig -> audit(context, sig));
|
||||
).doOnEach(sig -> auditService.audit(context, sig));
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -267,12 +268,12 @@ public class TopicsController extends AbstractController implements TopicsApi {
|
|||
.operationName("changeReplicationFactor")
|
||||
.build();
|
||||
|
||||
return validateAccess(context).then(
|
||||
return accessControlService.validateAccess(context).then(
|
||||
replicationFactorChange
|
||||
.flatMap(rfc ->
|
||||
topicsService.changeReplicationFactor(getCluster(clusterName), topicName, rfc))
|
||||
.map(ResponseEntity::ok)
|
||||
).doOnEach(sig -> audit(context, sig));
|
||||
).doOnEach(sig -> auditService.audit(context, sig));
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -285,9 +286,9 @@ public class TopicsController extends AbstractController implements TopicsApi {
|
|||
.operationName("analyzeTopic")
|
||||
.build();
|
||||
|
||||
return validateAccess(context).then(
|
||||
return accessControlService.validateAccess(context).then(
|
||||
topicAnalysisService.analyze(getCluster(clusterName), topicName)
|
||||
.doOnEach(sig -> audit(context, sig))
|
||||
.doOnEach(sig -> auditService.audit(context, sig))
|
||||
.thenReturn(ResponseEntity.ok().build())
|
||||
);
|
||||
}
|
||||
|
@ -302,9 +303,9 @@ public class TopicsController extends AbstractController implements TopicsApi {
|
|||
.operationName("cancelTopicAnalysis")
|
||||
.build();
|
||||
|
||||
return validateAccess(context)
|
||||
return accessControlService.validateAccess(context)
|
||||
.then(Mono.fromRunnable(() -> topicAnalysisService.cancelAnalysis(getCluster(clusterName), topicName)))
|
||||
.doOnEach(sig -> audit(context, sig))
|
||||
.doOnEach(sig -> auditService.audit(context, sig))
|
||||
.thenReturn(ResponseEntity.ok().build());
|
||||
}
|
||||
|
||||
|
@ -321,39 +322,11 @@ public class TopicsController extends AbstractController implements TopicsApi {
|
|||
.operationName("getTopicAnalysis")
|
||||
.build();
|
||||
|
||||
return validateAccess(context)
|
||||
return accessControlService.validateAccess(context)
|
||||
.thenReturn(topicAnalysisService.getTopicAnalysis(getCluster(clusterName), topicName)
|
||||
.map(ResponseEntity::ok)
|
||||
.orElseGet(() -> ResponseEntity.notFound().build()))
|
||||
.doOnEach(sig -> audit(context, sig));
|
||||
}
|
||||
|
||||
@Override
|
||||
public Mono<ResponseEntity<Flux<TopicProducerStateDTO>>> getActiveProducerStates(String clusterName,
|
||||
String topicName,
|
||||
ServerWebExchange exchange) {
|
||||
var context = AccessContext.builder()
|
||||
.cluster(clusterName)
|
||||
.topic(topicName)
|
||||
.topicActions(VIEW)
|
||||
.operationName("getActiveProducerStates")
|
||||
.build();
|
||||
|
||||
Comparator<TopicProducerStateDTO> ordering =
|
||||
Comparator.comparingInt(TopicProducerStateDTO::getPartition)
|
||||
.thenComparing(Comparator.comparing(TopicProducerStateDTO::getProducerId).reversed());
|
||||
|
||||
Flux<TopicProducerStateDTO> states = topicsService.getActiveProducersState(getCluster(clusterName), topicName)
|
||||
.flatMapMany(statesMap ->
|
||||
Flux.fromStream(
|
||||
statesMap.entrySet().stream()
|
||||
.flatMap(e -> e.getValue().stream().map(p -> clusterMapper.map(e.getKey().partition(), p)))
|
||||
.sorted(ordering)));
|
||||
|
||||
return validateAccess(context)
|
||||
.thenReturn(states)
|
||||
.map(ResponseEntity::ok)
|
||||
.doOnEach(sig -> audit(context, sig));
|
||||
.doOnEach(sig -> auditService.audit(context, sig));
|
||||
}
|
||||
|
||||
private Comparator<InternalTopic> getComparatorForTopic(
|
||||
|
|
|
@ -1,23 +1,38 @@
package com.provectus.kafka.ui.emitter;

import com.provectus.kafka.ui.model.TopicMessageEventDTO;
import java.time.Duration;
import java.time.Instant;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.common.utils.Bytes;
import reactor.core.publisher.FluxSink;

abstract class AbstractEmitter implements java.util.function.Consumer<FluxSink<TopicMessageEventDTO>> {
public abstract class AbstractEmitter {

private final MessagesProcessing messagesProcessing;
private final PollingSettings pollingSettings;
private final PollingThrottler throttler;
protected final PollingSettings pollingSettings;

protected AbstractEmitter(MessagesProcessing messagesProcessing, PollingSettings pollingSettings) {
this.messagesProcessing = messagesProcessing;
this.pollingSettings = pollingSettings;
this.throttler = pollingSettings.getPollingThrottler();
}

protected PolledRecords poll(FluxSink<TopicMessageEventDTO> sink, EnhancedConsumer consumer) {
var records = consumer.pollEnhanced(pollingSettings.getPollTimeout());
sendConsuming(sink, records);
protected ConsumerRecords<Bytes, Bytes> poll(
FluxSink<TopicMessageEventDTO> sink, Consumer<Bytes, Bytes> consumer) {
return poll(sink, consumer, pollingSettings.getPollTimeout());
}

protected ConsumerRecords<Bytes, Bytes> poll(
FluxSink<TopicMessageEventDTO> sink, Consumer<Bytes, Bytes> consumer, Duration timeout) {
Instant start = Instant.now();
ConsumerRecords<Bytes, Bytes> records = consumer.poll(timeout);
Instant finish = Instant.now();
int polledBytes = sendConsuming(sink, records, Duration.between(start, finish).toMillis());
throttler.throttleAfterPoll(polledBytes);
return records;
}

@ -25,16 +40,19 @@ abstract class AbstractEmitter implements java.util.function.Consumer<FluxSink<T
|
|||
return messagesProcessing.limitReached();
|
||||
}
|
||||
|
||||
protected void send(FluxSink<TopicMessageEventDTO> sink, Iterable<ConsumerRecord<Bytes, Bytes>> records) {
|
||||
messagesProcessing.send(sink, records);
|
||||
protected void sendMessage(FluxSink<TopicMessageEventDTO> sink,
|
||||
ConsumerRecord<Bytes, Bytes> msg) {
|
||||
messagesProcessing.sendMsg(sink, msg);
|
||||
}
|
||||
|
||||
protected void sendPhase(FluxSink<TopicMessageEventDTO> sink, String name) {
|
||||
messagesProcessing.sendPhase(sink, name);
|
||||
}
|
||||
|
||||
protected void sendConsuming(FluxSink<TopicMessageEventDTO> sink, PolledRecords records) {
|
||||
messagesProcessing.sentConsumingInfo(sink, records);
|
||||
protected int sendConsuming(FluxSink<TopicMessageEventDTO> sink,
|
||||
ConsumerRecords<Bytes, Bytes> records,
|
||||
long elapsed) {
|
||||
return messagesProcessing.sentConsumingInfo(sink, records, elapsed);
|
||||
}
|
||||
|
||||
protected void sendFinishStatsAndCompleteSink(FluxSink<TopicMessageEventDTO> sink) {
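Both poll variants above report how many bytes the batch contained so that consumption can be throttled before the next poll. A minimal, self-contained sketch of that idea, using Guava's RateLimiter as a hypothetical stand-in for PollingThrottler (whose implementation is not shown in this diff); the byte counts are simulated:

import com.google.common.util.concurrent.RateLimiter;

public class ThrottleSketch {
  public static void main(String[] args) {
    RateLimiter bytesPerSecond = RateLimiter.create(1_000_000); // 1 MB/s budget, arbitrary value
    int[] simulatedPollSizes = {250_000, 800_000, 500_000};     // bytes "returned" by three polls
    for (int polledBytes : simulatedPollSizes) {
      bytesPerSecond.acquire(polledBytes); // pay for this batch out of the byte budget (may sleep)
      System.out.println("processed " + polledBytes + " bytes");
    }
  }
}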
@ -1,60 +0,0 @@
|
|||
package com.provectus.kafka.ui.emitter;
|
||||
|
||||
import com.provectus.kafka.ui.model.ConsumerPosition;
|
||||
import com.provectus.kafka.ui.model.TopicMessageDTO;
|
||||
import com.provectus.kafka.ui.serdes.ConsumerRecordDeserializer;
|
||||
import java.util.Comparator;
|
||||
import java.util.Map;
|
||||
import java.util.TreeMap;
|
||||
import java.util.function.Predicate;
|
||||
import java.util.function.Supplier;
|
||||
import java.util.stream.Collectors;
|
||||
import org.apache.kafka.common.TopicPartition;
|
||||
|
||||
public class BackwardEmitter extends RangePollingEmitter {
|
||||
|
||||
public BackwardEmitter(Supplier<EnhancedConsumer> consumerSupplier,
|
||||
ConsumerPosition consumerPosition,
|
||||
int messagesPerPage,
|
||||
ConsumerRecordDeserializer deserializer,
|
||||
Predicate<TopicMessageDTO> filter,
|
||||
PollingSettings pollingSettings) {
|
||||
super(
|
||||
consumerSupplier,
|
||||
consumerPosition,
|
||||
messagesPerPage,
|
||||
new MessagesProcessing(
|
||||
deserializer,
|
||||
filter,
|
||||
false,
|
||||
messagesPerPage
|
||||
),
|
||||
pollingSettings
|
||||
);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected TreeMap<TopicPartition, FromToOffset> nextPollingRange(TreeMap<TopicPartition, FromToOffset> prevRange,
|
||||
SeekOperations seekOperations) {
|
||||
TreeMap<TopicPartition, Long> readToOffsets = new TreeMap<>(Comparator.comparingInt(TopicPartition::partition));
|
||||
if (prevRange.isEmpty()) {
|
||||
readToOffsets.putAll(seekOperations.getOffsetsForSeek());
|
||||
} else {
|
||||
readToOffsets.putAll(
|
||||
prevRange.entrySet()
|
||||
.stream()
|
||||
.collect(Collectors.toMap(Map.Entry::getKey, e -> e.getValue().from()))
|
||||
);
|
||||
}
|
||||
|
||||
int msgsToPollPerPartition = (int) Math.ceil((double) messagesPerPage / readToOffsets.size());
|
||||
TreeMap<TopicPartition, FromToOffset> result = new TreeMap<>(Comparator.comparingInt(TopicPartition::partition));
|
||||
readToOffsets.forEach((tp, toOffset) -> {
|
||||
long tpStartOffset = seekOperations.getBeginOffsets().get(tp);
|
||||
if (toOffset > tpStartOffset) {
|
||||
result.put(tp, new FromToOffset(Math.max(tpStartOffset, toOffset - msgsToPollPerPartition), toOffset));
|
||||
}
|
||||
});
|
||||
return result;
|
||||
}
|
||||
}
|
|
@ -0,0 +1,128 @@
|
|||
package com.provectus.kafka.ui.emitter;
|
||||
|
||||
import com.provectus.kafka.ui.model.ConsumerPosition;
|
||||
import com.provectus.kafka.ui.model.TopicMessageEventDTO;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collections;
|
||||
import java.util.Comparator;
|
||||
import java.util.List;
|
||||
import java.util.TreeMap;
|
||||
import java.util.function.Supplier;
|
||||
import lombok.extern.slf4j.Slf4j;
|
||||
import org.apache.kafka.clients.consumer.Consumer;
|
||||
import org.apache.kafka.clients.consumer.ConsumerRecord;
|
||||
import org.apache.kafka.clients.consumer.KafkaConsumer;
|
||||
import org.apache.kafka.common.TopicPartition;
|
||||
import org.apache.kafka.common.errors.InterruptException;
|
||||
import org.apache.kafka.common.utils.Bytes;
|
||||
import reactor.core.publisher.FluxSink;
|
||||
|
||||
@Slf4j
|
||||
public class BackwardRecordEmitter
|
||||
extends AbstractEmitter
|
||||
implements java.util.function.Consumer<FluxSink<TopicMessageEventDTO>> {
|
||||
|
||||
private final Supplier<KafkaConsumer<Bytes, Bytes>> consumerSupplier;
|
||||
private final ConsumerPosition consumerPosition;
|
||||
private final int messagesPerPage;
|
||||
|
||||
public BackwardRecordEmitter(
|
||||
Supplier<KafkaConsumer<Bytes, Bytes>> consumerSupplier,
|
||||
ConsumerPosition consumerPosition,
|
||||
int messagesPerPage,
|
||||
MessagesProcessing messagesProcessing,
|
||||
PollingSettings pollingSettings) {
|
||||
super(messagesProcessing, pollingSettings);
|
||||
this.consumerPosition = consumerPosition;
|
||||
this.messagesPerPage = messagesPerPage;
|
||||
this.consumerSupplier = consumerSupplier;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void accept(FluxSink<TopicMessageEventDTO> sink) {
|
||||
log.debug("Starting backward polling for {}", consumerPosition);
|
||||
try (KafkaConsumer<Bytes, Bytes> consumer = consumerSupplier.get()) {
|
||||
sendPhase(sink, "Created consumer");
|
||||
|
||||
var seekOperations = SeekOperations.create(consumer, consumerPosition);
|
||||
var readUntilOffsets = new TreeMap<TopicPartition, Long>(Comparator.comparingInt(TopicPartition::partition));
|
||||
readUntilOffsets.putAll(seekOperations.getOffsetsForSeek());
|
||||
|
||||
int msgsToPollPerPartition = (int) Math.ceil((double) messagesPerPage / readUntilOffsets.size());
|
||||
log.debug("'Until' offsets for polling: {}", readUntilOffsets);
|
||||
|
||||
while (!sink.isCancelled() && !readUntilOffsets.isEmpty() && !sendLimitReached()) {
|
||||
new TreeMap<>(readUntilOffsets).forEach((tp, readToOffset) -> {
|
||||
if (sink.isCancelled()) {
|
||||
return; //fast return in case of sink cancellation
|
||||
}
|
||||
long beginOffset = seekOperations.getBeginOffsets().get(tp);
|
||||
long readFromOffset = Math.max(beginOffset, readToOffset - msgsToPollPerPartition);
|
||||
|
||||
partitionPollIteration(tp, readFromOffset, readToOffset, consumer, sink)
|
||||
.forEach(r -> sendMessage(sink, r));
|
||||
|
||||
if (beginOffset == readFromOffset) {
|
||||
// we fully read this partition -> removing it from polling iterations
|
||||
readUntilOffsets.remove(tp);
|
||||
} else {
|
||||
// updating 'to' offset for next polling iteration
|
||||
readUntilOffsets.put(tp, readFromOffset);
|
||||
}
|
||||
});
|
||||
if (readUntilOffsets.isEmpty()) {
|
||||
log.debug("begin reached after partitions poll iteration");
|
||||
} else if (sink.isCancelled()) {
|
||||
log.debug("sink is cancelled after partitions poll iteration");
|
||||
}
|
||||
}
|
||||
sendFinishStatsAndCompleteSink(sink);
|
||||
log.debug("Polling finished");
|
||||
} catch (InterruptException kafkaInterruptException) {
|
||||
log.debug("Polling finished due to thread interruption");
|
||||
sink.complete();
|
||||
} catch (Exception e) {
|
||||
log.error("Error occurred while consuming records", e);
|
||||
sink.error(e);
|
||||
}
|
||||
}
|
||||
|
||||
private List<ConsumerRecord<Bytes, Bytes>> partitionPollIteration(
|
||||
TopicPartition tp,
|
||||
long fromOffset,
|
||||
long toOffset,
|
||||
Consumer<Bytes, Bytes> consumer,
|
||||
FluxSink<TopicMessageEventDTO> sink
|
||||
) {
|
||||
consumer.assign(Collections.singleton(tp));
|
||||
consumer.seek(tp, fromOffset);
|
||||
sendPhase(sink, String.format("Polling partition: %s from offset %s", tp, fromOffset));
|
||||
int desiredMsgsToPoll = (int) (toOffset - fromOffset);
|
||||
|
||||
var recordsToSend = new ArrayList<ConsumerRecord<Bytes, Bytes>>();
|
||||
|
||||
EmptyPollsCounter emptyPolls = pollingSettings.createEmptyPollsCounter();
|
||||
while (!sink.isCancelled()
|
||||
&& !sendLimitReached()
|
||||
&& recordsToSend.size() < desiredMsgsToPoll
|
||||
&& !emptyPolls.noDataEmptyPollsReached()) {
|
||||
var polledRecords = poll(sink, consumer, pollingSettings.getPartitionPollTimeout());
|
||||
emptyPolls.count(polledRecords);
|
||||
|
||||
log.debug("{} records polled from {}", polledRecords.count(), tp);
|
||||
|
||||
var filteredRecords = polledRecords.records(tp).stream()
|
||||
.filter(r -> r.offset() < toOffset)
|
||||
.toList();
|
||||
|
||||
if (!polledRecords.isEmpty() && filteredRecords.isEmpty()) {
|
||||
// we already read all messages in target offsets interval
|
||||
break;
|
||||
}
|
||||
recordsToSend.addAll(filteredRecords);
|
||||
}
|
||||
log.debug("{} records to send", recordsToSend.size());
|
||||
Collections.reverse(recordsToSend);
|
||||
return recordsToSend;
|
||||
}
|
||||
}
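The accept() loop above pages backwards: each iteration polls a window of at most msgsToPollPerPartition records that ends where the previous window began, stopping once the partition's begin offset is reached. An illustrative simulation of that window arithmetic with made-up offsets:

public class BackwardWindowSketch {
  public static void main(String[] args) {
    long beginOffset = 100;    // earliest available offset in the partition (hypothetical)
    long readToOffset = 130;   // first iteration ends at the newest offset we want
    int msgsPerIteration = 8;  // plays the role of msgsToPollPerPartition
    while (readToOffset > beginOffset) {
      long readFromOffset = Math.max(beginOffset, readToOffset - msgsPerIteration);
      System.out.printf("poll offsets [%d, %d)%n", readFromOffset, readToOffset);
      readToOffset = readFromOffset; // next iteration ends where this one started
    }
  }
}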
@ -2,6 +2,9 @@ package com.provectus.kafka.ui.emitter;

import com.provectus.kafka.ui.model.TopicMessageConsumingDTO;
import com.provectus.kafka.ui.model.TopicMessageEventDTO;
import com.provectus.kafka.ui.util.ConsumerRecordsUtil;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.common.utils.Bytes;
import reactor.core.publisher.FluxSink;

class ConsumingStats {

@ -9,37 +12,41 @@ class ConsumingStats {
|
|||
private long bytes = 0;
|
||||
private int records = 0;
|
||||
private long elapsed = 0;
|
||||
private int filterApplyErrors = 0;
|
||||
|
||||
void sendConsumingEvt(FluxSink<TopicMessageEventDTO> sink, PolledRecords polledRecords) {
|
||||
bytes += polledRecords.bytes();
|
||||
records += polledRecords.count();
|
||||
elapsed += polledRecords.elapsed().toMillis();
|
||||
/**
|
||||
* returns bytes polled.
|
||||
*/
|
||||
int sendConsumingEvt(FluxSink<TopicMessageEventDTO> sink,
|
||||
ConsumerRecords<Bytes, Bytes> polledRecords,
|
||||
long elapsed,
|
||||
int filterApplyErrors) {
|
||||
int polledBytes = ConsumerRecordsUtil.calculatePolledSize(polledRecords);
|
||||
bytes += polledBytes;
|
||||
this.records += polledRecords.count();
|
||||
this.elapsed += elapsed;
|
||||
sink.next(
|
||||
new TopicMessageEventDTO()
|
||||
.type(TopicMessageEventDTO.TypeEnum.CONSUMING)
|
||||
.consuming(createConsumingStats())
|
||||
.consuming(createConsumingStats(sink, filterApplyErrors))
|
||||
);
|
||||
return polledBytes;
|
||||
}
|
||||
|
||||
void incFilterApplyError() {
|
||||
filterApplyErrors++;
|
||||
}
|
||||
|
||||
void sendFinishEvent(FluxSink<TopicMessageEventDTO> sink) {
|
||||
void sendFinishEvent(FluxSink<TopicMessageEventDTO> sink, int filterApplyErrors) {
|
||||
sink.next(
|
||||
new TopicMessageEventDTO()
|
||||
.type(TopicMessageEventDTO.TypeEnum.DONE)
|
||||
.consuming(createConsumingStats())
|
||||
.consuming(createConsumingStats(sink, filterApplyErrors))
|
||||
);
|
||||
}
|
||||
|
||||
private TopicMessageConsumingDTO createConsumingStats() {
|
||||
private TopicMessageConsumingDTO createConsumingStats(FluxSink<TopicMessageEventDTO> sink,
|
||||
int filterApplyErrors) {
|
||||
return new TopicMessageConsumingDTO()
|
||||
.bytesConsumed(bytes)
|
||||
.elapsedMs(elapsed)
|
||||
.isCancelled(false)
|
||||
.bytesConsumed(this.bytes)
|
||||
.elapsedMs(this.elapsed)
|
||||
.isCancelled(sink.isCancelled())
|
||||
.filterApplyErrors(filterApplyErrors)
|
||||
.messagesConsumed(records);
|
||||
.messagesConsumed(this.records);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -0,0 +1,28 @@
package com.provectus.kafka.ui.emitter;

import org.apache.kafka.clients.consumer.ConsumerRecords;

// In some situations it is hard to say whether records range (between two offsets) was fully polled.
// This happens when we have holes in records sequences that is usual case for compact topics or
// topics with transactional writes. In such cases if you want to poll all records between offsets X and Y
// there is no guarantee that you will ever see record with offset Y.
// To workaround this we can assume that after N consecutive empty polls all target messages were read.
public class EmptyPollsCounter {

private final int maxEmptyPolls;

private int emptyPolls = 0;

EmptyPollsCounter(int maxEmptyPolls) {
this.maxEmptyPolls = maxEmptyPolls;
}

public void count(ConsumerRecords<?, ?> polled) {
emptyPolls = polled.isEmpty() ? emptyPolls + 1 : 0;
}

public boolean noDataEmptyPollsReached() {
return emptyPolls >= maxEmptyPolls;
}

}
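A minimal sketch of how a counter like this is meant to be used inside a poll loop; the increment-or-reset rule is inlined and the poll results are simulated record counts, so the example runs without Kafka or the class above:

import java.util.List;

public class EmptyPollsSketch {
  public static void main(String[] args) {
    int maxEmptyPolls = 3; // same default the diff adds to PollingSettings (DEFAULT_NO_DATA_EMPTY_POLLS)
    int emptyPolls = 0;
    List<Integer> simulatedPollCounts = List.of(5, 0, 2, 0, 0, 0, 4); // records returned per poll
    for (int polledCount : simulatedPollCounts) {
      emptyPolls = (polledCount == 0) ? emptyPolls + 1 : 0; // same rule as EmptyPollsCounter.count()
      if (emptyPolls >= maxEmptyPolls) {
        System.out.println("three empty polls in a row - assume the offset range is fully read");
        break; // the emitter would drop this partition from further iterations here
      }
      System.out.println("got " + polledCount + " records");
    }
  }
}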
@ -1,82 +0,0 @@
|
|||
package com.provectus.kafka.ui.emitter;
|
||||
|
||||
import com.google.common.base.Preconditions;
|
||||
import com.google.common.base.Stopwatch;
|
||||
import com.provectus.kafka.ui.util.ApplicationMetrics;
|
||||
import java.time.Duration;
|
||||
import java.util.Collection;
|
||||
import java.util.List;
|
||||
import java.util.Properties;
|
||||
import java.util.Set;
|
||||
import java.util.regex.Pattern;
|
||||
import java.util.stream.Collectors;
|
||||
import lombok.RequiredArgsConstructor;
|
||||
import lombok.experimental.Delegate;
|
||||
import org.apache.kafka.clients.consumer.Consumer;
|
||||
import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;
|
||||
import org.apache.kafka.clients.consumer.ConsumerRecords;
|
||||
import org.apache.kafka.clients.consumer.KafkaConsumer;
|
||||
import org.apache.kafka.common.TopicPartition;
|
||||
import org.apache.kafka.common.serialization.BytesDeserializer;
|
||||
import org.apache.kafka.common.utils.Bytes;
|
||||
|
||||
|
||||
public class EnhancedConsumer extends KafkaConsumer<Bytes, Bytes> {
|
||||
|
||||
private final PollingThrottler throttler;
|
||||
private final ApplicationMetrics metrics;
|
||||
private String pollingTopic;
|
||||
|
||||
public EnhancedConsumer(Properties properties,
|
||||
PollingThrottler throttler,
|
||||
ApplicationMetrics metrics) {
|
||||
super(properties, new BytesDeserializer(), new BytesDeserializer());
|
||||
this.throttler = throttler;
|
||||
this.metrics = metrics;
|
||||
metrics.activeConsumers().incrementAndGet();
|
||||
}
|
||||
|
||||
public PolledRecords pollEnhanced(Duration dur) {
|
||||
var stopwatch = Stopwatch.createStarted();
|
||||
ConsumerRecords<Bytes, Bytes> polled = poll(dur);
|
||||
PolledRecords polledEnhanced = PolledRecords.create(polled, stopwatch.elapsed());
|
||||
var throttled = throttler.throttleAfterPoll(polledEnhanced.bytes());
|
||||
metrics.meterPolledRecords(pollingTopic, polledEnhanced, throttled);
|
||||
return polledEnhanced;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void assign(Collection<TopicPartition> partitions) {
|
||||
super.assign(partitions);
|
||||
Set<String> assignedTopics = partitions.stream().map(TopicPartition::topic).collect(Collectors.toSet());
|
||||
Preconditions.checkState(assignedTopics.size() == 1);
|
||||
this.pollingTopic = assignedTopics.iterator().next();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void subscribe(Pattern pattern) {
|
||||
throw new UnsupportedOperationException();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void subscribe(Collection<String> topics) {
|
||||
throw new UnsupportedOperationException();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void subscribe(Pattern pattern, ConsumerRebalanceListener listener) {
|
||||
throw new UnsupportedOperationException();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void subscribe(Collection<String> topics, ConsumerRebalanceListener listener) {
|
||||
throw new UnsupportedOperationException();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void close(Duration timeout) {
|
||||
metrics.activeConsumers().decrementAndGet();
|
||||
super.close(timeout);
|
||||
}
|
||||
|
||||
}
|
|
@ -1,61 +0,0 @@
|
|||
package com.provectus.kafka.ui.emitter;
|
||||
|
||||
import com.provectus.kafka.ui.model.ConsumerPosition;
|
||||
import com.provectus.kafka.ui.model.TopicMessageDTO;
|
||||
import com.provectus.kafka.ui.serdes.ConsumerRecordDeserializer;
|
||||
import java.util.Comparator;
|
||||
import java.util.Map;
|
||||
import java.util.TreeMap;
|
||||
import java.util.function.Predicate;
|
||||
import java.util.function.Supplier;
|
||||
import java.util.stream.Collectors;
|
||||
import org.apache.kafka.common.TopicPartition;
|
||||
|
||||
public class ForwardEmitter extends RangePollingEmitter {
|
||||
|
||||
public ForwardEmitter(Supplier<EnhancedConsumer> consumerSupplier,
|
||||
ConsumerPosition consumerPosition,
|
||||
int messagesPerPage,
|
||||
ConsumerRecordDeserializer deserializer,
|
||||
Predicate<TopicMessageDTO> filter,
|
||||
PollingSettings pollingSettings) {
|
||||
super(
|
||||
consumerSupplier,
|
||||
consumerPosition,
|
||||
messagesPerPage,
|
||||
new MessagesProcessing(
|
||||
deserializer,
|
||||
filter,
|
||||
true,
|
||||
messagesPerPage
|
||||
),
|
||||
pollingSettings
|
||||
);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected TreeMap<TopicPartition, FromToOffset> nextPollingRange(TreeMap<TopicPartition, FromToOffset> prevRange,
|
||||
SeekOperations seekOperations) {
|
||||
TreeMap<TopicPartition, Long> readFromOffsets = new TreeMap<>(Comparator.comparingInt(TopicPartition::partition));
|
||||
if (prevRange.isEmpty()) {
|
||||
readFromOffsets.putAll(seekOperations.getOffsetsForSeek());
|
||||
} else {
|
||||
readFromOffsets.putAll(
|
||||
prevRange.entrySet()
|
||||
.stream()
|
||||
.collect(Collectors.toMap(Map.Entry::getKey, e -> e.getValue().to()))
|
||||
);
|
||||
}
|
||||
|
||||
int msgsToPollPerPartition = (int) Math.ceil((double) messagesPerPage / readFromOffsets.size());
|
||||
TreeMap<TopicPartition, FromToOffset> result = new TreeMap<>(Comparator.comparingInt(TopicPartition::partition));
|
||||
readFromOffsets.forEach((tp, fromOffset) -> {
|
||||
long tpEndOffset = seekOperations.getEndOffsets().get(tp);
|
||||
if (fromOffset < tpEndOffset) {
|
||||
result.put(tp, new FromToOffset(fromOffset, Math.min(tpEndOffset, fromOffset + msgsToPollPerPartition)));
|
||||
}
|
||||
});
|
||||
return result;
|
||||
}
|
||||
|
||||
}
|
|
@ -0,0 +1,66 @@
|
|||
package com.provectus.kafka.ui.emitter;
|
||||
|
||||
import com.provectus.kafka.ui.model.ConsumerPosition;
|
||||
import com.provectus.kafka.ui.model.TopicMessageEventDTO;
|
||||
import java.util.function.Supplier;
|
||||
import lombok.extern.slf4j.Slf4j;
|
||||
import org.apache.kafka.clients.consumer.ConsumerRecord;
|
||||
import org.apache.kafka.clients.consumer.ConsumerRecords;
|
||||
import org.apache.kafka.clients.consumer.KafkaConsumer;
|
||||
import org.apache.kafka.common.errors.InterruptException;
|
||||
import org.apache.kafka.common.utils.Bytes;
|
||||
import reactor.core.publisher.FluxSink;
|
||||
|
||||
@Slf4j
|
||||
public class ForwardRecordEmitter
|
||||
extends AbstractEmitter
|
||||
implements java.util.function.Consumer<FluxSink<TopicMessageEventDTO>> {
|
||||
|
||||
private final Supplier<KafkaConsumer<Bytes, Bytes>> consumerSupplier;
|
||||
private final ConsumerPosition position;
|
||||
|
||||
public ForwardRecordEmitter(
|
||||
Supplier<KafkaConsumer<Bytes, Bytes>> consumerSupplier,
|
||||
ConsumerPosition position,
|
||||
MessagesProcessing messagesProcessing,
|
||||
PollingSettings pollingSettings) {
|
||||
super(messagesProcessing, pollingSettings);
|
||||
this.position = position;
|
||||
this.consumerSupplier = consumerSupplier;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void accept(FluxSink<TopicMessageEventDTO> sink) {
|
||||
log.debug("Starting forward polling for {}", position);
|
||||
try (KafkaConsumer<Bytes, Bytes> consumer = consumerSupplier.get()) {
|
||||
sendPhase(sink, "Assigning partitions");
|
||||
var seekOperations = SeekOperations.create(consumer, position);
|
||||
seekOperations.assignAndSeekNonEmptyPartitions();
|
||||
|
||||
EmptyPollsCounter emptyPolls = pollingSettings.createEmptyPollsCounter();
|
||||
while (!sink.isCancelled()
|
||||
&& !sendLimitReached()
|
||||
&& !seekOperations.assignedPartitionsFullyPolled()
|
||||
&& !emptyPolls.noDataEmptyPollsReached()) {
|
||||
|
||||
sendPhase(sink, "Polling");
|
||||
ConsumerRecords<Bytes, Bytes> records = poll(sink, consumer);
|
||||
emptyPolls.count(records);
|
||||
|
||||
log.debug("{} records polled", records.count());
|
||||
|
||||
for (ConsumerRecord<Bytes, Bytes> msg : records) {
|
||||
sendMessage(sink, msg);
|
||||
}
|
||||
}
|
||||
sendFinishStatsAndCompleteSink(sink);
|
||||
log.debug("Polling finished");
|
||||
} catch (InterruptException kafkaInterruptException) {
|
||||
log.debug("Polling finished due to thread interruption");
|
||||
sink.complete();
|
||||
} catch (Exception e) {
|
||||
log.error("Error occurred while consuming records", e);
|
||||
sink.error(e);
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,75 +1,71 @@
|
|||
package com.provectus.kafka.ui.emitter;
|
||||
|
||||
import static java.util.stream.Collectors.collectingAndThen;
|
||||
import static java.util.stream.Collectors.groupingBy;
|
||||
import static java.util.stream.Collectors.toList;
|
||||
|
||||
import com.google.common.annotations.VisibleForTesting;
|
||||
import com.google.common.collect.Iterables;
|
||||
import com.google.common.collect.Streams;
|
||||
import com.provectus.kafka.ui.model.TopicMessageDTO;
|
||||
import com.provectus.kafka.ui.model.TopicMessageEventDTO;
|
||||
import com.provectus.kafka.ui.model.TopicMessagePhaseDTO;
|
||||
import com.provectus.kafka.ui.serdes.ConsumerRecordDeserializer;
|
||||
import java.util.Comparator;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.TreeMap;
|
||||
import java.util.function.Predicate;
|
||||
import javax.annotation.Nullable;
|
||||
import lombok.RequiredArgsConstructor;
|
||||
import lombok.extern.slf4j.Slf4j;
|
||||
import org.apache.kafka.clients.consumer.ConsumerRecord;
|
||||
import org.apache.kafka.clients.consumer.ConsumerRecords;
|
||||
import org.apache.kafka.common.utils.Bytes;
|
||||
import reactor.core.publisher.FluxSink;
|
||||
|
||||
@Slf4j
|
||||
@RequiredArgsConstructor
|
||||
class MessagesProcessing {
|
||||
public class MessagesProcessing {
|
||||
|
||||
private final ConsumingStats consumingStats = new ConsumingStats();
|
||||
private long sentMessages = 0;
|
||||
private int filterApplyErrors = 0;
|
||||
|
||||
private final ConsumerRecordDeserializer deserializer;
|
||||
private final Predicate<TopicMessageDTO> filter;
|
||||
private final boolean ascendingSortBeforeSend;
|
||||
private final @Nullable Integer limit;
|
||||
|
||||
public MessagesProcessing(ConsumerRecordDeserializer deserializer,
|
||||
Predicate<TopicMessageDTO> filter,
|
||||
@Nullable Integer limit) {
|
||||
this.deserializer = deserializer;
|
||||
this.filter = filter;
|
||||
this.limit = limit;
|
||||
}
|
||||
|
||||
boolean limitReached() {
|
||||
return limit != null && sentMessages >= limit;
|
||||
}
|
||||
|
||||
void send(FluxSink<TopicMessageEventDTO> sink, Iterable<ConsumerRecord<Bytes, Bytes>> polled) {
|
||||
sortForSending(polled, ascendingSortBeforeSend)
|
||||
.forEach(rec -> {
|
||||
if (!limitReached() && !sink.isCancelled()) {
|
||||
TopicMessageDTO topicMessage = deserializer.deserialize(rec);
|
||||
try {
|
||||
if (filter.test(topicMessage)) {
|
||||
sink.next(
|
||||
new TopicMessageEventDTO()
|
||||
.type(TopicMessageEventDTO.TypeEnum.MESSAGE)
|
||||
.message(topicMessage)
|
||||
);
|
||||
sentMessages++;
|
||||
}
|
||||
} catch (Exception e) {
|
||||
consumingStats.incFilterApplyError();
|
||||
log.trace("Error applying filter for message {}", topicMessage);
|
||||
}
|
||||
}
|
||||
});
|
||||
void sendMsg(FluxSink<TopicMessageEventDTO> sink, ConsumerRecord<Bytes, Bytes> rec) {
|
||||
if (!sink.isCancelled() && !limitReached()) {
|
||||
TopicMessageDTO topicMessage = deserializer.deserialize(rec);
|
||||
try {
|
||||
if (filter.test(topicMessage)) {
|
||||
sink.next(
|
||||
new TopicMessageEventDTO()
|
||||
.type(TopicMessageEventDTO.TypeEnum.MESSAGE)
|
||||
.message(topicMessage)
|
||||
);
|
||||
sentMessages++;
|
||||
}
|
||||
} catch (Exception e) {
|
||||
filterApplyErrors++;
|
||||
log.trace("Error applying filter for message {}", topicMessage);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void sentConsumingInfo(FluxSink<TopicMessageEventDTO> sink, PolledRecords polledRecords) {
|
||||
int sentConsumingInfo(FluxSink<TopicMessageEventDTO> sink,
|
||||
ConsumerRecords<Bytes, Bytes> polledRecords,
|
||||
long elapsed) {
|
||||
if (!sink.isCancelled()) {
|
||||
consumingStats.sendConsumingEvt(sink, polledRecords);
|
||||
return consumingStats.sendConsumingEvt(sink, polledRecords, elapsed, filterApplyErrors);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
void sendFinishEvent(FluxSink<TopicMessageEventDTO> sink) {
|
||||
if (!sink.isCancelled()) {
|
||||
consumingStats.sendFinishEvent(sink);
|
||||
consumingStats.sendFinishEvent(sink, filterApplyErrors);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -83,30 +79,4 @@ class MessagesProcessing {
|
|||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Sorting by timestamps, BUT requesting that records within same partitions should be ordered by offsets.
|
||||
*/
|
||||
@VisibleForTesting
|
||||
static Iterable<ConsumerRecord<Bytes, Bytes>> sortForSending(Iterable<ConsumerRecord<Bytes, Bytes>> records,
|
||||
boolean asc) {
|
||||
Comparator<ConsumerRecord> offsetComparator = asc
|
||||
? Comparator.comparingLong(ConsumerRecord::offset)
|
||||
: Comparator.<ConsumerRecord>comparingLong(ConsumerRecord::offset).reversed();
|
||||
|
||||
// partition -> sorted by offsets records
|
||||
Map<Integer, List<ConsumerRecord<Bytes, Bytes>>> perPartition = Streams.stream(records)
|
||||
.collect(
|
||||
groupingBy(
|
||||
ConsumerRecord::partition,
|
||||
TreeMap::new,
|
||||
collectingAndThen(toList(), lst -> lst.stream().sorted(offsetComparator).toList())));
|
||||
|
||||
Comparator<ConsumerRecord> tsComparator = asc
|
||||
? Comparator.comparing(ConsumerRecord::timestamp)
|
||||
: Comparator.<ConsumerRecord>comparingLong(ConsumerRecord::timestamp).reversed();
|
||||
|
||||
// merge-sorting records from partitions one by one using timestamp comparator
|
||||
return Iterables.mergeSorted(perPartition.values(), tsComparator);
|
||||
}
|
||||
|
||||
}
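The removed sortForSending above first orders each partition's records by offset and then merge-sorts those per-partition lists by timestamp with Guava's Iterables.mergeSorted. A self-contained illustration of that merge step using a plain record type (all names and values here are invented):

import com.google.common.collect.Iterables;
import java.util.Comparator;
import java.util.List;

public class MergeSortedSketch {
  record Rec(int partition, long offset, long timestamp) {}

  public static void main(String[] args) {
    // Each inner list is one partition's records, already sorted by offset.
    List<List<Rec>> perPartition = List.of(
        List.of(new Rec(0, 10, 100), new Rec(0, 11, 250)),
        List.of(new Rec(1, 7, 50), new Rec(1, 8, 300)));
    // Across partitions the merged stream is ordered by timestamp,
    // while records of the same partition keep their offset order.
    Iterable<Rec> merged =
        Iterables.mergeSorted(perPartition, Comparator.comparingLong(Rec::timestamp));
    merged.forEach(System.out::println); // p1/off7, p0/off10, p0/off11, p1/off8
  }
}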
@ -5,15 +5,15 @@ import java.util.Collection;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;
import lombok.Getter;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang3.mutable.MutableLong;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.common.TopicPartition;

@Slf4j
@Getter
class OffsetsInfo {
public class OffsetsInfo {

private final Consumer<?, ?> consumer;

@ -23,15 +23,16 @@ class OffsetsInfo {
|
|||
private final Set<TopicPartition> nonEmptyPartitions = new HashSet<>();
|
||||
private final Set<TopicPartition> emptyPartitions = new HashSet<>();
|
||||
|
||||
OffsetsInfo(Consumer<?, ?> consumer, String topic) {
|
||||
public OffsetsInfo(Consumer<?, ?> consumer, String topic) {
|
||||
this(consumer,
|
||||
consumer.partitionsFor(topic).stream()
|
||||
.map(pi -> new TopicPartition(topic, pi.partition()))
|
||||
.toList()
|
||||
.collect(Collectors.toList())
|
||||
);
|
||||
}
|
||||
|
||||
OffsetsInfo(Consumer<?, ?> consumer, Collection<TopicPartition> targetPartitions) {
|
||||
public OffsetsInfo(Consumer<?, ?> consumer,
|
||||
Collection<TopicPartition> targetPartitions) {
|
||||
this.consumer = consumer;
|
||||
this.beginOffsets = consumer.beginningOffsets(targetPartitions);
|
||||
this.endOffsets = consumer.endOffsets(targetPartitions);
|
||||
|
@ -45,8 +46,8 @@ class OffsetsInfo {
|
|||
});
|
||||
}
|
||||
|
||||
boolean assignedPartitionsFullyPolled() {
|
||||
for (var tp : consumer.assignment()) {
|
||||
public boolean assignedPartitionsFullyPolled() {
|
||||
for (var tp: consumer.assignment()) {
|
||||
Preconditions.checkArgument(endOffsets.containsKey(tp));
|
||||
if (endOffsets.get(tp) > consumer.position(tp)) {
|
||||
return false;
|
||||
|
@ -55,10 +56,4 @@ class OffsetsInfo {
|
|||
return true;
|
||||
}
|
||||
|
||||
long summaryOffsetsRange() {
|
||||
MutableLong cnt = new MutableLong();
|
||||
nonEmptyPartitions.forEach(tp -> cnt.add(endOffsets.get(tp) - beginOffsets.get(tp)));
|
||||
return cnt.getValue();
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@ -1,48 +0,0 @@
|
|||
package com.provectus.kafka.ui.emitter;
|
||||
|
||||
import java.time.Duration;
|
||||
import java.util.Iterator;
|
||||
import java.util.List;
|
||||
import org.apache.kafka.clients.consumer.ConsumerRecord;
|
||||
import org.apache.kafka.clients.consumer.ConsumerRecords;
|
||||
import org.apache.kafka.common.TopicPartition;
|
||||
import org.apache.kafka.common.header.Header;
|
||||
import org.apache.kafka.common.utils.Bytes;
|
||||
|
||||
public record PolledRecords(int count,
|
||||
int bytes,
|
||||
Duration elapsed,
|
||||
ConsumerRecords<Bytes, Bytes> records) implements Iterable<ConsumerRecord<Bytes, Bytes>> {
|
||||
|
||||
static PolledRecords create(ConsumerRecords<Bytes, Bytes> polled, Duration pollDuration) {
|
||||
return new PolledRecords(
|
||||
polled.count(),
|
||||
calculatePolledRecSize(polled),
|
||||
pollDuration,
|
||||
polled
|
||||
);
|
||||
}
|
||||
|
||||
public List<ConsumerRecord<Bytes, Bytes>> records(TopicPartition tp) {
|
||||
return records.records(tp);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Iterator<ConsumerRecord<Bytes, Bytes>> iterator() {
|
||||
return records.iterator();
|
||||
}
|
||||
|
||||
private static int calculatePolledRecSize(Iterable<ConsumerRecord<Bytes, Bytes>> recs) {
|
||||
int polledBytes = 0;
|
||||
for (ConsumerRecord<Bytes, Bytes> rec : recs) {
|
||||
for (Header header : rec.headers()) {
|
||||
polledBytes +=
|
||||
(header.key() != null ? header.key().getBytes().length : 0)
|
||||
+ (header.value() != null ? header.value().length : 0);
|
||||
}
|
||||
polledBytes += rec.key() == null ? 0 : rec.serializedKeySize();
|
||||
polledBytes += rec.value() == null ? 0 : rec.serializedValueSize();
|
||||
}
|
||||
return polledBytes;
|
||||
}
|
||||
}
|
|
@ -8,8 +8,13 @@ import java.util.function.Supplier;
|
|||
public class PollingSettings {
|
||||
|
||||
private static final Duration DEFAULT_POLL_TIMEOUT = Duration.ofMillis(1_000);
|
||||
private static final Duration DEFAULT_PARTITION_POLL_TIMEOUT = Duration.ofMillis(200);
|
||||
private static final int DEFAULT_NO_DATA_EMPTY_POLLS = 3;
|
||||
|
||||
private final Duration pollTimeout;
|
||||
private final Duration partitionPollTimeout;
|
||||
private final int notDataEmptyPolls; //see EmptyPollsCounter docs
|
||||
|
||||
private final Supplier<PollingThrottler> throttlerSupplier;
|
||||
|
||||
public static PollingSettings create(ClustersProperties.Cluster cluster,
|
||||
|
@ -21,8 +26,18 @@ public class PollingSettings {
|
|||
? Duration.ofMillis(pollingProps.getPollTimeoutMs())
|
||||
: DEFAULT_POLL_TIMEOUT;
|
||||
|
||||
var partitionPollTimeout = pollingProps.getPartitionPollTimeout() != null
|
||||
? Duration.ofMillis(pollingProps.getPartitionPollTimeout())
|
||||
: Duration.ofMillis(pollTimeout.toMillis() / 5);
|
||||
|
||||
int noDataEmptyPolls = pollingProps.getNoDataEmptyPolls() != null
|
||||
? pollingProps.getNoDataEmptyPolls()
|
||||
: DEFAULT_NO_DATA_EMPTY_POLLS;
|
||||
|
||||
return new PollingSettings(
|
||||
pollTimeout,
|
||||
partitionPollTimeout,
|
||||
noDataEmptyPolls,
|
||||
PollingThrottler.throttlerSupplier(cluster)
|
||||
);
|
||||
}
|
||||
|
@ -30,20 +45,34 @@ public class PollingSettings {
|
|||
public static PollingSettings createDefault() {
|
||||
return new PollingSettings(
|
||||
DEFAULT_POLL_TIMEOUT,
|
||||
DEFAULT_PARTITION_POLL_TIMEOUT,
|
||||
DEFAULT_NO_DATA_EMPTY_POLLS,
|
||||
PollingThrottler::noop
|
||||
);
|
||||
}
|
||||
|
||||
private PollingSettings(Duration pollTimeout,
|
||||
Duration partitionPollTimeout,
|
||||
int notDataEmptyPolls,
|
||||
Supplier<PollingThrottler> throttlerSupplier) {
|
||||
this.pollTimeout = pollTimeout;
|
||||
this.partitionPollTimeout = partitionPollTimeout;
|
||||
this.notDataEmptyPolls = notDataEmptyPolls;
|
||||
this.throttlerSupplier = throttlerSupplier;
|
||||
}
|
||||
|
||||
public EmptyPollsCounter createEmptyPollsCounter() {
|
||||
return new EmptyPollsCounter(notDataEmptyPolls);
|
||||
}
|
||||
|
||||
public Duration getPollTimeout() {
|
||||
return pollTimeout;
|
||||
}
|
||||
|
||||
public Duration getPartitionPollTimeout() {
|
||||
return partitionPollTimeout;
|
||||
}
|
||||
|
||||
public PollingThrottler getPollingThrottler() {
|
||||
return throttlerSupplier.get();
|
||||
}
|
||||
|
|
|
@ -3,8 +3,11 @@ package com.provectus.kafka.ui.emitter;
|
|||
import com.google.common.annotations.VisibleForTesting;
|
||||
import com.google.common.util.concurrent.RateLimiter;
|
||||
import com.provectus.kafka.ui.config.ClustersProperties;
|
||||
import com.provectus.kafka.ui.util.ConsumerRecordsUtil;
|
||||
import java.util.function.Supplier;
|
||||
import lombok.extern.slf4j.Slf4j;
|
||||
import org.apache.kafka.clients.consumer.ConsumerRecords;
|
||||
import org.apache.kafka.common.utils.Bytes;
|
||||
|
||||
@Slf4j
|
||||
public class PollingThrottler {
|
||||
|
@ -33,17 +36,18 @@ public class PollingThrottler {
|
|||
return new PollingThrottler("noop", RateLimiter.create(Long.MAX_VALUE));
|
||||
}
|
||||
|
||||
//returns true if polling was throttled
|
||||
public boolean throttleAfterPoll(int polledBytes) {
|
||||
public void throttleAfterPoll(int polledBytes) {
|
||||
if (polledBytes > 0) {
|
||||
double sleptSeconds = rateLimiter.acquire(polledBytes);
|
||||
if (!throttled && sleptSeconds > 0.0) {
|
||||
throttled = true;
|
||||
log.debug("Polling throttling enabled for cluster {} at rate {} bytes/sec", clusterName, rateLimiter.getRate());
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
public void throttleAfterPoll(ConsumerRecords<Bytes, Bytes> polled) {
|
||||
throttleAfterPoll(ConsumerRecordsUtil.calculatePolledSize(polled));
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@ -1,98 +0,0 @@
|
|||
package com.provectus.kafka.ui.emitter;
|
||||
|
||||
import com.provectus.kafka.ui.model.ConsumerPosition;
|
||||
import com.provectus.kafka.ui.model.TopicMessageEventDTO;
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
import java.util.TreeMap;
|
||||
import java.util.function.Supplier;
|
||||
import lombok.extern.slf4j.Slf4j;
|
||||
import org.apache.kafka.clients.consumer.ConsumerRecord;
|
||||
import org.apache.kafka.common.TopicPartition;
|
||||
import org.apache.kafka.common.errors.InterruptException;
|
||||
import org.apache.kafka.common.utils.Bytes;
|
||||
import reactor.core.publisher.FluxSink;
|
||||
|
||||
@Slf4j
|
||||
abstract class RangePollingEmitter extends AbstractEmitter {
|
||||
|
||||
private final Supplier<EnhancedConsumer> consumerSupplier;
|
||||
protected final ConsumerPosition consumerPosition;
|
||||
protected final int messagesPerPage;
|
||||
|
||||
protected RangePollingEmitter(Supplier<EnhancedConsumer> consumerSupplier,
|
||||
ConsumerPosition consumerPosition,
|
||||
int messagesPerPage,
|
||||
MessagesProcessing messagesProcessing,
|
||||
PollingSettings pollingSettings) {
|
||||
super(messagesProcessing, pollingSettings);
|
||||
this.consumerPosition = consumerPosition;
|
||||
this.messagesPerPage = messagesPerPage;
|
||||
this.consumerSupplier = consumerSupplier;
|
||||
}
|
||||
|
||||
protected record FromToOffset(/*inclusive*/ long from, /*exclusive*/ long to) {
|
||||
}
|
||||
|
||||
//should return empty map if polling should be stopped
|
||||
protected abstract TreeMap<TopicPartition, FromToOffset> nextPollingRange(
|
||||
TreeMap<TopicPartition, FromToOffset> prevRange, //empty on start
|
||||
SeekOperations seekOperations
|
||||
);
|
||||
|
||||
@Override
|
||||
public void accept(FluxSink<TopicMessageEventDTO> sink) {
|
||||
log.debug("Starting polling for {}", consumerPosition);
|
||||
try (EnhancedConsumer consumer = consumerSupplier.get()) {
|
||||
sendPhase(sink, "Consumer created");
|
||||
var seekOperations = SeekOperations.create(consumer, consumerPosition);
|
||||
TreeMap<TopicPartition, FromToOffset> pollRange = nextPollingRange(new TreeMap<>(), seekOperations);
|
||||
log.debug("Starting from offsets {}", pollRange);
|
||||
|
||||
while (!sink.isCancelled() && !pollRange.isEmpty() && !sendLimitReached()) {
|
||||
var polled = poll(consumer, sink, pollRange);
|
||||
send(sink, polled);
|
||||
pollRange = nextPollingRange(pollRange, seekOperations);
|
||||
}
|
||||
if (sink.isCancelled()) {
|
||||
log.debug("Polling finished due to sink cancellation");
|
||||
}
|
||||
sendFinishStatsAndCompleteSink(sink);
|
||||
log.debug("Polling finished");
|
||||
} catch (InterruptException kafkaInterruptException) {
|
||||
log.debug("Polling finished due to thread interruption");
|
||||
sink.complete();
|
||||
} catch (Exception e) {
|
||||
log.error("Error occurred while consuming records", e);
|
||||
sink.error(e);
|
||||
}
|
||||
}
|
||||
|
||||
private List<ConsumerRecord<Bytes, Bytes>> poll(EnhancedConsumer consumer,
|
||||
FluxSink<TopicMessageEventDTO> sink,
|
||||
TreeMap<TopicPartition, FromToOffset> range) {
|
||||
log.trace("Polling range {}", range);
|
||||
sendPhase(sink,
|
||||
"Polling partitions: %s".formatted(range.keySet().stream().map(TopicPartition::partition).sorted().toList()));
|
||||
|
||||
consumer.assign(range.keySet());
|
||||
range.forEach((tp, fromTo) -> consumer.seek(tp, fromTo.from));
|
||||
|
||||
List<ConsumerRecord<Bytes, Bytes>> result = new ArrayList<>();
|
||||
while (!sink.isCancelled() && consumer.paused().size() < range.size()) {
|
||||
var polledRecords = poll(sink, consumer);
|
||||
range.forEach((tp, fromTo) -> {
|
||||
polledRecords.records(tp).stream()
|
||||
.filter(r -> r.offset() < fromTo.to)
|
||||
.forEach(result::add);
|
||||
|
||||
//next position is out of target range -> pausing partition
|
||||
if (consumer.position(tp) >= fromTo.to) {
|
||||
consumer.pause(List.of(tp));
|
||||
}
|
||||
});
|
||||
}
|
||||
consumer.resume(consumer.paused());
|
||||
return result;
|
||||
}
|
||||
}
|
|
@ -10,18 +10,17 @@ import java.util.stream.Collectors;
|
|||
import javax.annotation.Nullable;
|
||||
import lombok.AccessLevel;
|
||||
import lombok.RequiredArgsConstructor;
|
||||
import org.apache.commons.lang3.mutable.MutableLong;
|
||||
import org.apache.kafka.clients.consumer.Consumer;
|
||||
import org.apache.kafka.common.TopicPartition;
|
||||
|
||||
@RequiredArgsConstructor(access = AccessLevel.PACKAGE)
|
||||
public class SeekOperations {
|
||||
class SeekOperations {
|
||||
|
||||
private final Consumer<?, ?> consumer;
|
||||
private final OffsetsInfo offsetsInfo;
|
||||
private final Map<TopicPartition, Long> offsetsForSeek; //only contains non-empty partitions!
|
||||
|
||||
public static SeekOperations create(Consumer<?, ?> consumer, ConsumerPosition consumerPosition) {
|
||||
static SeekOperations create(Consumer<?, ?> consumer, ConsumerPosition consumerPosition) {
|
||||
OffsetsInfo offsetsInfo;
|
||||
if (consumerPosition.getSeekTo() == null) {
|
||||
offsetsInfo = new OffsetsInfo(consumer, consumerPosition.getTopic());
|
||||
|
@ -35,37 +34,25 @@ public class SeekOperations {
|
|||
);
|
||||
}
|
||||
|
||||
public void assignAndSeekNonEmptyPartitions() {
|
||||
void assignAndSeekNonEmptyPartitions() {
|
||||
consumer.assign(offsetsForSeek.keySet());
|
||||
offsetsForSeek.forEach(consumer::seek);
|
||||
}
|
||||
|
||||
public Map<TopicPartition, Long> getBeginOffsets() {
|
||||
Map<TopicPartition, Long> getBeginOffsets() {
|
||||
return offsetsInfo.getBeginOffsets();
|
||||
}
|
||||
|
||||
public Map<TopicPartition, Long> getEndOffsets() {
|
||||
Map<TopicPartition, Long> getEndOffsets() {
|
||||
return offsetsInfo.getEndOffsets();
|
||||
}
|
||||
|
||||
public boolean assignedPartitionsFullyPolled() {
|
||||
boolean assignedPartitionsFullyPolled() {
|
||||
return offsetsInfo.assignedPartitionsFullyPolled();
|
||||
}
|
||||
|
||||
// sum of (end - start) offsets for all partitions
|
||||
public long summaryOffsetsRange() {
|
||||
return offsetsInfo.summaryOffsetsRange();
|
||||
}
|
||||
|
||||
// sum of differences between initial consumer seek and current consumer position (across all partitions)
|
||||
public long offsetsProcessedFromSeek() {
|
||||
MutableLong count = new MutableLong();
|
||||
offsetsForSeek.forEach((tp, initialOffset) -> count.add(consumer.position(tp) - initialOffset));
|
||||
return count.getValue();
|
||||
}
|
||||
|
||||
// Get offsets to seek to. NOTE: offsets do not contain empty partitions offsets
|
||||
public Map<TopicPartition, Long> getOffsetsForSeek() {
|
||||
Map<TopicPartition, Long> getOffsetsForSeek() {
|
||||
return offsetsForSeek;
|
||||
}
|
||||
|
||||
|
@ -74,19 +61,19 @@ public class SeekOperations {
|
|||
*/
|
||||
@VisibleForTesting
|
||||
static Map<TopicPartition, Long> getOffsetsForSeek(Consumer<?, ?> consumer,
|
||||
OffsetsInfo offsetsInfo,
|
||||
SeekTypeDTO seekType,
|
||||
@Nullable Map<TopicPartition, Long> seekTo) {
|
||||
OffsetsInfo offsetsInfo,
|
||||
SeekTypeDTO seekType,
|
||||
@Nullable Map<TopicPartition, Long> seekTo) {
|
||||
switch (seekType) {
|
||||
case LATEST:
|
||||
return consumer.endOffsets(offsetsInfo.getNonEmptyPartitions());
|
||||
case BEGINNING:
|
||||
return consumer.beginningOffsets(offsetsInfo.getNonEmptyPartitions());
|
||||
case OFFSET:
|
||||
Preconditions.checkNotNull(seekTo);
|
||||
Preconditions.checkNotNull(offsetsInfo);
|
||||
return fixOffsets(offsetsInfo, seekTo);
|
||||
case TIMESTAMP:
|
||||
Preconditions.checkNotNull(seekTo);
|
||||
Preconditions.checkNotNull(offsetsInfo);
|
||||
return offsetsForTimestamp(consumer, offsetsInfo, seekTo);
|
||||
default:
|
||||
throw new IllegalStateException();
|
||||
|
@ -113,7 +100,7 @@ public class SeekOperations {
|
|||
}
|
||||
|
||||
private static Map<TopicPartition, Long> offsetsForTimestamp(Consumer<?, ?> consumer, OffsetsInfo offsetsInfo,
|
||||
Map<TopicPartition, Long> timestamps) {
|
||||
Map<TopicPartition, Long> timestamps) {
|
||||
timestamps = new HashMap<>(timestamps);
|
||||
timestamps.keySet().retainAll(offsetsInfo.getNonEmptyPartitions());
|
||||
|
||||
|
|
|
@ -1,28 +1,27 @@
|
|||
package com.provectus.kafka.ui.emitter;
|
||||
|
||||
import com.provectus.kafka.ui.model.ConsumerPosition;
|
||||
import com.provectus.kafka.ui.model.TopicMessageDTO;
|
||||
import com.provectus.kafka.ui.model.TopicMessageEventDTO;
|
||||
import com.provectus.kafka.ui.serdes.ConsumerRecordDeserializer;
|
||||
import java.util.HashMap;
|
||||
import java.util.function.Predicate;
|
||||
import java.util.function.Supplier;
|
||||
import lombok.extern.slf4j.Slf4j;
|
||||
import org.apache.kafka.clients.consumer.KafkaConsumer;
|
||||
import org.apache.kafka.common.errors.InterruptException;
|
||||
import org.apache.kafka.common.utils.Bytes;
|
||||
import reactor.core.publisher.FluxSink;
|
||||
|
||||
@Slf4j
|
||||
public class TailingEmitter extends AbstractEmitter {
|
||||
public class TailingEmitter extends AbstractEmitter
|
||||
implements java.util.function.Consumer<FluxSink<TopicMessageEventDTO>> {
|
||||
|
||||
private final Supplier<EnhancedConsumer> consumerSupplier;
|
||||
private final Supplier<KafkaConsumer<Bytes, Bytes>> consumerSupplier;
|
||||
private final ConsumerPosition consumerPosition;
|
||||
|
||||
public TailingEmitter(Supplier<EnhancedConsumer> consumerSupplier,
|
||||
public TailingEmitter(Supplier<KafkaConsumer<Bytes, Bytes>> consumerSupplier,
|
||||
ConsumerPosition consumerPosition,
|
||||
ConsumerRecordDeserializer deserializer,
|
||||
Predicate<TopicMessageDTO> filter,
|
||||
MessagesProcessing messagesProcessing,
|
||||
PollingSettings pollingSettings) {
|
||||
super(new MessagesProcessing(deserializer, filter, false, null), pollingSettings);
|
||||
super(messagesProcessing, pollingSettings);
|
||||
this.consumerSupplier = consumerSupplier;
|
||||
this.consumerPosition = consumerPosition;
|
||||
}
|
||||
|
@ -30,12 +29,12 @@ public class TailingEmitter extends AbstractEmitter {
|
|||
@Override
|
||||
public void accept(FluxSink<TopicMessageEventDTO> sink) {
|
||||
log.debug("Starting tailing polling for {}", consumerPosition);
|
||||
try (EnhancedConsumer consumer = consumerSupplier.get()) {
|
||||
try (KafkaConsumer<Bytes, Bytes> consumer = consumerSupplier.get()) {
|
||||
assignAndSeek(consumer);
|
||||
while (!sink.isCancelled()) {
|
||||
sendPhase(sink, "Polling");
|
||||
var polled = poll(sink, consumer);
|
||||
send(sink, polled);
|
||||
polled.forEach(r -> sendMessage(sink, r));
|
||||
}
|
||||
sink.complete();
|
||||
log.debug("Tailing finished");
|
||||
|
@ -48,7 +47,7 @@ public class TailingEmitter extends AbstractEmitter {
|
|||
}
|
||||
}
|
||||
|
||||
private void assignAndSeek(EnhancedConsumer consumer) {
|
||||
private void assignAndSeek(KafkaConsumer<Bytes, Bytes> consumer) {
|
||||
var seekOperations = SeekOperations.create(consumer, consumerPosition);
|
||||
var seekOffsets = new HashMap<>(seekOperations.getEndOffsets()); // defaulting offsets to topic end
|
||||
seekOffsets.putAll(seekOperations.getOffsetsForSeek()); // this will only set non-empty partitions
|
||||
|
|
|
@ -106,7 +106,7 @@ public class GlobalErrorWebExceptionHandler extends AbstractErrorWebExceptionHan
|
|||
err.setFieldName(e.getKey());
|
||||
err.setRestrictions(List.copyOf(e.getValue()));
|
||||
return err;
|
||||
}).toList();
|
||||
}).collect(Collectors.toList());
|
||||
|
||||
var message = fieldsErrors.isEmpty()
|
||||
? exception.getMessage()
|
||||
|
|
|
@ -30,12 +30,11 @@ import com.provectus.kafka.ui.model.ReplicaDTO;
|
|||
import com.provectus.kafka.ui.model.TopicConfigDTO;
|
||||
import com.provectus.kafka.ui.model.TopicDTO;
|
||||
import com.provectus.kafka.ui.model.TopicDetailsDTO;
|
||||
import com.provectus.kafka.ui.model.TopicProducerStateDTO;
|
||||
import com.provectus.kafka.ui.service.metrics.RawMetric;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.stream.Collectors;
|
||||
import org.apache.kafka.clients.admin.ConfigEntry;
|
||||
import org.apache.kafka.clients.admin.ProducerState;
|
||||
import org.apache.kafka.common.acl.AccessControlEntry;
|
||||
import org.apache.kafka.common.acl.AclBinding;
|
||||
import org.apache.kafka.common.acl.AclOperation;
|
||||
|
@ -55,7 +54,7 @@ public interface ClusterMapper {
|
|||
|
||||
default ClusterMetricsDTO toClusterMetrics(Metrics metrics) {
|
||||
return new ClusterMetricsDTO()
|
||||
.items(metrics.getSummarizedMetrics().map(this::convert).toList());
|
||||
.items(metrics.getSummarizedMetrics().map(this::convert).collect(Collectors.toList()));
|
||||
}
|
||||
|
||||
private MetricDTO convert(RawMetric rawMetric) {
|
||||
|
@ -67,7 +66,7 @@ public interface ClusterMapper {
|
|||
|
||||
default BrokerMetricsDTO toBrokerMetrics(List<RawMetric> metrics) {
|
||||
return new BrokerMetricsDTO()
|
||||
.metrics(metrics.stream().map(this::convert).toList());
|
||||
.metrics(metrics.stream().map(this::convert).collect(Collectors.toList()));
|
||||
}
|
||||
|
||||
@Mapping(target = "isSensitive", source = "sensitive")
|
||||
|
@ -108,7 +107,7 @@ public interface ClusterMapper {
|
|||
List<ClusterDTO.FeaturesEnum> toFeaturesEnum(List<ClusterFeature> features);
|
||||
|
||||
default List<PartitionDTO> map(Map<Integer, InternalPartition> map) {
|
||||
return map.values().stream().map(this::toPartition).toList();
|
||||
return map.values().stream().map(this::toPartition).collect(Collectors.toList());
|
||||
}
|
||||
|
||||
default BrokerDiskUsageDTO map(Integer id, InternalBrokerDiskUsage internalBrokerDiskUsage) {
|
||||
|
@ -119,17 +118,6 @@ public interface ClusterMapper {
|
|||
return brokerDiskUsage;
|
||||
}
|
||||
|
||||
default TopicProducerStateDTO map(int partition, ProducerState state) {
|
||||
return new TopicProducerStateDTO()
|
||||
.partition(partition)
|
||||
.producerId(state.producerId())
|
||||
.producerEpoch(state.producerEpoch())
|
||||
.lastSequence(state.lastSequence())
|
||||
.lastTimestampMs(state.lastTimestamp())
|
||||
.coordinatorEpoch(state.coordinatorEpoch().stream().boxed().findAny().orElse(null))
|
||||
.currentTransactionStartOffset(state.currentTransactionStartOffset().stream().boxed().findAny().orElse(null));
|
||||
}
|
||||
|
||||
static KafkaAclDTO.OperationEnum mapAclOperation(AclOperation operation) {
|
||||
return switch (operation) {
|
||||
case ALL -> KafkaAclDTO.OperationEnum.ALL;
|
||||
|
|
|
@ -21,7 +21,7 @@ public class DescribeLogDirsMapper {
|
|||
return logDirsInfo.entrySet().stream().map(
|
||||
mapEntry -> mapEntry.getValue().entrySet().stream()
|
||||
.map(e -> toBrokerLogDirs(mapEntry.getKey(), e.getKey(), e.getValue()))
|
||||
.toList()
|
||||
.collect(Collectors.toList())
|
||||
).flatMap(Collection::stream).collect(Collectors.toList());
|
||||
}
|
||||
|
||||
|
@ -35,7 +35,7 @@ public class DescribeLogDirsMapper {
|
|||
var topics = logDirInfo.replicaInfos.entrySet().stream()
|
||||
.collect(Collectors.groupingBy(e -> e.getKey().topic())).entrySet().stream()
|
||||
.map(e -> toTopicLogDirs(broker, e.getKey(), e.getValue()))
|
||||
.toList();
|
||||
.collect(Collectors.toList());
|
||||
result.setTopics(topics);
|
||||
return result;
|
||||
}
|
||||
|
@ -48,7 +48,7 @@ public class DescribeLogDirsMapper {
|
|||
topic.setPartitions(
|
||||
partitions.stream().map(
|
||||
e -> topicPartitionLogDir(
|
||||
broker, e.getKey().partition(), e.getValue())).toList()
|
||||
broker, e.getKey().partition(), e.getValue())).collect(Collectors.toList())
|
||||
);
|
||||
return topic;
|
||||
}
|
||||
|
|
|
@ -34,7 +34,7 @@ public interface KafkaConnectMapper {
|
|||
com.provectus.kafka.ui.connect.model.ConnectorPluginConfigValidationResponse
|
||||
connectorPluginConfigValidationResponse);
|
||||
|
||||
default FullConnectorInfoDTO fullConnectorInfo(InternalConnectInfo connectInfo) {
|
||||
default FullConnectorInfoDTO fullConnectorInfoFromTuple(InternalConnectInfo connectInfo) {
|
||||
ConnectorDTO connector = connectInfo.getConnector();
|
||||
List<TaskDTO> tasks = connectInfo.getTasks();
|
||||
int failedTasksCount = (int) tasks.stream()
|
||||
|
|
|
@ -3,21 +3,18 @@ package com.provectus.kafka.ui.mapper;
|
|||
import com.provectus.kafka.ui.model.CompatibilityCheckResponseDTO;
|
||||
import com.provectus.kafka.ui.model.CompatibilityLevelDTO;
|
||||
import com.provectus.kafka.ui.model.NewSchemaSubjectDTO;
|
||||
import com.provectus.kafka.ui.model.SchemaReferenceDTO;
|
||||
import com.provectus.kafka.ui.model.SchemaSubjectDTO;
|
||||
import com.provectus.kafka.ui.model.SchemaTypeDTO;
|
||||
import com.provectus.kafka.ui.service.SchemaRegistryService;
|
||||
import com.provectus.kafka.ui.sr.model.Compatibility;
|
||||
import com.provectus.kafka.ui.sr.model.CompatibilityCheckResponse;
|
||||
import com.provectus.kafka.ui.sr.model.NewSubject;
|
||||
import com.provectus.kafka.ui.sr.model.SchemaReference;
|
||||
import com.provectus.kafka.ui.sr.model.SchemaType;
|
||||
import java.util.List;
|
||||
import java.util.Optional;
|
||||
import org.mapstruct.Mapper;
|
||||
|
||||
|
||||
@Mapper
|
||||
@Mapper(componentModel = "spring")
|
||||
public interface KafkaSrMapper {
|
||||
|
||||
default SchemaSubjectDTO toDto(SchemaRegistryService.SubjectWithCompatibilityLevel s) {
|
||||
|
@ -27,12 +24,9 @@ public interface KafkaSrMapper {
|
|||
.subject(s.getSubject())
|
||||
.schema(s.getSchema())
|
||||
.schemaType(SchemaTypeDTO.fromValue(Optional.ofNullable(s.getSchemaType()).orElse(SchemaType.AVRO).getValue()))
|
||||
.references(toDto(s.getReferences()))
|
||||
.compatibilityLevel(s.getCompatibility().toString());
|
||||
}
|
||||
|
||||
List<SchemaReferenceDTO> toDto(List<SchemaReference> references);
|
||||
|
||||
CompatibilityCheckResponseDTO toDto(CompatibilityCheckResponse ccr);
|
||||
|
||||
CompatibilityLevelDTO.CompatibilityEnum toDto(Compatibility compatibility);
|
||||
|
|
|
@ -44,7 +44,7 @@ public class InternalLogDirStats {
|
|||
topicMap.getValue().replicaInfos.entrySet().stream()
|
||||
.map(e -> Tuples.of(b.getKey(), e.getKey(), e.getValue().size))
|
||||
)
|
||||
).toList();
|
||||
).collect(toList());
|
||||
|
||||
partitionsStats = topicPartitions.stream().collect(
|
||||
groupingBy(
|
||||
|
|
|
@ -52,8 +52,6 @@ public class AccessContext {
|
|||
}
|
||||
|
||||
public static final class AccessContextBuilder {
|
||||
private static final String ACTIONS_NOT_PRESENT = "actions not present";
|
||||
|
||||
private Collection<ApplicationConfigAction> applicationConfigActions = Collections.emptySet();
|
||||
private String cluster;
|
||||
private Collection<ClusterConfigAction> clusterConfigActions = Collections.emptySet();
|
||||
|
@ -77,7 +75,7 @@ public class AccessContext {
|
|||
}
|
||||
|
||||
public AccessContextBuilder applicationConfigActions(ApplicationConfigAction... actions) {
|
||||
Assert.isTrue(actions.length > 0, ACTIONS_NOT_PRESENT);
|
||||
Assert.isTrue(actions.length > 0, "actions not present");
|
||||
this.applicationConfigActions = List.of(actions);
|
||||
return this;
|
||||
}
|
||||
|
@ -88,7 +86,7 @@ public class AccessContext {
|
|||
}
|
||||
|
||||
public AccessContextBuilder clusterConfigActions(ClusterConfigAction... actions) {
|
||||
Assert.isTrue(actions.length > 0, ACTIONS_NOT_PRESENT);
|
||||
Assert.isTrue(actions.length > 0, "actions not present");
|
||||
this.clusterConfigActions = List.of(actions);
|
||||
return this;
|
||||
}
|
||||
|
@ -99,7 +97,7 @@ public class AccessContext {
|
|||
}
|
||||
|
||||
public AccessContextBuilder topicActions(TopicAction... actions) {
|
||||
Assert.isTrue(actions.length > 0, ACTIONS_NOT_PRESENT);
|
||||
Assert.isTrue(actions.length > 0, "actions not present");
|
||||
this.topicActions = List.of(actions);
|
||||
return this;
|
||||
}
|
||||
|
@ -110,7 +108,7 @@ public class AccessContext {
|
|||
}
|
||||
|
||||
public AccessContextBuilder consumerGroupActions(ConsumerGroupAction... actions) {
|
||||
Assert.isTrue(actions.length > 0, ACTIONS_NOT_PRESENT);
|
||||
Assert.isTrue(actions.length > 0, "actions not present");
|
||||
this.consumerGroupActions = List.of(actions);
|
||||
return this;
|
||||
}
|
||||
|
@ -121,7 +119,7 @@ public class AccessContext {
|
|||
}
|
||||
|
||||
public AccessContextBuilder connectActions(ConnectAction... actions) {
|
||||
Assert.isTrue(actions.length > 0, ACTIONS_NOT_PRESENT);
|
||||
Assert.isTrue(actions.length > 0, "actions not present");
|
||||
this.connectActions = List.of(actions);
|
||||
return this;
|
||||
}
|
||||
|
@ -137,25 +135,25 @@ public class AccessContext {
|
|||
}
|
||||
|
||||
public AccessContextBuilder schemaActions(SchemaAction... actions) {
|
||||
Assert.isTrue(actions.length > 0, ACTIONS_NOT_PRESENT);
|
||||
Assert.isTrue(actions.length > 0, "actions not present");
|
||||
this.schemaActions = List.of(actions);
|
||||
return this;
|
||||
}
|
||||
|
||||
public AccessContextBuilder ksqlActions(KsqlAction... actions) {
|
||||
Assert.isTrue(actions.length > 0, ACTIONS_NOT_PRESENT);
|
||||
Assert.isTrue(actions.length > 0, "actions not present");
|
||||
this.ksqlActions = List.of(actions);
|
||||
return this;
|
||||
}
|
||||
|
||||
public AccessContextBuilder aclActions(AclAction... actions) {
|
||||
Assert.isTrue(actions.length > 0, ACTIONS_NOT_PRESENT);
|
||||
Assert.isTrue(actions.length > 0, "actions not present");
|
||||
this.aclActions = List.of(actions);
|
||||
return this;
|
||||
}
|
||||
|
||||
public AccessContextBuilder auditActions(AuditAction... actions) {
|
||||
Assert.isTrue(actions.length > 0, ACTIONS_NOT_PRESENT);
|
||||
Assert.isTrue(actions.length > 0, "actions not present");
|
||||
this.auditActions = List.of(actions);
|
||||
return this;
|
||||
}
|
||||
|
|
|
@ -16,14 +16,13 @@ import com.provectus.kafka.ui.model.rbac.permission.KsqlAction;
|
|||
import com.provectus.kafka.ui.model.rbac.permission.SchemaAction;
|
||||
import com.provectus.kafka.ui.model.rbac.permission.TopicAction;
|
||||
import java.util.Arrays;
|
||||
import java.util.Collections;
|
||||
import java.util.List;
|
||||
import java.util.regex.Pattern;
|
||||
import javax.annotation.Nullable;
|
||||
import lombok.EqualsAndHashCode;
|
||||
import lombok.Getter;
|
||||
import lombok.ToString;
|
||||
import org.apache.commons.collections4.CollectionUtils;
|
||||
import org.apache.commons.collections.CollectionUtils;
|
||||
import org.springframework.util.Assert;
|
||||
|
||||
@Getter
|
||||
|
@ -74,10 +73,6 @@ public class Permission {
|
|||
}
|
||||
|
||||
private List<String> getAllActionValues() {
|
||||
if (resource == null) {
|
||||
return Collections.emptyList();
|
||||
}
|
||||
|
||||
return switch (this.resource) {
|
||||
case APPLICATIONCONFIG -> Arrays.stream(ApplicationConfigAction.values()).map(Enum::toString).toList();
|
||||
case CLUSTERCONFIG -> Arrays.stream(ClusterConfigAction.values()).map(Enum::toString).toList();
|
||||
|
|
|
@ -1,25 +1,15 @@
|
|||
package com.provectus.kafka.ui.model.rbac.permission;
|
||||
|
||||
import java.util.Set;
|
||||
import org.apache.commons.lang3.EnumUtils;
|
||||
import org.jetbrains.annotations.Nullable;
|
||||
|
||||
public enum AclAction implements PermissibleAction {
|
||||
|
||||
VIEW,
|
||||
EDIT
|
||||
|
||||
;
|
||||
|
||||
public static final Set<AclAction> ALTER_ACTIONS = Set.of(EDIT);
|
||||
EDIT;
|
||||
|
||||
@Nullable
|
||||
public static AclAction fromString(String name) {
|
||||
return EnumUtils.getEnum(AclAction.class, name);
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean isAlter() {
|
||||
return ALTER_ACTIONS.contains(this);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1,6 +1,5 @@
|
|||
package com.provectus.kafka.ui.model.rbac.permission;
|
||||
|
||||
import java.util.Set;
|
||||
import org.apache.commons.lang3.EnumUtils;
|
||||
import org.jetbrains.annotations.Nullable;
|
||||
|
||||
|
@ -11,15 +10,9 @@ public enum ApplicationConfigAction implements PermissibleAction {
|
|||
|
||||
;
|
||||
|
||||
public static final Set<ApplicationConfigAction> ALTER_ACTIONS = Set.of(EDIT);
|
||||
|
||||
@Nullable
|
||||
public static ApplicationConfigAction fromString(String name) {
|
||||
return EnumUtils.getEnum(ApplicationConfigAction.class, name);
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean isAlter() {
|
||||
return ALTER_ACTIONS.contains(this);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1,24 +1,14 @@
|
|||
package com.provectus.kafka.ui.model.rbac.permission;
|
||||
|
||||
import java.util.Set;
|
||||
import org.apache.commons.lang3.EnumUtils;
|
||||
import org.jetbrains.annotations.Nullable;
|
||||
|
||||
public enum AuditAction implements PermissibleAction {
|
||||
|
||||
VIEW
|
||||
|
||||
;
|
||||
|
||||
private static final Set<AuditAction> ALTER_ACTIONS = Set.of();
|
||||
VIEW;
|
||||
|
||||
@Nullable
|
||||
public static AuditAction fromString(String name) {
|
||||
return EnumUtils.getEnum(AuditAction.class, name);
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean isAlter() {
|
||||
return ALTER_ACTIONS.contains(this);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1,6 +1,5 @@
|
|||
package com.provectus.kafka.ui.model.rbac.permission;
|
||||
|
||||
import java.util.Set;
|
||||
import org.apache.commons.lang3.EnumUtils;
|
||||
import org.jetbrains.annotations.Nullable;
|
||||
|
||||
|
@ -11,15 +10,9 @@ public enum ClusterConfigAction implements PermissibleAction {
|
|||
|
||||
;
|
||||
|
||||
public static final Set<ClusterConfigAction> ALTER_ACTIONS = Set.of(EDIT);
|
||||
|
||||
@Nullable
|
||||
public static ClusterConfigAction fromString(String name) {
|
||||
return EnumUtils.getEnum(ClusterConfigAction.class, name);
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean isAlter() {
|
||||
return ALTER_ACTIONS.contains(this);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1,6 +1,5 @@
|
|||
package com.provectus.kafka.ui.model.rbac.permission;
|
||||
|
||||
import java.util.Set;
|
||||
import org.apache.commons.lang3.EnumUtils;
|
||||
import org.jetbrains.annotations.Nullable;
|
||||
|
||||
|
@ -13,15 +12,9 @@ public enum ConnectAction implements PermissibleAction {
|
|||
|
||||
;
|
||||
|
||||
public static final Set<ConnectAction> ALTER_ACTIONS = Set.of(CREATE, EDIT, RESTART);
|
||||
|
||||
@Nullable
|
||||
public static ConnectAction fromString(String name) {
|
||||
return EnumUtils.getEnum(ConnectAction.class, name);
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean isAlter() {
|
||||
return ALTER_ACTIONS.contains(this);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1,6 +1,5 @@
|
|||
package com.provectus.kafka.ui.model.rbac.permission;
|
||||
|
||||
import java.util.Set;
|
||||
import org.apache.commons.lang3.EnumUtils;
|
||||
import org.jetbrains.annotations.Nullable;
|
||||
|
||||
|
@ -8,19 +7,14 @@ public enum ConsumerGroupAction implements PermissibleAction {
|
|||
|
||||
VIEW,
|
||||
DELETE,
|
||||
|
||||
RESET_OFFSETS
|
||||
|
||||
;
|
||||
|
||||
public static final Set<ConsumerGroupAction> ALTER_ACTIONS = Set.of(DELETE, RESET_OFFSETS);
|
||||
|
||||
@Nullable
|
||||
public static ConsumerGroupAction fromString(String name) {
|
||||
return EnumUtils.getEnum(ConsumerGroupAction.class, name);
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean isAlter() {
|
||||
return ALTER_ACTIONS.contains(this);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1,24 +1,15 @@
|
|||
package com.provectus.kafka.ui.model.rbac.permission;
|
||||
|
||||
import java.util.Set;
|
||||
import org.apache.commons.lang3.EnumUtils;
|
||||
import org.jetbrains.annotations.Nullable;
|
||||
|
||||
public enum KsqlAction implements PermissibleAction {
|
||||
|
||||
EXECUTE
|
||||
|
||||
;
|
||||
|
||||
public static final Set<KsqlAction> ALTER_ACTIONS = Set.of(EXECUTE);
|
||||
EXECUTE;
|
||||
|
||||
@Nullable
|
||||
public static KsqlAction fromString(String name) {
|
||||
return EnumUtils.getEnum(KsqlAction.class, name);
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean isAlter() {
|
||||
return ALTER_ACTIONS.contains(this);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -5,9 +5,4 @@ public sealed interface PermissibleAction permits
|
|||
ConsumerGroupAction, SchemaAction,
|
||||
ConnectAction, ClusterConfigAction,
|
||||
KsqlAction, TopicAction, AuditAction {
|
||||
|
||||
String name();
|
||||
|
||||
boolean isAlter();
|
||||
|
||||
}
|
||||
|
|
|
@ -1,6 +1,5 @@
|
|||
package com.provectus.kafka.ui.model.rbac.permission;
|
||||
|
||||
import java.util.Set;
|
||||
import org.apache.commons.lang3.EnumUtils;
|
||||
import org.jetbrains.annotations.Nullable;
|
||||
|
||||
|
@ -14,15 +13,9 @@ public enum SchemaAction implements PermissibleAction {
|
|||
|
||||
;
|
||||
|
||||
public static final Set<SchemaAction> ALTER_ACTIONS = Set.of(CREATE, DELETE, EDIT, MODIFY_GLOBAL_COMPATIBILITY);
|
||||
|
||||
@Nullable
|
||||
public static SchemaAction fromString(String name) {
|
||||
return EnumUtils.getEnum(SchemaAction.class, name);
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean isAlter() {
|
||||
return ALTER_ACTIONS.contains(this);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1,6 +1,5 @@
|
|||
package com.provectus.kafka.ui.model.rbac.permission;
|
||||
|
||||
import java.util.Set;
|
||||
import org.apache.commons.lang3.EnumUtils;
|
||||
import org.jetbrains.annotations.Nullable;
|
||||
|
||||
|
@ -10,21 +9,16 @@ public enum TopicAction implements PermissibleAction {
|
|||
CREATE,
|
||||
EDIT,
|
||||
DELETE,
|
||||
|
||||
MESSAGES_READ,
|
||||
MESSAGES_PRODUCE,
|
||||
MESSAGES_DELETE,
|
||||
|
||||
;
|
||||
|
||||
public static final Set<TopicAction> ALTER_ACTIONS = Set.of(CREATE, EDIT, DELETE, MESSAGES_PRODUCE, MESSAGES_DELETE);
|
||||
|
||||
@Nullable
|
||||
public static TopicAction fromString(String name) {
|
||||
return EnumUtils.getEnum(TopicAction.class, name);
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean isAlter() {
|
||||
return ALTER_ACTIONS.contains(this);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1,7 +1,6 @@
|
|||
package com.provectus.kafka.ui.serdes;
|
||||
|
||||
import com.provectus.kafka.ui.model.TopicMessageDTO;
|
||||
import com.provectus.kafka.ui.model.TopicMessageDTO.TimestampTypeEnum;
|
||||
import com.provectus.kafka.ui.serde.api.Serde;
|
||||
import java.time.Instant;
|
||||
import java.time.OffsetDateTime;
|
||||
|
@ -9,7 +8,6 @@ import java.time.ZoneId;
|
|||
import java.util.Arrays;
|
||||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
import java.util.function.UnaryOperator;
|
||||
import lombok.RequiredArgsConstructor;
|
||||
import lombok.extern.slf4j.Slf4j;
|
||||
import org.apache.kafka.clients.consumer.ConsumerRecord;
|
||||
|
@ -34,8 +32,6 @@ public class ConsumerRecordDeserializer {
|
|||
private final Serde.Deserializer fallbackKeyDeserializer;
|
||||
private final Serde.Deserializer fallbackValueDeserializer;
|
||||
|
||||
private final UnaryOperator<TopicMessageDTO> masker;
|
||||
|
||||
public TopicMessageDTO deserialize(ConsumerRecord<Bytes, Bytes> rec) {
|
||||
var message = new TopicMessageDTO();
|
||||
fillKey(message, rec);
|
||||
|
@ -51,15 +47,20 @@ public class ConsumerRecordDeserializer {
|
|||
message.setValueSize(getValueSize(rec));
|
||||
message.setHeadersSize(getHeadersSize(rec));
|
||||
|
||||
return masker.apply(message);
|
||||
return message;
|
||||
}
|
||||
|
||||
private static TimestampTypeEnum mapToTimestampType(TimestampType timestampType) {
|
||||
return switch (timestampType) {
|
||||
case CREATE_TIME -> TimestampTypeEnum.CREATE_TIME;
|
||||
case LOG_APPEND_TIME -> TimestampTypeEnum.LOG_APPEND_TIME;
|
||||
case NO_TIMESTAMP_TYPE -> TimestampTypeEnum.NO_TIMESTAMP_TYPE;
|
||||
};
|
||||
private static TopicMessageDTO.TimestampTypeEnum mapToTimestampType(TimestampType timestampType) {
|
||||
switch (timestampType) {
|
||||
case CREATE_TIME:
|
||||
return TopicMessageDTO.TimestampTypeEnum.CREATE_TIME;
|
||||
case LOG_APPEND_TIME:
|
||||
return TopicMessageDTO.TimestampTypeEnum.LOG_APPEND_TIME;
|
||||
case NO_TIMESTAMP_TYPE:
|
||||
return TopicMessageDTO.TimestampTypeEnum.NO_TIMESTAMP_TYPE;
|
||||
default:
|
||||
throw new IllegalArgumentException("Unknown timestampType: " + timestampType);
|
||||
}
|
||||
}
|
||||
|
||||
private void fillHeaders(TopicMessageDTO message, ConsumerRecord<Bytes, Bytes> rec) {
|
||||
|
|
|
@ -12,11 +12,9 @@ import com.provectus.kafka.ui.serde.api.Serde;
|
|||
import com.provectus.kafka.ui.serdes.builtin.AvroEmbeddedSerde;
|
||||
import com.provectus.kafka.ui.serdes.builtin.Base64Serde;
|
||||
import com.provectus.kafka.ui.serdes.builtin.ConsumerOffsetsSerde;
|
||||
import com.provectus.kafka.ui.serdes.builtin.HexSerde;
|
||||
import com.provectus.kafka.ui.serdes.builtin.Int32Serde;
|
||||
import com.provectus.kafka.ui.serdes.builtin.Int64Serde;
|
||||
import com.provectus.kafka.ui.serdes.builtin.ProtobufFileSerde;
|
||||
import com.provectus.kafka.ui.serdes.builtin.ProtobufRawSerde;
|
||||
import com.provectus.kafka.ui.serdes.builtin.StringSerde;
|
||||
import com.provectus.kafka.ui.serdes.builtin.UInt32Serde;
|
||||
import com.provectus.kafka.ui.serdes.builtin.UInt64Serde;
|
||||
|
@ -49,9 +47,7 @@ public class SerdesInitializer {
|
|||
.put(UInt64Serde.name(), UInt64Serde.class)
|
||||
.put(AvroEmbeddedSerde.name(), AvroEmbeddedSerde.class)
|
||||
.put(Base64Serde.name(), Base64Serde.class)
|
||||
.put(HexSerde.name(), HexSerde.class)
|
||||
.put(UuidBinarySerde.name(), UuidBinarySerde.class)
|
||||
.put(ProtobufRawSerde.name(), ProtobufRawSerde.class)
|
||||
.build(),
|
||||
new CustomSerdeLoader()
|
||||
);
|
||||
|
|
|
@ -19,6 +19,12 @@ public class AvroEmbeddedSerde implements BuiltInSerde {
|
|||
return "Avro (Embedded)";
|
||||
}
|
||||
|
||||
@Override
|
||||
public void configure(PropertyResolver serdeProperties,
|
||||
PropertyResolver kafkaClusterProperties,
|
||||
PropertyResolver globalProperties) {
|
||||
}
|
||||
|
||||
@Override
|
||||
public Optional<String> getDescription() {
|
||||
return Optional.empty();
|
||||
|
|
|
@ -1,11 +1,14 @@
|
|||
package com.provectus.kafka.ui.serdes.builtin;
|
||||
|
||||
import com.provectus.kafka.ui.serde.api.DeserializeResult;
|
||||
import com.provectus.kafka.ui.serde.api.PropertyResolver;
|
||||
import com.provectus.kafka.ui.serde.api.RecordHeaders;
|
||||
import com.provectus.kafka.ui.serde.api.SchemaDescription;
|
||||
import com.provectus.kafka.ui.serdes.BuiltInSerde;
|
||||
import java.util.Base64;
|
||||
import java.util.Map;
|
||||
import java.util.Optional;
|
||||
import org.apache.kafka.common.header.Headers;
|
||||
|
||||
public class Base64Serde implements BuiltInSerde {
|
||||
|
||||
|
@ -13,6 +16,12 @@ public class Base64Serde implements BuiltInSerde {
|
|||
return "Base64";
|
||||
}
|
||||
|
||||
@Override
|
||||
public void configure(PropertyResolver serdeProperties,
|
||||
PropertyResolver kafkaClusterProperties,
|
||||
PropertyResolver globalProperties) {
|
||||
}
|
||||
|
||||
@Override
|
||||
public Optional<String> getDescription() {
|
||||
return Optional.empty();
|
||||
|
@ -35,25 +44,31 @@ public class Base64Serde implements BuiltInSerde {
|
|||
|
||||
@Override
|
||||
public Serializer serializer(String topic, Target type) {
|
||||
var decoder = Base64.getDecoder();
|
||||
return inputString -> {
|
||||
inputString = inputString.trim();
|
||||
// it is actually a hack to provide ability to sent empty array as a key/value
|
||||
if (inputString.length() == 0) {
|
||||
return new byte[] {};
|
||||
return new Serializer() {
|
||||
@Override
|
||||
public byte[] serialize(String input) {
|
||||
input = input.trim();
|
||||
// it is actually a hack to provide ability to sent empty array as a key/value
|
||||
if (input.length() == 0) {
|
||||
return new byte[]{};
|
||||
}
|
||||
return Base64.getDecoder().decode(input);
|
||||
}
|
||||
return decoder.decode(inputString);
|
||||
};
|
||||
}
|
||||
|
||||
@Override
|
||||
public Deserializer deserializer(String topic, Target type) {
|
||||
var encoder = Base64.getEncoder();
|
||||
return (headers, data) ->
|
||||
new DeserializeResult(
|
||||
return new Deserializer() {
|
||||
@Override
|
||||
public DeserializeResult deserialize(RecordHeaders headers, byte[] data) {
|
||||
return new DeserializeResult(
|
||||
encoder.encodeToString(data),
|
||||
DeserializeResult.Type.STRING,
|
||||
Map.of()
|
||||
);
|
||||
}
|
||||
};
|
||||
}
|
||||
}
|
||||
|
|
|
@ -2,7 +2,6 @@ package com.provectus.kafka.ui.serdes.builtin;
|
|||
|
||||
import com.fasterxml.jackson.core.JsonGenerator;
|
||||
import com.fasterxml.jackson.databind.JsonSerializer;
|
||||
import com.fasterxml.jackson.databind.ObjectMapper;
|
||||
import com.fasterxml.jackson.databind.SerializerProvider;
|
||||
import com.fasterxml.jackson.databind.json.JsonMapper;
|
||||
import com.fasterxml.jackson.databind.module.SimpleModule;
|
||||
|
@ -28,23 +27,6 @@ public class ConsumerOffsetsSerde implements BuiltInSerde {
|
|||
|
||||
private static final JsonMapper JSON_MAPPER = createMapper();
|
||||
|
||||
private static final String ASSIGNMENT = "assignment";
|
||||
private static final String CLIENT_HOST = "client_host";
|
||||
private static final String CLIENT_ID = "client_id";
|
||||
private static final String COMMIT_TIMESTAMP = "commit_timestamp";
|
||||
private static final String CURRENT_STATE_TIMESTAMP = "current_state_timestamp";
|
||||
private static final String GENERATION = "generation";
|
||||
private static final String LEADER = "leader";
|
||||
private static final String MEMBERS = "members";
|
||||
private static final String MEMBER_ID = "member_id";
|
||||
private static final String METADATA = "metadata";
|
||||
private static final String OFFSET = "offset";
|
||||
private static final String PROTOCOL = "protocol";
|
||||
private static final String PROTOCOL_TYPE = "protocol_type";
|
||||
private static final String REBALANCE_TIMEOUT = "rebalance_timeout";
|
||||
private static final String SESSION_TIMEOUT = "session_timeout";
|
||||
private static final String SUBSCRIPTION = "subscription";
|
||||
|
||||
public static final String TOPIC = "__consumer_offsets";
|
||||
|
||||
public static String name() {
|
||||
|
@ -133,128 +115,128 @@ public class ConsumerOffsetsSerde implements BuiltInSerde {
|
|||
private Deserializer valueDeserializer() {
|
||||
final Schema commitOffsetSchemaV0 =
|
||||
new Schema(
|
||||
new Field(OFFSET, Type.INT64, ""),
|
||||
new Field(METADATA, Type.STRING, ""),
|
||||
new Field(COMMIT_TIMESTAMP, Type.INT64, "")
|
||||
new Field("offset", Type.INT64, ""),
|
||||
new Field("metadata", Type.STRING, ""),
|
||||
new Field("commit_timestamp", Type.INT64, "")
|
||||
);
|
||||
|
||||
final Schema commitOffsetSchemaV1 =
|
||||
new Schema(
|
||||
new Field(OFFSET, Type.INT64, ""),
|
||||
new Field(METADATA, Type.STRING, ""),
|
||||
new Field(COMMIT_TIMESTAMP, Type.INT64, ""),
|
||||
new Field("offset", Type.INT64, ""),
|
||||
new Field("metadata", Type.STRING, ""),
|
||||
new Field("commit_timestamp", Type.INT64, ""),
|
||||
new Field("expire_timestamp", Type.INT64, "")
|
||||
);
|
||||
|
||||
final Schema commitOffsetSchemaV2 =
|
||||
new Schema(
|
||||
new Field(OFFSET, Type.INT64, ""),
|
||||
new Field(METADATA, Type.STRING, ""),
|
||||
new Field(COMMIT_TIMESTAMP, Type.INT64, "")
|
||||
new Field("offset", Type.INT64, ""),
|
||||
new Field("metadata", Type.STRING, ""),
|
||||
new Field("commit_timestamp", Type.INT64, "")
|
||||
);
|
||||
|
||||
final Schema commitOffsetSchemaV3 =
|
||||
new Schema(
|
||||
new Field(OFFSET, Type.INT64, ""),
|
||||
new Field("offset", Type.INT64, ""),
|
||||
new Field("leader_epoch", Type.INT32, ""),
|
||||
new Field(METADATA, Type.STRING, ""),
|
||||
new Field(COMMIT_TIMESTAMP, Type.INT64, "")
|
||||
new Field("metadata", Type.STRING, ""),
|
||||
new Field("commit_timestamp", Type.INT64, "")
|
||||
);
|
||||
|
||||
final Schema commitOffsetSchemaV4 = new Schema(
|
||||
new Field(OFFSET, Type.INT64, ""),
|
||||
new Field("offset", Type.INT64, ""),
|
||||
new Field("leader_epoch", Type.INT32, ""),
|
||||
new Field(METADATA, Type.COMPACT_STRING, ""),
|
||||
new Field(COMMIT_TIMESTAMP, Type.INT64, ""),
|
||||
new Field("metadata", Type.COMPACT_STRING, ""),
|
||||
new Field("commit_timestamp", Type.INT64, ""),
|
||||
Field.TaggedFieldsSection.of()
|
||||
);
|
||||
|
||||
final Schema metadataSchema0 =
|
||||
new Schema(
|
||||
new Field(PROTOCOL_TYPE, Type.STRING, ""),
|
||||
new Field(GENERATION, Type.INT32, ""),
|
||||
new Field(PROTOCOL, Type.NULLABLE_STRING, ""),
|
||||
new Field(LEADER, Type.NULLABLE_STRING, ""),
|
||||
new Field(MEMBERS, new ArrayOf(new Schema(
|
||||
new Field(MEMBER_ID, Type.STRING, ""),
|
||||
new Field(CLIENT_ID, Type.STRING, ""),
|
||||
new Field(CLIENT_HOST, Type.STRING, ""),
|
||||
new Field(SESSION_TIMEOUT, Type.INT32, ""),
|
||||
new Field(SUBSCRIPTION, Type.BYTES, ""),
|
||||
new Field(ASSIGNMENT, Type.BYTES, "")
|
||||
new Field("protocol_type", Type.STRING, ""),
|
||||
new Field("generation", Type.INT32, ""),
|
||||
new Field("protocol", Type.NULLABLE_STRING, ""),
|
||||
new Field("leader", Type.NULLABLE_STRING, ""),
|
||||
new Field("members", new ArrayOf(new Schema(
|
||||
new Field("member_id", Type.STRING, ""),
|
||||
new Field("client_id", Type.STRING, ""),
|
||||
new Field("client_host", Type.STRING, ""),
|
||||
new Field("session_timeout", Type.INT32, ""),
|
||||
new Field("subscription", Type.BYTES, ""),
|
||||
new Field("assignment", Type.BYTES, "")
|
||||
)), "")
|
||||
);
|
||||
|
||||
final Schema metadataSchema1 =
|
||||
new Schema(
|
||||
new Field(PROTOCOL_TYPE, Type.STRING, ""),
|
||||
new Field(GENERATION, Type.INT32, ""),
|
||||
new Field(PROTOCOL, Type.NULLABLE_STRING, ""),
|
||||
new Field(LEADER, Type.NULLABLE_STRING, ""),
|
||||
new Field(MEMBERS, new ArrayOf(new Schema(
|
||||
new Field(MEMBER_ID, Type.STRING, ""),
|
||||
new Field(CLIENT_ID, Type.STRING, ""),
|
||||
new Field(CLIENT_HOST, Type.STRING, ""),
|
||||
new Field(REBALANCE_TIMEOUT, Type.INT32, ""),
|
||||
new Field(SESSION_TIMEOUT, Type.INT32, ""),
|
||||
new Field(SUBSCRIPTION, Type.BYTES, ""),
|
||||
new Field(ASSIGNMENT, Type.BYTES, "")
|
||||
new Field("protocol_type", Type.STRING, ""),
|
||||
new Field("generation", Type.INT32, ""),
|
||||
new Field("protocol", Type.NULLABLE_STRING, ""),
|
||||
new Field("leader", Type.NULLABLE_STRING, ""),
|
||||
new Field("members", new ArrayOf(new Schema(
|
||||
new Field("member_id", Type.STRING, ""),
|
||||
new Field("client_id", Type.STRING, ""),
|
||||
new Field("client_host", Type.STRING, ""),
|
||||
new Field("rebalance_timeout", Type.INT32, ""),
|
||||
new Field("session_timeout", Type.INT32, ""),
|
||||
new Field("subscription", Type.BYTES, ""),
|
||||
new Field("assignment", Type.BYTES, "")
|
||||
)), "")
|
||||
);
|
||||
|
||||
final Schema metadataSchema2 =
|
||||
new Schema(
|
||||
new Field(PROTOCOL_TYPE, Type.STRING, ""),
|
||||
new Field(GENERATION, Type.INT32, ""),
|
||||
new Field(PROTOCOL, Type.NULLABLE_STRING, ""),
|
||||
new Field(LEADER, Type.NULLABLE_STRING, ""),
|
||||
new Field(CURRENT_STATE_TIMESTAMP, Type.INT64, ""),
|
||||
new Field(MEMBERS, new ArrayOf(new Schema(
|
||||
new Field(MEMBER_ID, Type.STRING, ""),
|
||||
new Field(CLIENT_ID, Type.STRING, ""),
|
||||
new Field(CLIENT_HOST, Type.STRING, ""),
|
||||
new Field(REBALANCE_TIMEOUT, Type.INT32, ""),
|
||||
new Field(SESSION_TIMEOUT, Type.INT32, ""),
|
||||
new Field(SUBSCRIPTION, Type.BYTES, ""),
|
||||
new Field(ASSIGNMENT, Type.BYTES, "")
|
||||
new Field("protocol_type", Type.STRING, ""),
|
||||
new Field("generation", Type.INT32, ""),
|
||||
new Field("protocol", Type.NULLABLE_STRING, ""),
|
||||
new Field("leader", Type.NULLABLE_STRING, ""),
|
||||
new Field("current_state_timestamp", Type.INT64, ""),
|
||||
new Field("members", new ArrayOf(new Schema(
|
||||
new Field("member_id", Type.STRING, ""),
|
||||
new Field("client_id", Type.STRING, ""),
|
||||
new Field("client_host", Type.STRING, ""),
|
||||
new Field("rebalance_timeout", Type.INT32, ""),
|
||||
new Field("session_timeout", Type.INT32, ""),
|
||||
new Field("subscription", Type.BYTES, ""),
|
||||
new Field("assignment", Type.BYTES, "")
|
||||
)), "")
|
||||
);
|
||||
|
||||
final Schema metadataSchema3 =
|
||||
new Schema(
|
||||
new Field(PROTOCOL_TYPE, Type.STRING, ""),
|
||||
new Field(GENERATION, Type.INT32, ""),
|
||||
new Field(PROTOCOL, Type.NULLABLE_STRING, ""),
|
||||
new Field(LEADER, Type.NULLABLE_STRING, ""),
|
||||
new Field(CURRENT_STATE_TIMESTAMP, Type.INT64, ""),
|
||||
new Field(MEMBERS, new ArrayOf(new Schema(
|
||||
new Field(MEMBER_ID, Type.STRING, ""),
|
||||
new Field("protocol_type", Type.STRING, ""),
|
||||
new Field("generation", Type.INT32, ""),
|
||||
new Field("protocol", Type.NULLABLE_STRING, ""),
|
||||
new Field("leader", Type.NULLABLE_STRING, ""),
|
||||
new Field("current_state_timestamp", Type.INT64, ""),
|
||||
new Field("members", new ArrayOf(new Schema(
|
||||
new Field("member_id", Type.STRING, ""),
|
||||
new Field("group_instance_id", Type.NULLABLE_STRING, ""),
|
||||
new Field(CLIENT_ID, Type.STRING, ""),
|
||||
new Field(CLIENT_HOST, Type.STRING, ""),
|
||||
new Field(REBALANCE_TIMEOUT, Type.INT32, ""),
|
||||
new Field(SESSION_TIMEOUT, Type.INT32, ""),
|
||||
new Field(SUBSCRIPTION, Type.BYTES, ""),
|
||||
new Field(ASSIGNMENT, Type.BYTES, "")
|
||||
new Field("client_id", Type.STRING, ""),
|
||||
new Field("client_host", Type.STRING, ""),
|
||||
new Field("rebalance_timeout", Type.INT32, ""),
|
||||
new Field("session_timeout", Type.INT32, ""),
|
||||
new Field("subscription", Type.BYTES, ""),
|
||||
new Field("assignment", Type.BYTES, "")
|
||||
)), "")
|
||||
);
|
||||
|
||||
final Schema metadataSchema4 =
|
||||
new Schema(
|
||||
new Field(PROTOCOL_TYPE, Type.COMPACT_STRING, ""),
|
||||
new Field(GENERATION, Type.INT32, ""),
|
||||
new Field(PROTOCOL, Type.COMPACT_NULLABLE_STRING, ""),
|
||||
new Field(LEADER, Type.COMPACT_NULLABLE_STRING, ""),
|
||||
new Field(CURRENT_STATE_TIMESTAMP, Type.INT64, ""),
|
||||
new Field(MEMBERS, new CompactArrayOf(new Schema(
|
||||
new Field(MEMBER_ID, Type.COMPACT_STRING, ""),
|
||||
new Field("protocol_type", Type.COMPACT_STRING, ""),
|
||||
new Field("generation", Type.INT32, ""),
|
||||
new Field("protocol", Type.COMPACT_NULLABLE_STRING, ""),
|
||||
new Field("leader", Type.COMPACT_NULLABLE_STRING, ""),
|
||||
new Field("current_state_timestamp", Type.INT64, ""),
|
||||
new Field("members", new CompactArrayOf(new Schema(
|
||||
new Field("member_id", Type.COMPACT_STRING, ""),
|
||||
new Field("group_instance_id", Type.COMPACT_NULLABLE_STRING, ""),
|
||||
new Field(CLIENT_ID, Type.COMPACT_STRING, ""),
|
||||
new Field(CLIENT_HOST, Type.COMPACT_STRING, ""),
|
||||
new Field(REBALANCE_TIMEOUT, Type.INT32, ""),
|
||||
new Field(SESSION_TIMEOUT, Type.INT32, ""),
|
||||
new Field(SUBSCRIPTION, Type.COMPACT_BYTES, ""),
|
||||
new Field(ASSIGNMENT, Type.COMPACT_BYTES, ""),
|
||||
new Field("client_id", Type.COMPACT_STRING, ""),
|
||||
new Field("client_host", Type.COMPACT_STRING, ""),
|
||||
new Field("rebalance_timeout", Type.INT32, ""),
|
||||
new Field("session_timeout", Type.INT32, ""),
|
||||
new Field("subscription", Type.COMPACT_BYTES, ""),
|
||||
new Field("assignment", Type.COMPACT_BYTES, ""),
|
||||
Field.TaggedFieldsSection.of()
|
||||
)), ""),
|
||||
Field.TaggedFieldsSection.of()
|
||||
|
@ -266,7 +248,7 @@ public class ConsumerOffsetsSerde implements BuiltInSerde {
|
|||
short version = bb.getShort();
|
||||
// ideally, we should distinguish if value is commit or metadata
|
||||
// by checking record's key, but our current serde structure doesn't allow that.
|
||||
// so, we are trying to parse into metadata first and after into commit msg
|
||||
// so, we trying to parse into metadata first and after into commit msg
|
||||
try {
|
||||
result = toJson(
|
||||
switch (version) {
|
||||
|
|
|
@@ -1,89 +0,0 @@
package com.provectus.kafka.ui.serdes.builtin;

import com.provectus.kafka.ui.serde.api.DeserializeResult;
import com.provectus.kafka.ui.serde.api.PropertyResolver;
import com.provectus.kafka.ui.serde.api.SchemaDescription;
import com.provectus.kafka.ui.serdes.BuiltInSerde;
import java.util.HexFormat;
import java.util.Map;
import java.util.Optional;

public class HexSerde implements BuiltInSerde {

private HexFormat deserializeHexFormat;

public static String name() {
return "Hex";
}

@Override
public void autoConfigure(PropertyResolver kafkaClusterProperties, PropertyResolver globalProperties) {
configure(" ", true);
}

@Override
public void configure(PropertyResolver serdeProperties,
PropertyResolver kafkaClusterProperties,
PropertyResolver globalProperties) {
String delim = serdeProperties.getProperty("delimiter", String.class).orElse(" ");
boolean uppercase = serdeProperties.getProperty("uppercase", Boolean.class).orElse(true);
configure(delim, uppercase);
}

private void configure(String delim, boolean uppercase) {
deserializeHexFormat = HexFormat.ofDelimiter(delim);
if (uppercase) {
deserializeHexFormat = deserializeHexFormat.withUpperCase();
}
}

@Override
public Optional<String> getDescription() {
return Optional.empty();
}

@Override
public Optional<SchemaDescription> getSchema(String topic, Target type) {
return Optional.empty();
}

@Override
public boolean canDeserialize(String topic, Target type) {
return true;
}

@Override
public boolean canSerialize(String topic, Target type) {
return true;
}

@Override
public Serializer serializer(String topic, Target type) {
return input -> {
input = input.trim();
// it is a hack to provide ability to sent empty array as a key/value
if (input.length() == 0) {
return new byte[] {};
}
return HexFormat.of().parseHex(prepareInputForParse(input));
};
}

// removing most-common delimiters and prefixes
private static String prepareInputForParse(String input) {
return input
.replaceAll(" ", "")
.replaceAll("#", "")
.replaceAll(":", "");
}

@Override
public Deserializer deserializer(String topic, Target type) {
return (headers, data) ->
new DeserializeResult(
deserializeHexFormat.formatHex(data),
DeserializeResult.Type.STRING,
Map.of()
);
}
}
@@ -2,6 +2,7 @@ package com.provectus.kafka.ui.serdes.builtin;

import com.google.common.primitives.Ints;
import com.provectus.kafka.ui.serde.api.DeserializeResult;
import com.provectus.kafka.ui.serde.api.PropertyResolver;
import com.provectus.kafka.ui.serde.api.SchemaDescription;
import com.provectus.kafka.ui.serdes.BuiltInSerde;
import java.util.Map;

@@ -55,11 +55,15 @@ public class Int64Serde implements BuiltInSerde {
@Override
public Deserializer deserializer(String topic, Target type) {
return (headers, data) ->
new DeserializeResult(
return new Deserializer() {
@Override
public DeserializeResult deserialize(RecordHeaders headers, byte[] data) {
return new DeserializeResult(
String.valueOf(Longs.fromByteArray(data)),
DeserializeResult.Type.JSON,
Map.of()
);
}
};
}
}
@@ -1,59 +0,0 @@
package com.provectus.kafka.ui.serdes.builtin;

import com.google.protobuf.UnknownFieldSet;
import com.provectus.kafka.ui.exception.ValidationException;
import com.provectus.kafka.ui.serde.api.DeserializeResult;
import com.provectus.kafka.ui.serde.api.RecordHeaders;
import com.provectus.kafka.ui.serde.api.SchemaDescription;
import com.provectus.kafka.ui.serdes.BuiltInSerde;
import java.util.Map;
import java.util.Optional;
import lombok.SneakyThrows;

public class ProtobufRawSerde implements BuiltInSerde {

public static String name() {
return "ProtobufDecodeRaw";
}

@Override
public Optional<String> getDescription() {
return Optional.empty();
}

@Override
public Optional<SchemaDescription> getSchema(String topic, Target type) {
return Optional.empty();
}

@Override
public boolean canSerialize(String topic, Target type) {
return false;
}

@Override
public boolean canDeserialize(String topic, Target type) {
return true;
}

@Override
public Serializer serializer(String topic, Target type) {
throw new UnsupportedOperationException();
}

@Override
public Deserializer deserializer(String topic, Target type) {
return new Deserializer() {
@SneakyThrows
@Override
public DeserializeResult deserialize(RecordHeaders headers, byte[] data) {
try {
UnknownFieldSet unknownFields = UnknownFieldSet.parseFrom(data);
return new DeserializeResult(unknownFields.toString(), DeserializeResult.Type.STRING, Map.of());
} catch (Exception e) {
throw new ValidationException(e.getMessage());
}
}
};
}
}
@@ -1,8 +1,10 @@
package com.provectus.kafka.ui.serdes.builtin;

import com.google.common.primitives.Longs;
import com.google.common.primitives.UnsignedInteger;
import com.google.common.primitives.UnsignedLong;
import com.provectus.kafka.ui.serde.api.DeserializeResult;
import com.provectus.kafka.ui.serde.api.RecordHeaders;
import com.provectus.kafka.ui.serde.api.SchemaDescription;
import com.provectus.kafka.ui.serdes.BuiltInSerde;
import java.util.Map;

@@ -30,7 +32,7 @@ public class UInt64Serde implements BuiltInSerde {
+ " \"minimum\" : 0, "
+ " \"maximum\" : %s "
+ "}",
UnsignedLong.MAX_VALUE
UnsignedInteger.MAX_VALUE
),
Map.of()
)

@@ -54,11 +56,15 @@ public class UInt64Serde implements BuiltInSerde {
@Override
public Deserializer deserializer(String topic, Target type) {
return (headers, data) ->
new DeserializeResult(
return new Deserializer() {
@Override
public DeserializeResult deserialize(RecordHeaders headers, byte[] data) {
return new DeserializeResult(
UnsignedLong.fromLongBits(Longs.fromByteArray(data)).toString(),
DeserializeResult.Type.JSON,
Map.of()
);
}
};
}
}
@@ -50,35 +50,41 @@ public class UuidBinarySerde implements BuiltInSerde {
@Override
public Serializer serializer(String topic, Target type) {
return input -> {
UUID uuid = UUID.fromString(input);
ByteBuffer bb = ByteBuffer.wrap(new byte[16]);
if (mostSignificantBitsFirst) {
bb.putLong(uuid.getMostSignificantBits());
bb.putLong(uuid.getLeastSignificantBits());
} else {
bb.putLong(uuid.getLeastSignificantBits());
bb.putLong(uuid.getMostSignificantBits());
return new Serializer() {
@Override
public byte[] serialize(String input) {
UUID uuid = UUID.fromString(input);
ByteBuffer bb = ByteBuffer.wrap(new byte[16]);
if (mostSignificantBitsFirst) {
bb.putLong(uuid.getMostSignificantBits());
bb.putLong(uuid.getLeastSignificantBits());
} else {
bb.putLong(uuid.getLeastSignificantBits());
bb.putLong(uuid.getMostSignificantBits());
}
return bb.array();
}
return bb.array();
};
}

@Override
public Deserializer deserializer(String topic, Target type) {
return (headers, data) -> {
if (data.length != 16) {
throw new ValidationException("UUID data should be 16 bytes, but it is " + data.length);
return new Deserializer() {
@Override
public DeserializeResult deserialize(RecordHeaders headers, byte[] data) {
if (data.length != 16) {
throw new ValidationException("UUID data should be 16 bytes, but it is " + data.length);
}
ByteBuffer bb = ByteBuffer.wrap(data);
long msb = bb.getLong();
long lsb = bb.getLong();
UUID uuid = mostSignificantBitsFirst ? new UUID(msb, lsb) : new UUID(lsb, msb);
return new DeserializeResult(
uuid.toString(),
DeserializeResult.Type.STRING,
Map.of()
);
}
ByteBuffer bb = ByteBuffer.wrap(data);
long msb = bb.getLong();
long lsb = bb.getLong();
UUID uuid = mostSignificantBitsFirst ? new UUID(msb, lsb) : new UUID(lsb, msb);
return new DeserializeResult(
uuid.toString(),
DeserializeResult.Type.STRING,
Map.of()
);
};
}
}
@@ -0,0 +1,46 @@
package com.provectus.kafka.ui.serdes.builtin.sr;

import com.provectus.kafka.ui.util.jsonschema.JsonAvroConversion;
import io.confluent.kafka.schemaregistry.ParsedSchema;
import io.confluent.kafka.schemaregistry.avro.AvroSchema;
import io.confluent.kafka.schemaregistry.client.SchemaMetadata;
import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient;
import io.confluent.kafka.serializers.AbstractKafkaSchemaSerDeConfig;
import io.confluent.kafka.serializers.KafkaAvroSerializer;
import io.confluent.kafka.serializers.KafkaAvroSerializerConfig;
import java.util.Map;
import org.apache.kafka.common.serialization.Serializer;

class AvroSchemaRegistrySerializer extends SchemaRegistrySerializer<Object> {

AvroSchemaRegistrySerializer(String topic, boolean isKey,
SchemaRegistryClient client,
SchemaMetadata schema) {
super(topic, isKey, client, schema);
}

@Override
protected Serializer<Object> createSerializer(SchemaRegistryClient client) {
var serializer = new KafkaAvroSerializer(client);
serializer.configure(
Map.of(
"schema.registry.url", "wontbeused",
AbstractKafkaSchemaSerDeConfig.AUTO_REGISTER_SCHEMAS, false,
KafkaAvroSerializerConfig.AVRO_USE_LOGICAL_TYPE_CONVERTERS_CONFIG, true,
AbstractKafkaSchemaSerDeConfig.USE_LATEST_VERSION, true
),
isKey
);
return serializer;
}

@Override
protected Object serialize(String value, ParsedSchema schema) {
try {
return JsonAvroConversion.convertJsonToAvro(value, ((AvroSchema) schema).rawSchema());
} catch (Throwable e) {
throw new RuntimeException("Failed to serialize record for topic " + topic, e);
}
}
}
@@ -0,0 +1,79 @@
package com.provectus.kafka.ui.serdes.builtin.sr;

import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.provectus.kafka.ui.exception.ValidationException;
import com.provectus.kafka.ui.util.annotation.KafkaClientInternalsDependant;
import io.confluent.kafka.schemaregistry.ParsedSchema;
import io.confluent.kafka.schemaregistry.client.SchemaMetadata;
import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient;
import io.confluent.kafka.schemaregistry.json.JsonSchema;
import io.confluent.kafka.serializers.AbstractKafkaSchemaSerDeConfig;
import io.confluent.kafka.serializers.json.KafkaJsonSchemaSerializer;
import java.util.Map;
import org.apache.kafka.common.serialization.Serializer;

class JsonSchemaSchemaRegistrySerializer extends SchemaRegistrySerializer<JsonNode> {

private static final ObjectMapper MAPPER = new ObjectMapper();

JsonSchemaSchemaRegistrySerializer(String topic,
boolean isKey,
SchemaRegistryClient client,
SchemaMetadata schema) {
super(topic, isKey, client, schema);
}

@Override
protected Serializer<JsonNode> createSerializer(SchemaRegistryClient client) {
var serializer = new KafkaJsonSchemaSerializerWithoutSchemaInfer(client);
serializer.configure(
Map.of(
"schema.registry.url", "wontbeused",
AbstractKafkaSchemaSerDeConfig.AUTO_REGISTER_SCHEMAS, false,
AbstractKafkaSchemaSerDeConfig.USE_LATEST_VERSION, true
),
isKey
);
return serializer;
}

@Override
protected JsonNode serialize(String value, ParsedSchema schema) {
try {
JsonNode json = MAPPER.readTree(value);
((JsonSchema) schema).validate(json);
return json;
} catch (JsonProcessingException e) {
throw new ValidationException(String.format("'%s' is not valid json", value));
} catch (org.everit.json.schema.ValidationException e) {
throw new ValidationException(
String.format("'%s' does not fit schema: %s", value, e.getAllMessages()));
}
}

@KafkaClientInternalsDependant
private class KafkaJsonSchemaSerializerWithoutSchemaInfer
extends KafkaJsonSchemaSerializer<JsonNode> {

KafkaJsonSchemaSerializerWithoutSchemaInfer(SchemaRegistryClient client) {
super(client);
}

/**
* Need to override original method because it tries to infer schema from input
* by checking 'schema' json field or @Schema annotation on input class, which is not
* possible in our case. So, we just skip all infer logic and pass schema directly.
*/
@Override
public byte[] serialize(String topic, JsonNode rec) {
return super.serializeImpl(
super.getSubjectName(topic, isKey, rec, schema),
rec,
(JsonSchema) schema
);
}
}
}
@@ -0,0 +1,50 @@
package com.provectus.kafka.ui.serdes.builtin.sr;

import com.google.protobuf.DynamicMessage;
import com.google.protobuf.Message;
import com.google.protobuf.util.JsonFormat;
import io.confluent.kafka.schemaregistry.ParsedSchema;
import io.confluent.kafka.schemaregistry.client.SchemaMetadata;
import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient;
import io.confluent.kafka.schemaregistry.protobuf.ProtobufSchema;
import io.confluent.kafka.serializers.AbstractKafkaSchemaSerDeConfig;
import io.confluent.kafka.serializers.protobuf.KafkaProtobufSerializer;
import java.util.Map;
import lombok.SneakyThrows;
import org.apache.kafka.common.serialization.Serializer;

class ProtobufSchemaRegistrySerializer extends SchemaRegistrySerializer<Message> {

@SneakyThrows
public ProtobufSchemaRegistrySerializer(String topic, boolean isKey,
SchemaRegistryClient client, SchemaMetadata schema) {
super(topic, isKey, client, schema);
}

@Override
protected Serializer<Message> createSerializer(SchemaRegistryClient client) {
var serializer = new KafkaProtobufSerializer<>(client);
serializer.configure(
Map.of(
"schema.registry.url", "wontbeused",
AbstractKafkaSchemaSerDeConfig.AUTO_REGISTER_SCHEMAS, false,
AbstractKafkaSchemaSerDeConfig.USE_LATEST_VERSION, true
),
isKey
);
return serializer;
}

@Override
protected Message serialize(String value, ParsedSchema schema) {
ProtobufSchema protobufSchema = (ProtobufSchema) schema;
DynamicMessage.Builder builder = protobufSchema.newMessageBuilder();
try {
JsonFormat.parser().merge(value, builder);
return builder.build();
} catch (Throwable e) {
throw new RuntimeException("Failed to serialize record for topic " + topic, e);
}
}
}
@@ -1,8 +1,5 @@
package com.provectus.kafka.ui.serdes.builtin.sr;

import static com.provectus.kafka.ui.serdes.builtin.sr.Serialize.serializeAvro;
import static com.provectus.kafka.ui.serdes.builtin.sr.Serialize.serializeJson;
import static com.provectus.kafka.ui.serdes.builtin.sr.Serialize.serializeProto;
import static io.confluent.kafka.serializers.AbstractKafkaSchemaSerDeConfig.BASIC_AUTH_CREDENTIALS_SOURCE;
import static io.confluent.kafka.serializers.AbstractKafkaSchemaSerDeConfig.USER_INFO_CONFIG;

@@ -10,6 +7,7 @@ import com.google.common.annotations.VisibleForTesting;
import com.provectus.kafka.ui.exception.ValidationException;
import com.provectus.kafka.ui.serde.api.DeserializeResult;
import com.provectus.kafka.ui.serde.api.PropertyResolver;
import com.provectus.kafka.ui.serde.api.RecordHeaders;
import com.provectus.kafka.ui.serde.api.SchemaDescription;
import com.provectus.kafka.ui.serdes.BuiltInSerde;
import com.provectus.kafka.ui.util.jsonschema.AvroJsonSchemaConverter;

@@ -22,7 +20,6 @@ import io.confluent.kafka.schemaregistry.client.SchemaMetadata;
import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient;
import io.confluent.kafka.schemaregistry.client.SchemaRegistryClientConfig;
import io.confluent.kafka.schemaregistry.client.rest.exceptions.RestClientException;
import io.confluent.kafka.schemaregistry.json.JsonSchema;
import io.confluent.kafka.schemaregistry.json.JsonSchemaProvider;
import io.confluent.kafka.schemaregistry.protobuf.ProtobufSchema;
import io.confluent.kafka.schemaregistry.protobuf.ProtobufSchemaProvider;

@@ -34,21 +31,17 @@ import java.util.Map;
import java.util.Optional;
import java.util.concurrent.Callable;
import javax.annotation.Nullable;
import lombok.RequiredArgsConstructor;
import lombok.SneakyThrows;
import org.apache.kafka.common.config.SslConfigs;

public class SchemaRegistrySerde implements BuiltInSerde {

private static final byte SR_PAYLOAD_MAGIC_BYTE = 0x0;
private static final int SR_PAYLOAD_PREFIX_LENGTH = 5;

public static String name() {
return "SchemaRegistry";
}

private static final String SCHEMA_REGISTRY = "schemaRegistry";

private SchemaRegistryClient schemaRegistryClient;
private List<String> schemaRegistryUrls;
private String valueSchemaNameTemplate;

@@ -60,7 +53,7 @@ public class SchemaRegistrySerde implements BuiltInSerde {
@Override
public boolean canBeAutoConfigured(PropertyResolver kafkaClusterProperties,
PropertyResolver globalProperties) {
return kafkaClusterProperties.getListProperty(SCHEMA_REGISTRY, String.class)
return kafkaClusterProperties.getListProperty("schemaRegistry", String.class)
.filter(lst -> !lst.isEmpty())
.isPresent();
}

@@ -68,7 +61,7 @@ public class SchemaRegistrySerde implements BuiltInSerde {
@Override
public void autoConfigure(PropertyResolver kafkaClusterProperties,
PropertyResolver globalProperties) {
var urls = kafkaClusterProperties.getListProperty(SCHEMA_REGISTRY, String.class)
var urls = kafkaClusterProperties.getListProperty("schemaRegistry", String.class)
.filter(lst -> !lst.isEmpty())
.orElseThrow(() -> new ValidationException("No urls provided for schema registry"));
configure(

@@ -94,7 +87,7 @@ public class SchemaRegistrySerde implements BuiltInSerde {
PropertyResolver kafkaClusterProperties,
PropertyResolver globalProperties) {
var urls = serdeProperties.getListProperty("url", String.class)
.or(() -> kafkaClusterProperties.getListProperty(SCHEMA_REGISTRY, String.class))
.or(() -> kafkaClusterProperties.getListProperty("schemaRegistry", String.class))
.filter(lst -> !lst.isEmpty())
.orElseThrow(() -> new ValidationException("No urls provided for schema registry"));
configure(

@@ -224,9 +217,7 @@ public class SchemaRegistrySerde implements BuiltInSerde {
case AVRO -> new AvroJsonSchemaConverter()
.convert(basePath, ((AvroSchema) parsedSchema).rawSchema())
.toJson();
case JSON ->
//need to use confluent JsonSchema since it includes resolved references
((JsonSchema) parsedSchema).rawSchema().toString();
case JSON -> schema.getSchema();
};
}

@@ -258,27 +249,35 @@ public class SchemaRegistrySerde implements BuiltInSerde {
@Override
public Serializer serializer(String topic, Target type) {
String subject = schemaSubject(topic, type);
SchemaMetadata meta = getSchemaBySubject(subject)
.orElseThrow(() -> new ValidationException(
String.format("No schema for subject '%s' found", subject)));
ParsedSchema schema = getSchemaById(meta.getId())
.orElseThrow(() -> new IllegalStateException(
String.format("Schema found for id %s, subject '%s'", meta.getId(), subject)));
SchemaType schemaType = SchemaType.fromString(meta.getSchemaType())
.orElseThrow(() -> new IllegalStateException("Unknown schema type: " + meta.getSchemaType()));
var schema = getSchemaBySubject(subject)
.orElseThrow(() -> new ValidationException(String.format("No schema for subject '%s' found", subject)));
boolean isKey = type == Target.KEY;
SchemaType schemaType = SchemaType.fromString(schema.getSchemaType())
.orElseThrow(() -> new IllegalStateException("Unknown schema type: " + schema.getSchemaType()));
return switch (schemaType) {
case PROTOBUF -> input ->
serializeProto(schemaRegistryClient, topic, type, (ProtobufSchema) schema, meta.getId(), input);
case AVRO -> input ->
serializeAvro((AvroSchema) schema, meta.getId(), input);
case JSON -> input ->
serializeJson((JsonSchema) schema, meta.getId(), input);
case PROTOBUF -> new ProtobufSchemaRegistrySerializer(topic, isKey, schemaRegistryClient, schema);
case AVRO -> new AvroSchemaRegistrySerializer(topic, isKey, schemaRegistryClient, schema);
case JSON -> new JsonSchemaSchemaRegistrySerializer(topic, isKey, schemaRegistryClient, schema);
};
}

@Override
public Deserializer deserializer(String topic, Target type) {
return (headers, data) -> {
return new SrDeserializer(topic);
}

///--------------------------------------------------------------

private static final byte SR_RECORD_MAGIC_BYTE = (byte) 0;
private static final int SR_RECORD_PREFIX_LENGTH = 5;

@RequiredArgsConstructor
private class SrDeserializer implements Deserializer {

private final String topic;

@Override
public DeserializeResult deserialize(RecordHeaders headers, byte[] data) {
var schemaId = extractSchemaIdFromMsg(data);
SchemaType format = getMessageFormatBySchemaId(schemaId);
MessageFormatter formatter = schemaRegistryFormatters.get(format);

@@ -290,7 +289,7 @@ public class SchemaRegistrySerde implements BuiltInSerde {
"type", format.name()
)
);
};
}
}

private SchemaType getMessageFormatBySchemaId(int schemaId) {

@@ -302,7 +301,7 @@ public class SchemaRegistrySerde implements BuiltInSerde {
private int extractSchemaIdFromMsg(byte[] data) {
ByteBuffer buffer = ByteBuffer.wrap(data);
if (buffer.remaining() >= SR_PAYLOAD_PREFIX_LENGTH && buffer.get() == SR_PAYLOAD_MAGIC_BYTE) {
if (buffer.remaining() > SR_RECORD_PREFIX_LENGTH && buffer.get() == SR_RECORD_MAGIC_BYTE) {
return buffer.getInt();
}
throw new ValidationException(
@@ -0,0 +1,34 @@
package com.provectus.kafka.ui.serdes.builtin.sr;

import com.provectus.kafka.ui.serde.api.Serde;
import io.confluent.kafka.schemaregistry.ParsedSchema;
import io.confluent.kafka.schemaregistry.client.SchemaMetadata;
import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient;
import lombok.SneakyThrows;
import org.apache.kafka.common.serialization.Serializer;

abstract class SchemaRegistrySerializer<T> implements Serde.Serializer {
protected final Serializer<T> serializer;
protected final String topic;
protected final boolean isKey;
protected final ParsedSchema schema;

@SneakyThrows
protected SchemaRegistrySerializer(String topic, boolean isKey, SchemaRegistryClient client,
SchemaMetadata schema) {
this.topic = topic;
this.isKey = isKey;
this.serializer = createSerializer(client);
this.schema = client.getSchemaById(schema.getId());
}

protected abstract Serializer<T> createSerializer(SchemaRegistryClient client);

@Override
public byte[] serialize(String input) {
final T read = this.serialize(input, schema);
return this.serializer.serialize(topic, read);
}

protected abstract T serialize(String value, ParsedSchema schema);
}
@@ -1,126 +0,0 @@
package com.provectus.kafka.ui.serdes.builtin.sr;

import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.base.Preconditions;
import com.google.protobuf.DynamicMessage;
import com.google.protobuf.Message;
import com.google.protobuf.util.JsonFormat;
import com.provectus.kafka.ui.exception.ValidationException;
import com.provectus.kafka.ui.serde.api.Serde;
import com.provectus.kafka.ui.util.annotation.KafkaClientInternalsDependant;
import com.provectus.kafka.ui.util.jsonschema.JsonAvroConversion;
import io.confluent.kafka.schemaregistry.avro.AvroSchema;
import io.confluent.kafka.schemaregistry.avro.AvroSchemaUtils;
import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient;
import io.confluent.kafka.schemaregistry.json.JsonSchema;
import io.confluent.kafka.schemaregistry.json.jackson.Jackson;
import io.confluent.kafka.schemaregistry.protobuf.MessageIndexes;
import io.confluent.kafka.schemaregistry.protobuf.ProtobufSchema;
import io.confluent.kafka.serializers.protobuf.AbstractKafkaProtobufSerializer;
import io.confluent.kafka.serializers.subject.DefaultReferenceSubjectNameStrategy;
import java.io.ByteArrayOutputStream;
import java.nio.ByteBuffer;
import java.util.HashMap;
import lombok.SneakyThrows;
import org.apache.avro.Schema;
import org.apache.avro.io.BinaryEncoder;
import org.apache.avro.io.DatumWriter;
import org.apache.avro.io.EncoderFactory;

final class Serialize {

private static final byte MAGIC = 0x0;
private static final ObjectMapper JSON_SERIALIZE_MAPPER = Jackson.newObjectMapper(); //from confluent package

private Serialize() {
}

@KafkaClientInternalsDependant("AbstractKafkaJsonSchemaSerializer::serializeImpl")
@SneakyThrows
static byte[] serializeJson(JsonSchema schema, int schemaId, String value) {
JsonNode json;
try {
json = JSON_SERIALIZE_MAPPER.readTree(value);
} catch (JsonProcessingException e) {
throw new ValidationException(String.format("'%s' is not valid json", value));
}
try {
schema.validate(json);
} catch (org.everit.json.schema.ValidationException e) {
throw new ValidationException(
String.format("'%s' does not fit schema: %s", value, e.getAllMessages()));
}
try (var out = new ByteArrayOutputStream()) {
out.write(MAGIC);
out.write(schemaId(schemaId));
out.write(JSON_SERIALIZE_MAPPER.writeValueAsBytes(json));
return out.toByteArray();
}
}

@KafkaClientInternalsDependant("AbstractKafkaProtobufSerializer::serializeImpl")
@SneakyThrows
static byte[] serializeProto(SchemaRegistryClient srClient,
String topic,
Serde.Target target,
ProtobufSchema schema,
int schemaId,
String input) {
// flags are tuned like in ProtobufSerializer by default
boolean normalizeSchema = false;
boolean autoRegisterSchema = false;
boolean useLatestVersion = true;
boolean latestCompatStrict = true;
boolean skipKnownTypes = true;

schema = AbstractKafkaProtobufSerializer.resolveDependencies(
srClient, normalizeSchema, autoRegisterSchema, useLatestVersion, latestCompatStrict,
new HashMap<>(), skipKnownTypes, new DefaultReferenceSubjectNameStrategy(),
topic, target == Serde.Target.KEY, schema
);

DynamicMessage.Builder builder = schema.newMessageBuilder();
JsonFormat.parser().merge(input, builder);
Message message = builder.build();
MessageIndexes indexes = schema.toMessageIndexes(message.getDescriptorForType().getFullName(), normalizeSchema);
try (var out = new ByteArrayOutputStream()) {
out.write(MAGIC);
out.write(schemaId(schemaId));
out.write(indexes.toByteArray());
message.writeTo(out);
return out.toByteArray();
}
}

@KafkaClientInternalsDependant("AbstractKafkaAvroSerializer::serializeImpl")
@SneakyThrows
static byte[] serializeAvro(AvroSchema schema, int schemaId, String input) {
var avroObject = JsonAvroConversion.convertJsonToAvro(input, schema.rawSchema());
try (var out = new ByteArrayOutputStream()) {
out.write(MAGIC);
out.write(schemaId(schemaId));
Schema rawSchema = schema.rawSchema();
if (rawSchema.getType().equals(Schema.Type.BYTES)) {
Preconditions.checkState(
avroObject instanceof ByteBuffer,
"Unrecognized bytes object of type: " + avroObject.getClass().getName()
);
out.write(((ByteBuffer) avroObject).array());
} else {
boolean useLogicalTypeConverters = true;
BinaryEncoder encoder = EncoderFactory.get().directBinaryEncoder(out, null);
DatumWriter<Object> writer =
(DatumWriter<Object>) AvroSchemaUtils.getDatumWriter(avroObject, rawSchema, useLogicalTypeConverters);
writer.write(avroObject, encoder);
encoder.flush();
}
return out.toByteArray();
}
}

private static byte[] schemaId(int id) {
return ByteBuffer.allocate(Integer.BYTES).putInt(id).array();
}
}
@@ -2,14 +2,12 @@ package com.provectus.kafka.ui.service;

import com.google.common.collect.Streams;
import com.google.common.collect.Table;
import com.provectus.kafka.ui.emitter.EnhancedConsumer;
import com.provectus.kafka.ui.model.ConsumerGroupOrderingDTO;
import com.provectus.kafka.ui.model.InternalConsumerGroup;
import com.provectus.kafka.ui.model.InternalTopicConsumerGroup;
import com.provectus.kafka.ui.model.KafkaCluster;
import com.provectus.kafka.ui.model.SortOrderDTO;
import com.provectus.kafka.ui.service.rbac.AccessControlService;
import com.provectus.kafka.ui.util.ApplicationMetrics;
import com.provectus.kafka.ui.util.SslPropertiesUtil;
import java.util.ArrayList;
import java.util.Collection;

@@ -28,8 +26,11 @@ import org.apache.kafka.clients.admin.ConsumerGroupDescription;
import org.apache.kafka.clients.admin.ConsumerGroupListing;
import org.apache.kafka.clients.admin.OffsetSpec;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.ConsumerGroupState;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.BytesDeserializer;
import org.apache.kafka.common.utils.Bytes;
import org.springframework.stereotype.Service;
import reactor.core.publisher.Mono;

@@ -247,27 +248,25 @@ public class ConsumerGroupService {
.flatMap(adminClient -> adminClient.deleteConsumerGroups(List.of(groupId)));
}

public EnhancedConsumer createConsumer(KafkaCluster cluster) {
public KafkaConsumer<Bytes, Bytes> createConsumer(KafkaCluster cluster) {
return createConsumer(cluster, Map.of());
}

public EnhancedConsumer createConsumer(KafkaCluster cluster,
Map<String, Object> properties) {
public KafkaConsumer<Bytes, Bytes> createConsumer(KafkaCluster cluster,
Map<String, Object> properties) {
Properties props = new Properties();
SslPropertiesUtil.addKafkaSslProperties(cluster.getOriginalProperties().getSsl(), props);
props.putAll(cluster.getProperties());
props.put(ConsumerConfig.CLIENT_ID_CONFIG, "kafka-ui-consumer-" + System.currentTimeMillis());
props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, cluster.getBootstrapServers());
props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, BytesDeserializer.class);
props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, BytesDeserializer.class);
props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
props.put(ConsumerConfig.ALLOW_AUTO_CREATE_TOPICS_CONFIG, "false");
props.putAll(properties);

return new EnhancedConsumer(
props,
cluster.getPollingSettings().getPollingThrottler(),
ApplicationMetrics.forCluster(cluster)
);
return new KafkaConsumer<>(props);
}

}
@@ -102,8 +102,7 @@ public class DeserializationService implements Closeable {
valueSerde.deserializer(topic, Serde.Target.VALUE),
fallbackSerde.getName(),
fallbackSerde.deserializer(topic, Serde.Target.KEY),
fallbackSerde.deserializer(topic, Serde.Target.VALUE),
cluster.getMasking().getMaskerForTopic(topic)
fallbackSerde.deserializer(topic, Serde.Target.VALUE)
);
}
@@ -28,6 +28,7 @@ import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.function.Predicate;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import javax.annotation.Nullable;
import lombok.RequiredArgsConstructor;

@@ -38,6 +39,7 @@ import org.springframework.stereotype.Service;
import org.springframework.web.reactive.function.client.WebClientResponseException;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;
import reactor.util.function.Tuples;

@Service
@Slf4j

@@ -59,22 +61,39 @@ public class KafkaConnectService {
public Flux<FullConnectorInfoDTO> getAllConnectors(final KafkaCluster cluster,
@Nullable final String search) {
return getConnects(cluster)
.flatMap(connect ->
getConnectorNamesWithErrorsSuppress(cluster, connect.getName())
.flatMap(connectorName ->
Mono.zip(
getConnector(cluster, connect.getName(), connectorName),
getConnectorConfig(cluster, connect.getName(), connectorName),
getConnectorTasks(cluster, connect.getName(), connectorName).collectList(),
getConnectorTopics(cluster, connect.getName(), connectorName)
).map(tuple ->
InternalConnectInfo.builder()
.connector(tuple.getT1())
.config(tuple.getT2())
.tasks(tuple.getT3())
.topics(tuple.getT4().getTopics())
.build())))
.map(kafkaConnectMapper::fullConnectorInfo)
.flatMap(connect -> getConnectorNames(cluster, connect.getName()).map(cn -> Tuples.of(connect.getName(), cn)))
.flatMap(pair -> getConnector(cluster, pair.getT1(), pair.getT2()))
.flatMap(connector ->
getConnectorConfig(cluster, connector.getConnect(), connector.getName())
.map(config -> InternalConnectInfo.builder()
.connector(connector)
.config(config)
.build()
)
)
.flatMap(connectInfo -> {
ConnectorDTO connector = connectInfo.getConnector();
return getConnectorTasks(cluster, connector.getConnect(), connector.getName())
.collectList()
.map(tasks -> InternalConnectInfo.builder()
.connector(connector)
.config(connectInfo.getConfig())
.tasks(tasks)
.build()
);
})
.flatMap(connectInfo -> {
ConnectorDTO connector = connectInfo.getConnector();
return getConnectorTopics(cluster, connector.getConnect(), connector.getName())
.map(ct -> InternalConnectInfo.builder()
.connector(connector)
.config(connectInfo.getConfig())
.tasks(connectInfo.getTasks())
.topics(ct.getTopics())
.build()
);
})
.map(kafkaConnectMapper::fullConnectorInfoFromTuple)
.filter(matchesSearchTerm(search));
}

@@ -113,11 +132,6 @@ public class KafkaConnectService {
.flatMapMany(Flux::fromIterable);
}

// returns empty flux if there was an error communicating with Connect
public Flux<String> getConnectorNamesWithErrorsSuppress(KafkaCluster cluster, String connectName) {
return getConnectorNames(cluster, connectName).onErrorComplete();
}

@SneakyThrows
private List<String> parseConnectorsNamesStringToList(String json) {
return objectMapper.readValue(json, new TypeReference<>() {
@@ -2,9 +2,10 @@ package com.provectus.kafka.ui.service;

import com.google.common.util.concurrent.RateLimiter;
import com.provectus.kafka.ui.config.ClustersProperties;
import com.provectus.kafka.ui.emitter.BackwardEmitter;
import com.provectus.kafka.ui.emitter.ForwardEmitter;
import com.provectus.kafka.ui.emitter.BackwardRecordEmitter;
import com.provectus.kafka.ui.emitter.ForwardRecordEmitter;
import com.provectus.kafka.ui.emitter.MessageFilters;
import com.provectus.kafka.ui.emitter.MessagesProcessing;
import com.provectus.kafka.ui.emitter.TailingEmitter;
import com.provectus.kafka.ui.exception.TopicNotFoundException;
import com.provectus.kafka.ui.exception.ValidationException;

@@ -17,6 +18,7 @@ import com.provectus.kafka.ui.model.SmartFilterTestExecutionDTO;
import com.provectus.kafka.ui.model.SmartFilterTestExecutionResultDTO;
import com.provectus.kafka.ui.model.TopicMessageDTO;
import com.provectus.kafka.ui.model.TopicMessageEventDTO;
import com.provectus.kafka.ui.serde.api.Serde;
import com.provectus.kafka.ui.serdes.ProducerRecordCreator;
import com.provectus.kafka.ui.util.SslPropertiesUtil;
import java.time.Instant;

@@ -43,6 +45,7 @@ import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.ByteArraySerializer;
import org.springframework.stereotype.Service;
import reactor.core.publisher.Flux;
import reactor.core.publisher.FluxSink;
import reactor.core.publisher.Mono;
import reactor.core.scheduler.Schedulers;

@@ -228,26 +231,56 @@ public class MessagesService {
@Nullable String keySerde,
@Nullable String valueSerde) {

var deserializer = deserializationService.deserializerFor(cluster, topic, keySerde, valueSerde);
var filter = getMsgFilter(query, filterQueryType);
var emitter = switch (seekDirection) {
case FORWARD -> new ForwardEmitter(
java.util.function.Consumer<? super FluxSink<TopicMessageEventDTO>> emitter;

var processing = new MessagesProcessing(
deserializationService.deserializerFor(cluster, topic, keySerde, valueSerde),
getMsgFilter(query, filterQueryType),
seekDirection == SeekDirectionDTO.TAILING ? null : limit
);

if (seekDirection.equals(SeekDirectionDTO.FORWARD)) {
emitter = new ForwardRecordEmitter(
() -> consumerGroupService.createConsumer(cluster),
consumerPosition, limit, deserializer, filter, cluster.getPollingSettings()
consumerPosition,
processing,
cluster.getPollingSettings()
);
case BACKWARD -> new BackwardEmitter(
} else if (seekDirection.equals(SeekDirectionDTO.BACKWARD)) {
emitter = new BackwardRecordEmitter(
() -> consumerGroupService.createConsumer(cluster),
consumerPosition, limit, deserializer, filter, cluster.getPollingSettings()
consumerPosition,
limit,
processing,
cluster.getPollingSettings()
);
case TAILING -> new TailingEmitter(
} else {
emitter = new TailingEmitter(
() -> consumerGroupService.createConsumer(cluster),
consumerPosition, deserializer, filter, cluster.getPollingSettings()
consumerPosition,
processing,
cluster.getPollingSettings()
);
};
}
return Flux.create(emitter)
.map(getDataMasker(cluster, topic))
.map(throttleUiPublish(seekDirection));
}

private UnaryOperator<TopicMessageEventDTO> getDataMasker(KafkaCluster cluster, String topicName) {
var keyMasker = cluster.getMasking().getMaskingFunction(topicName, Serde.Target.KEY);
var valMasker = cluster.getMasking().getMaskingFunction(topicName, Serde.Target.VALUE);
return evt -> {
if (evt.getType() != TopicMessageEventDTO.TypeEnum.MESSAGE) {
return evt;
}
return evt.message(
evt.getMessage()
.key(keyMasker.apply(evt.getMessage().getKey()))
.content(valMasker.apply(evt.getMessage().getContent())));
};
}

private Predicate<TopicMessageDTO> getMsgFilter(String query,
MessageFilterTypeDTO filterQueryType) {
if (StringUtils.isEmpty(query)) {
@@ -15,8 +15,6 @@ import com.provectus.kafka.ui.exception.ValidationException;
import com.provectus.kafka.ui.util.KafkaVersion;
import com.provectus.kafka.ui.util.annotation.KafkaClientInternalsDependant;
import java.io.Closeable;
import java.time.Duration;
import java.time.temporal.ChronoUnit;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;

@@ -31,7 +29,6 @@ import java.util.function.BiFunction;
import java.util.function.Function;
import java.util.function.Predicate;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import java.util.stream.Stream;
import javax.annotation.Nullable;
import lombok.AccessLevel;

@@ -56,7 +53,6 @@ import org.apache.kafka.clients.admin.NewPartitionReassignment;
import org.apache.kafka.clients.admin.NewPartitions;
import org.apache.kafka.clients.admin.NewTopic;
import org.apache.kafka.clients.admin.OffsetSpec;
import org.apache.kafka.clients.admin.ProducerState;
import org.apache.kafka.clients.admin.RecordsToDelete;
import org.apache.kafka.clients.admin.TopicDescription;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;

@@ -133,41 +129,38 @@ public class ReactiveAdminClient implements Closeable {
Set<SupportedFeature> features,
boolean topicDeletionIsAllowed) {

static final Duration UPDATE_DURATION = Duration.of(1, ChronoUnit.HOURS);

private static Mono<ConfigRelatedInfo> extract(AdminClient ac) {
return ReactiveAdminClient.describeClusterImpl(ac, Set.of())
.flatMap(desc -> {
// choosing node from which we will get configs (starting with controller)
var targetNodeId = Optional.ofNullable(desc.controller)
.map(Node::id)
.orElse(desc.getNodes().iterator().next().id());
return loadBrokersConfig(ac, List.of(targetNodeId))
.map(map -> map.isEmpty() ? List.<ConfigEntry>of() : map.get(targetNodeId))
.flatMap(configs -> {
String version = "1.0-UNKNOWN";
boolean topicDeletionEnabled = true;
for (ConfigEntry entry : configs) {
if (entry.name().contains("inter.broker.protocol.version")) {
version = entry.value();
}
if (entry.name().equals("delete.topic.enable")) {
topicDeletionEnabled = Boolean.parseBoolean(entry.value());
}
}
final String finalVersion = version;
final boolean finalTopicDeletionEnabled = topicDeletionEnabled;
return SupportedFeature.forVersion(ac, version)
.map(features -> new ConfigRelatedInfo(finalVersion, features, finalTopicDeletionEnabled));
});
})
.cache(UPDATE_DURATION);
private static Mono<ConfigRelatedInfo> extract(AdminClient ac, int controllerId) {
return loadBrokersConfig(ac, List.of(controllerId))
.map(map -> map.isEmpty() ? List.<ConfigEntry>of() : map.get(controllerId))
.flatMap(configs -> {
String version = "1.0-UNKNOWN";
boolean topicDeletionEnabled = true;
for (ConfigEntry entry : configs) {
if (entry.name().contains("inter.broker.protocol.version")) {
version = entry.value();
}
if (entry.name().equals("delete.topic.enable")) {
topicDeletionEnabled = Boolean.parseBoolean(entry.value());
}
}
var builder = ConfigRelatedInfo.builder()
.version(version)
.topicDeletionIsAllowed(topicDeletionEnabled);
return SupportedFeature.forVersion(ac, version)
.map(features -> builder.features(features).build());
});
}
}

public static Mono<ReactiveAdminClient> create(AdminClient adminClient) {
Mono<ConfigRelatedInfo> configRelatedInfoMono = ConfigRelatedInfo.extract(adminClient);
return configRelatedInfoMono.map(info -> new ReactiveAdminClient(adminClient, configRelatedInfoMono, info));
return describeClusterImpl(adminClient, Set.of())
// choosing node from which we will get configs (starting with controller)
.flatMap(descr -> descr.controller != null
? Mono.just(descr.controller)
: Mono.justOrEmpty(descr.nodes.stream().findFirst())
)
.flatMap(node -> ConfigRelatedInfo.extract(adminClient, node.id()))
.map(info -> new ReactiveAdminClient(adminClient, info));
}

@@ -177,7 +170,7 @@ public class ReactiveAdminClient implements Closeable {
.doOnError(th -> !(th instanceof SecurityDisabledException)
&& !(th instanceof InvalidRequestException)
&& !(th instanceof UnsupportedVersionException),
th -> log.debug("Error checking if security enabled", th))
th -> log.warn("Error checking if security enabled", th))
.onErrorReturn(false);
}

@@ -209,8 +202,6 @@ public class ReactiveAdminClient implements Closeable {
@Getter(AccessLevel.PACKAGE) // visible for testing
private final AdminClient client;
private final Mono<ConfigRelatedInfo> configRelatedInfoMono;

private volatile ConfigRelatedInfo configRelatedInfo;

public Set<SupportedFeature> getClusterFeatures() {

@@ -237,7 +228,7 @@ public class ReactiveAdminClient implements Closeable {
if (controller == null) {
return Mono.empty();
}
return configRelatedInfoMono
return ConfigRelatedInfo.extract(client, controller.id())
.doOnNext(info -> this.configRelatedInfo = info)
.then();
}

@@ -660,21 +651,6 @@ public class ReactiveAdminClient implements Closeable {
return toMono(client.alterReplicaLogDirs(replicaAssignment).all());
}

// returns tp -> list of active producer's states (if any)
public Mono<Map<TopicPartition, List<ProducerState>>> getActiveProducersState(String topic) {
return describeTopic(topic)
.map(td -> client.describeProducers(
IntStream.range(0, td.partitions().size())
.mapToObj(i -> new TopicPartition(topic, i))
.toList()
).all()
)
.flatMap(ReactiveAdminClient::toMono)
.map(map -> map.entrySet().stream()
.filter(e -> !e.getValue().activeProducers().isEmpty()) // skipping partitions without producers
.collect(toMap(Map.Entry::getKey, e -> e.getValue().activeProducers())));
}

private Mono<Void> incrementalAlterConfig(String topicName,
List<ConfigEntry> currentConfigs,
Map<String, String> newConfigs) {
@@ -14,7 +14,8 @@ import com.provectus.kafka.ui.sr.model.CompatibilityLevelChange;
import com.provectus.kafka.ui.sr.model.NewSubject;
import com.provectus.kafka.ui.sr.model.SchemaSubject;
import com.provectus.kafka.ui.util.ReactiveFailover;
import java.nio.charset.Charset;
import com.provectus.kafka.ui.util.WebClientConfigurator;
import java.io.IOException;
import java.util.List;
import java.util.stream.Collectors;
import lombok.AllArgsConstructor;

@@ -91,7 +92,7 @@ public class SchemaRegistryService {
private Mono<SubjectWithCompatibilityLevel> getSchemaSubject(KafkaCluster cluster, String schemaName,
String version) {
return api(cluster)
.mono(c -> c.getSubjectVersion(schemaName, version, false))
.mono(c -> c.getSubjectVersion(schemaName, version))
.zipWith(getSchemaCompatibilityInfoOrGlobal(cluster, schemaName))
.map(t -> new SubjectWithCompatibilityLevel(t.getT1(), t.getT2()))
.onErrorResume(WebClientResponseException.NotFound.class, th -> Mono.error(new SchemaNotFoundException()));

@@ -125,7 +126,7 @@ public class SchemaRegistryService {
.onErrorMap(WebClientResponseException.Conflict.class,
th -> new SchemaCompatibilityException())
.onErrorMap(WebClientResponseException.UnprocessableEntity.class,
th -> new ValidationException("Invalid schema. Error from registry: " + th.getResponseBodyAsString()))
th -> new ValidationException("Invalid schema"))
.then(getLatestSchemaVersionBySubject(cluster, subject));
}
Some files were not shown because too many files have changed in this diff.