Compare commits


52 commits

Author SHA1 Message Date
Vladysl
83b5a60cc0
added commons-collections4 library instead of commons-collections (#4427)
Co-authored-by: Narekmat <47845266+Narekmat@users.noreply.github.com>
2024-04-08 18:46:54 +00:00
Vladysl
3dc4446321
added env variable filtering.groovy.enabled which allows to enable/disable groovy script executions (#4426) 2024-04-08 22:30:38 +04:00
dependabot[bot]
53a6553765
Bump vite-tsconfig-paths from 4.2.0 to 4.2.1 in /kafka-ui-react-app (#4215)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-09-13 14:17:40 +04:00
dependabot[bot]
fc97dfa874
Bump react-router-dom from 6.3.0 to 6.15.0 in /kafka-ui-react-app (#4217)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-09-13 14:17:26 +04:00
Edward Muller
68f08a0c9b
Bump Spring Boot (#4227) 2023-09-13 14:08:13 +04:00
dependabot[bot]
cc12814a95
Bump @typescript-eslint/parser from 5.29.0 to 5.62.0 in /kafka-ui-react-app (#4214)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-09-12 18:23:14 +04:00
dependabot[bot]
5d5358010b
Bump @testing-library/dom from 9.0.0 to 9.3.1 in /kafka-ui-react-app (#4216)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-09-12 18:23:05 +04:00
dependabot[bot]
de2f06ccf8
Bump react-hot-toast from 2.4.0 to 2.4.1 in /kafka-ui-react-app (#4218)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-09-12 18:22:49 +04:00
dependabot[bot]
ff106a2061
Bump @openapitools/openapi-generator-cli from 2.5.2 to 2.7.0 in /kafka-ui-react-app (#4219)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-09-12 18:22:37 +04:00
dependabot[bot]
c00cb320cd
Bump @types/lossless-json from 1.0.1 to 1.0.2 in /kafka-ui-react-app (#4220)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-09-12 18:22:23 +04:00
Victoria Miltcheva
8a1e9ad8e8
Docs: Fix persistent installation link in README (#4207) 2023-09-07 22:26:16 +04:00
dependabot[bot]
39bb860f8e
Bump allure.version from 2.22.2 to 2.23.0 (#4039)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-09-06 14:45:09 +04:00
dependabot[bot]
f66d234d83
Bump git-commit-id-plugin from 4.0.0 to 4.9.10 (#4028)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-09-06 14:44:51 +04:00
dependabot[bot]
68a7268f8b
Bump maven-surefire-plugin from 2.22.2 to 3.1.2 (#4026)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-09-06 14:44:34 +04:00
dependabot[bot]
aca3d25dc8
Bump ingestion-contract-client from 0.1.23 to 0.1.26 (#4047)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-09-06 14:43:45 +04:00
dependabot[bot]
0616883fee
Bump eslint from 8.16.0 to 8.48.0 in /kafka-ui-react-app (#4184)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-09-06 14:33:49 +04:00
Ilya Kuramshin
59584ed369
BE: Add a common prefix for all app metrics (#4177)
Co-authored-by: iliax <ikuramshin@provectus.com>
2023-09-05 18:03:09 +04:00
Mihai Alexandru Agache
bbb739af92
FE: Fix dateTimeHelpers.spec.ts linter error (#4090)
Co-authored-by: Roman Zabaluev <rzabaluev@provectus.com>
2023-09-05 18:01:45 +04:00
dependabot[bot]
145bf07b5d
Bump jest-environment-jsdom from 29.5.0 to 29.6.4 in /kafka-ui-react-app (#4183)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-09-05 17:42:55 +04:00
dependabot[bot]
ceb821acdf
Bump sass from 1.52.3 to 1.66.1 in /kafka-ui-react-app (#4185)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-09-05 17:42:46 +04:00
dependabot[bot]
d2b0cc51e3
Bump json-schema-faker from 0.5.0-rcv.44 to 0.5.3 in /kafka-ui-react-app (#4189)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-09-05 17:42:35 +04:00
dependabot[bot]
9e7bc02c8a
Bump eslint-plugin-jsx-a11y from 6.5.1 to 6.7.1 in /kafka-ui-react-app (#4159)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-09-05 13:33:07 +04:00
dependabot[bot]
2836b2f5d2
Bump eslint-config-prettier from 8.5.0 to 9.0.0 in /kafka-ui-react-app (#4182)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-09-05 13:32:37 +04:00
dependabot[bot]
a47848f809
Bump vite-tsconfig-paths from 4.0.2 to 4.2.0 in /kafka-ui-react-app (#4186)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-09-05 13:31:50 +04:00
dependabot[bot]
5c9fb994a4
Bump react and @types/react in /kafka-ui-react-app (#4187)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-09-05 13:31:27 +04:00
dependabot[bot]
14efe9da1e
Bump @jest/types from 29.5.0 to 29.6.3 in /kafka-ui-react-app (#4188)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-09-05 13:31:09 +04:00
dependabot[bot]
6676747606
Bump aquasecurity/trivy-action from 0.11.2 to 0.12.0 (#4190)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-09-05 13:30:50 +04:00
Ilya Kuramshin
b0583a3ca7
BE: Refactor SchemaRegistry serialization logic (#4116)
Co-authored-by: iliax <ikuramshin@provectus.com>
Co-authored-by: Roman Zabaluev <rzabaluev@provectus.com>
2023-08-30 12:39:58 +04:00
Ilya Kuramshin
4ec7975b2e
BE: Implement topic active producers API (#4121)
Co-authored-by: iliax <ikuramshin@provectus.com>
2023-08-30 12:38:54 +04:00
dependabot[bot]
c05abc1e0a
Bump eslint-import-resolver-node from 0.3.6 to 0.3.9 in /kafka-ui-react-app (#4166)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-08-30 12:32:47 +04:00
Evgeny Petrushevsky
729ca79581
BE: Fix cognito roles extractor regression (#4171)
Co-authored-by: Evgeny Petrushevsky <evgenuypetrushevskiv8@gmail.com>
2023-08-30 12:29:36 +04:00
Shubhadeep Das
80024c8758
BE: Fix sonar code smells (#3971)
Co-authored-by: Roman Zabaluev <rzabaluev@provectus.com>
2023-08-29 17:54:21 +04:00
Prady
0d6f293ab9
FE: Topic analysis: Updated style for percentage of completion (#4123) 2023-08-29 17:53:13 +04:00
dependabot[bot]
8f2a29d15d
Bump aws-actions/configure-aws-credentials from 2 to 3 (#4169)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-08-29 17:46:10 +04:00
dependabot[bot]
552691fc5d
Bump use-debounce from 9.0.3 to 9.0.4 in /kafka-ui-react-app (#4160)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-08-29 17:45:24 +04:00
dependabot[bot]
342b534ac9
Bump jest from 29.5.0 to 29.6.4 in /kafka-ui-react-app (#4161)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-08-29 17:45:02 +04:00
dependabot[bot]
2051f6f653
Bump eslint-plugin-prettier from 4.0.0 to 4.2.1 in /kafka-ui-react-app (#4162)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-08-29 17:44:42 +04:00
dependabot[bot]
b2b02a5d60
Bump @hookform/error-message from 2.0.0 to 2.0.1 in /kafka-ui-react-app (#4163)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-08-29 17:44:23 +04:00
dependabot[bot]
d7eb3ba99e
Bump eslint-plugin-react-hooks from 4.5.0 to 4.6.0 in /kafka-ui-react-app (#4165)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-08-29 17:43:59 +04:00
dependabot[bot]
7de883d3ab
Bump @types/eventsource from 1.1.8 to 1.1.11 in /kafka-ui-react-app (#4167)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-08-29 17:43:36 +04:00
dependabot[bot]
4519d9a48c
Bump lossless-json from 2.0.8 to 2.0.11 in /kafka-ui-react-app (#4168)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-08-29 17:43:20 +04:00
Mitsuaki Ito
cca2c96997
FE: Fix active controller badge on invalid node (#4085)
Co-authored-by: Roman Zabaluev <rzabaluev@provectus.com>
2023-08-23 11:41:46 +00:00
dependabot[bot]
844eb17d7a
Bump tough-cookie from 4.0.0 to 4.1.3 in /kafka-ui-react-app (#4150)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-08-23 14:28:47 +04:00
dependabot[bot]
37a6e62684
Bump vite from 4.0.0 to 4.0.5 in /kafka-ui-react-app (#4147)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-08-23 13:48:54 +04:00
dependabot[bot]
4f211b39ba
Bump word-wrap from 1.2.3 to 1.2.5 in /kafka-ui-react-app (#4151)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-08-23 13:48:31 +04:00
dependabot[bot]
8d35761b8d
Bump json5 from 1.0.1 to 1.0.2 in /kafka-ui-react-app (#4148)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-08-23 13:46:35 +04:00
dependabot[bot]
b12a0634a0
Bump semver from 6.3.0 to 6.3.1 in /kafka-ui-react-app (#4149)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-08-23 12:50:24 +04:00
Roman Zabaluev
8d402798c5
FE: Bump node & pnpm versions (#4146) 2023-08-23 11:15:32 +04:00
Malav Mevada
ed9f91fd8a
FE: Broker: Config: Implement search by the Value (#3804)
Co-authored-by: Roman Zabaluev <rzabaluev@provectus.com>
2023-08-23 10:51:37 +04:00
pvmsikrsna
d2a5acc82d
FE: Topics: Minor fixes for Create Topic form (#3969)
Co-authored-by: Roman Zabaluev <rzabaluev@provectus.com>
2023-08-23 10:46:03 +04:00
p-eye
7a82079471
FE: Topics: Do not send properties which are not defined explicitly (#4097) 2023-08-23 10:42:09 +04:00
Ilya Kuramshin
9acbf2b681
BE: Minor restart logic refactoring (#4140)
Co-authored-by: iliax <ikuramshin@provectus.com>
2023-08-21 09:36:17 +00:00
105 changed files with 3166 additions and 3361 deletions
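Note on the Groovy-filter toggle: #4426 (listed above, and visible below in the MessagesController changes as the dynamicConfigOperations.checkIfFilteringGroovyEnabled() call) gates Groovy smart-filter execution behind the filtering.groovy.enabled property. A minimal sketch of switching it on at container start, assuming Spring Boot's relaxed binding exposes that property as the FILTERING_GROOVY_ENABLED environment variable; this compare does not show the default value, so the explicit flag is purely illustrative:

# FILTERING_GROOVY_ENABLED is the assumed relaxed-binding form of filtering.groovy.enabled
docker run -it -p 8080:8080 \
  -e DYNAMIC_CONFIG_ENABLED=true \
  -e FILTERING_GROOVY_ENABLED=true \
  provectuslabs/kafka-ui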

View file

@ -31,7 +31,7 @@ jobs:
echo "Packer will be triggered in this dir $WORK_DIR"
- name: Configure AWS credentials for Kafka-UI account
uses: aws-actions/configure-aws-credentials@v2
uses: aws-actions/configure-aws-credentials@v3
with:
aws-access-key-id: ${{ secrets.AWS_AMI_PUBLISH_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_AMI_PUBLISH_KEY_SECRET }}

View file

@ -45,7 +45,7 @@ jobs:
restore-keys: |
${{ runner.os }}-buildx-
- name: Configure AWS credentials for Kafka-UI account
uses: aws-actions/configure-aws-credentials@v2
uses: aws-actions/configure-aws-credentials@v3
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}

View file

@ -42,7 +42,7 @@ jobs:
restore-keys: |
${{ runner.os }}-buildx-
- name: Configure AWS credentials for Kafka-UI account
uses: aws-actions/configure-aws-credentials@v2
uses: aws-actions/configure-aws-credentials@v3
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}

View file

@ -55,7 +55,7 @@ jobs:
cache-to: type=local,dest=/tmp/.buildx-cache
- name: Run CVE checks
uses: aquasecurity/trivy-action@0.11.2
uses: aquasecurity/trivy-action@0.12.0
with:
image-ref: "provectuslabs/kafka-ui:${{ steps.build.outputs.version }}"
format: "table"

View file

@ -15,7 +15,7 @@ jobs:
tag='${{ github.event.pull_request.number }}'
echo "tag=${tag}" >> $GITHUB_OUTPUT
- name: Configure AWS credentials for Kafka-UI account
uses: aws-actions/configure-aws-credentials@v2
uses: aws-actions/configure-aws-credentials@v3
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}

View file

@ -24,7 +24,7 @@ jobs:
with:
ref: ${{ github.sha }}
- name: Configure AWS credentials
uses: aws-actions/configure-aws-credentials@v2
uses: aws-actions/configure-aws-credentials@v3
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}

View file

@ -18,7 +18,7 @@ jobs:
with:
ref: ${{ github.event.pull_request.head.sha }}
- name: Configure AWS credentials
uses: aws-actions/configure-aws-credentials@v2
uses: aws-actions/configure-aws-credentials@v3
with:
aws-access-key-id: ${{ secrets.S3_AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.S3_AWS_SECRET_ACCESS_KEY }}

View file

@ -11,7 +11,7 @@ jobs:
with:
ref: ${{ github.sha }}
- name: Configure AWS credentials
uses: aws-actions/configure-aws-credentials@v2
uses: aws-actions/configure-aws-credentials@v3
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}

View file

@ -25,11 +25,11 @@ jobs:
ref: ${{ github.event.pull_request.head.sha }}
- uses: pnpm/action-setup@v2.4.0
with:
version: 7.4.0
version: 8.6.12
- name: Install node
uses: actions/setup-node@v3.8.1
with:
node-version: "16.15.0"
node-version: "18.17.1"
cache: "pnpm"
cache-dependency-path: "./kafka-ui-react-app/pnpm-lock.yaml"
- name: Install Node dependencies

View file

@ -47,7 +47,7 @@ jobs:
restore-keys: |
${{ runner.os }}-buildx-
- name: Configure AWS credentials for Kafka-UI account
uses: aws-actions/configure-aws-credentials@v2
uses: aws-actions/configure-aws-credentials@v3
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}

View file

@ -26,7 +26,7 @@ jobs:
echo "Terraform will be triggered in this dir $TF_DIR"
- name: Configure AWS credentials for Kafka-UI account
uses: aws-actions/configure-aws-credentials@v2
uses: aws-actions/configure-aws-credentials@v3
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}

View file

@ -91,7 +91,7 @@ docker run -it -p 8080:8080 -e DYNAMIC_CONFIG_ENABLED=true provectuslabs/kafka-u
Then access the web UI at [http://localhost:8080](http://localhost:8080)
The command is sufficient to try things out. When you're done trying things out, you can proceed with a [persistent installation](https://docs.kafka-ui.provectus.io/configuration/quick-start#persistent-start)
The command is sufficient to try things out. When you're done trying things out, you can proceed with a [persistent installation](https://docs.kafka-ui.provectus.io/quick-start/persistent-start)
## Persistent installation
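For the persistent setup that the corrected link points to, a common variant of the same docker run mounts a config file so changes made through the UI survive restarts. This is a sketch only: the container-side path comes from the kafka-ui documentation, not from this diff, and the host path is an example.

# container path /etc/kafkaui/dynamic_config.yaml is an assumption based on the docs
docker run -it -p 8080:8080 \
  -e DYNAMIC_CONFIG_ENABLED=true \
  -v $(pwd)/config.yml:/etc/kafkaui/dynamic_config.yaml \
  provectuslabs/kafka-ui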

View file

@ -81,6 +81,12 @@
<groupId>io.confluent</groupId>
<artifactId>kafka-json-schema-serializer</artifactId>
<version>${confluent.version}</version>
<exclusions>
<exclusion>
<groupId>commons-collections</groupId>
<artifactId>commons-collections</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>io.confluent</groupId>
@ -135,6 +141,11 @@
<artifactId>commons-pool2</artifactId>
<version>${apache.commons.version}</version>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-collections4</artifactId>
<version>4.4</version>
</dependency>
<dependency>
<groupId>org.testcontainers</groupId>
<artifactId>testcontainers</artifactId>
@ -238,8 +249,6 @@
<groupId>org.springframework.security</groupId>
<artifactId>spring-security-ldap</artifactId>
</dependency>
<dependency>
<groupId>org.codehaus.groovy</groupId>
<artifactId>groovy-jsr223</artifactId>
@ -394,7 +403,7 @@
<plugin>
<groupId>pl.project13.maven</groupId>
<artifactId>git-commit-id-plugin</artifactId>
<version>4.0.0</version>
<version>4.9.10</version>
<executions>
<execution>
<id>get-the-git-infos</id>
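The pom.xml hunks above implement #4427: commons-collections is excluded from the transitive tree of kafka-json-schema-serializer and commons-collections4 4.4 is declared directly (the git-commit-id-plugin bump from #4028 is also visible). One way to confirm the old artifact no longer resolves after the change, using the standard Maven dependency plugin from the module that owns this pom:

mvn dependency:tree -Dincludes=commons-collections:commons-collections
# an empty filtered tree means the exclusion took effect; commons-collections4 lives under
# the org.apache.commons groupId and is intentionally not matched by this filter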

View file

@ -7,8 +7,6 @@ import org.springframework.http.HttpMethod;
import org.springframework.http.HttpStatus;
import org.springframework.http.server.reactive.ServerHttpRequest;
import org.springframework.http.server.reactive.ServerHttpResponse;
import org.springframework.web.reactive.config.CorsRegistry;
import org.springframework.web.reactive.config.WebFluxConfigurer;
import org.springframework.web.server.ServerWebExchange;
import org.springframework.web.server.WebFilter;
import org.springframework.web.server.WebFilterChain;

View file

@ -1,7 +1,6 @@
package com.provectus.kafka.ui.config;
import com.provectus.kafka.ui.exception.ValidationException;
import java.beans.Transient;
import javax.annotation.PostConstruct;
import lombok.Data;
import org.springframework.boot.context.properties.ConfigurationProperties;

View file

@ -1,7 +1,6 @@
package com.provectus.kafka.ui.config.auth;
import java.util.Collection;
import lombok.Value;
public record AuthenticatedUser(String principal, Collection<String> groups) {

View file

@ -2,7 +2,6 @@ package com.provectus.kafka.ui.config.auth;
import java.util.Collection;
import java.util.Map;
import lombok.Value;
import org.springframework.security.core.GrantedAuthority;
import org.springframework.security.oauth2.core.user.OAuth2User;

View file

@ -2,7 +2,6 @@ package com.provectus.kafka.ui.config.auth;
import java.util.Collection;
import java.util.Map;
import lombok.Value;
import org.springframework.security.core.GrantedAuthority;
import org.springframework.security.oauth2.core.oidc.OidcIdToken;
import org.springframework.security.oauth2.core.oidc.OidcUserInfo;

View file

@ -13,7 +13,6 @@ import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Objects;
import java.util.stream.Collectors;
import javax.annotation.Nullable;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
@ -38,7 +37,7 @@ public class AccessController implements AuthorizationApi {
.filter(role -> user.groups().contains(role.getName()))
.map(role -> mapPermissions(role.getPermissions(), role.getClusters()))
.flatMap(Collection::stream)
.collect(Collectors.toList())
.toList()
)
.switchIfEmpty(Mono.just(Collections.emptyList()));
@ -70,10 +69,10 @@ public class AccessController implements AuthorizationApi {
.map(String::toUpperCase)
.map(this::mapAction)
.filter(Objects::nonNull)
.collect(Collectors.toList()));
.toList());
return dto;
})
.collect(Collectors.toList());
.toList();
}
@Nullable

View file

@ -82,12 +82,13 @@ public class ApplicationConfigController extends AbstractController implements A
.build();
return validateAccess(context)
.then(restartRequestDto)
.<ResponseEntity<Void>>map(dto -> {
dynamicConfigOperations.persist(MAPPER.fromDto(dto.getConfig().getProperties()));
restarter.requestRestart();
return ResponseEntity.ok().build();
.doOnNext(restartDto -> {
var newConfig = MAPPER.fromDto(restartDto.getConfig().getProperties());
dynamicConfigOperations.persist(newConfig);
})
.doOnEach(sig -> audit(context, sig));
.doOnEach(sig -> audit(context, sig))
.doOnSuccess(dto -> restarter.requestRestart())
.map(dto -> ResponseEntity.ok().build());
}
@Override
@ -116,8 +117,8 @@ public class ApplicationConfigController extends AbstractController implements A
return validateAccess(context)
.then(configDto)
.flatMap(config -> {
PropertiesStructure propertiesStructure = MAPPER.fromDto(config.getProperties());
ClustersProperties clustersProperties = propertiesStructure.getKafka();
PropertiesStructure newConfig = MAPPER.fromDto(config.getProperties());
ClustersProperties clustersProperties = newConfig.getKafka();
return validateClustersConfig(clustersProperties)
.map(validations -> new ApplicationConfigValidationDTO().clusters(validations));
})

View file

@ -26,6 +26,8 @@ import reactor.core.publisher.Mono;
@RequiredArgsConstructor
@Slf4j
public class BrokersController extends AbstractController implements BrokersApi {
private static final String BROKER_ID = "brokerId";
private final BrokerService brokerService;
private final ClusterMapper clusterMapper;
@ -89,7 +91,7 @@ public class BrokersController extends AbstractController implements BrokersApi
.cluster(clusterName)
.clusterConfigActions(ClusterConfigAction.VIEW)
.operationName("getBrokerConfig")
.operationParams(Map.of("brokerId", id))
.operationParams(Map.of(BROKER_ID, id))
.build();
return validateAccess(context).thenReturn(
@ -108,7 +110,7 @@ public class BrokersController extends AbstractController implements BrokersApi
.cluster(clusterName)
.clusterConfigActions(ClusterConfigAction.VIEW, ClusterConfigAction.EDIT)
.operationName("updateBrokerTopicPartitionLogDir")
.operationParams(Map.of("brokerId", id))
.operationParams(Map.of(BROKER_ID, id))
.build();
return validateAccess(context).then(
@ -128,7 +130,7 @@ public class BrokersController extends AbstractController implements BrokersApi
.cluster(clusterName)
.clusterConfigActions(ClusterConfigAction.VIEW, ClusterConfigAction.EDIT)
.operationName("updateBrokerConfigByName")
.operationParams(Map.of("brokerId", id))
.operationParams(Map.of(BROKER_ID, id))
.build();
return validateAccess(context).then(

View file

@ -22,7 +22,6 @@ import com.provectus.kafka.ui.service.OffsetsResetService;
import java.util.Map;
import java.util.Optional;
import java.util.function.Supplier;
import java.util.stream.Collectors;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Value;
@ -200,7 +199,7 @@ public class ConsumerGroupsController extends AbstractController implements Cons
.consumerGroups(consumerGroupConsumerGroupsPage.consumerGroups()
.stream()
.map(ConsumerGroupMapper::toDto)
.collect(Collectors.toList()));
.toList());
}
}

View file

@ -36,6 +36,7 @@ import reactor.core.publisher.Mono;
public class KafkaConnectController extends AbstractController implements KafkaConnectApi {
private static final Set<ConnectorActionDTO> RESTART_ACTIONS
= Set.of(RESTART, RESTART_FAILED_TASKS, RESTART_ALL_TASKS);
private static final String CONNECTOR_NAME = "connectorName";
private final KafkaConnectService kafkaConnectService;
@ -112,7 +113,7 @@ public class KafkaConnectController extends AbstractController implements KafkaC
.connect(connectName)
.connectActions(ConnectAction.VIEW, ConnectAction.EDIT)
.operationName("deleteConnector")
.operationParams(Map.of("connectorName", connectName))
.operationParams(Map.of(CONNECTOR_NAME, connectName))
.build();
return validateAccess(context).then(
@ -180,7 +181,7 @@ public class KafkaConnectController extends AbstractController implements KafkaC
.connect(connectName)
.connectActions(ConnectAction.VIEW, ConnectAction.EDIT)
.operationName("setConnectorConfig")
.operationParams(Map.of("connectorName", connectorName))
.operationParams(Map.of(CONNECTOR_NAME, connectorName))
.build();
return validateAccess(context).then(
@ -207,7 +208,7 @@ public class KafkaConnectController extends AbstractController implements KafkaC
.connect(connectName)
.connectActions(connectActions)
.operationName("updateConnectorState")
.operationParams(Map.of("connectorName", connectorName))
.operationParams(Map.of(CONNECTOR_NAME, connectorName))
.build();
return validateAccess(context).then(
@ -227,7 +228,7 @@ public class KafkaConnectController extends AbstractController implements KafkaC
.connect(connectName)
.connectActions(ConnectAction.VIEW)
.operationName("getConnectorTasks")
.operationParams(Map.of("connectorName", connectorName))
.operationParams(Map.of(CONNECTOR_NAME, connectorName))
.build();
return validateAccess(context).thenReturn(
@ -247,7 +248,7 @@ public class KafkaConnectController extends AbstractController implements KafkaC
.connect(connectName)
.connectActions(ConnectAction.VIEW, ConnectAction.RESTART)
.operationName("restartConnectorTask")
.operationParams(Map.of("connectorName", connectorName))
.operationParams(Map.of(CONNECTOR_NAME, connectorName))
.build();
return validateAccess(context).then(

View file

@ -5,14 +5,13 @@ import static com.provectus.kafka.ui.model.rbac.permission.TopicAction.MESSAGES_
import static com.provectus.kafka.ui.model.rbac.permission.TopicAction.MESSAGES_READ;
import static com.provectus.kafka.ui.serde.api.Serde.Target.KEY;
import static com.provectus.kafka.ui.serde.api.Serde.Target.VALUE;
import static java.util.stream.Collectors.toMap;
import com.provectus.kafka.ui.api.MessagesApi;
import com.provectus.kafka.ui.exception.ValidationException;
import com.provectus.kafka.ui.model.ConsumerPosition;
import com.provectus.kafka.ui.model.CreateTopicMessageDTO;
import com.provectus.kafka.ui.model.MessageFilterIdDTO;
import com.provectus.kafka.ui.model.MessageFilterRegistrationDTO;
import com.provectus.kafka.ui.model.MessageFilterTypeDTO;
import com.provectus.kafka.ui.model.PollingModeDTO;
import com.provectus.kafka.ui.model.SeekDirectionDTO;
import com.provectus.kafka.ui.model.SeekTypeDTO;
import com.provectus.kafka.ui.model.SerdeUsageDTO;
@ -25,12 +24,17 @@ import com.provectus.kafka.ui.model.rbac.permission.AuditAction;
import com.provectus.kafka.ui.model.rbac.permission.TopicAction;
import com.provectus.kafka.ui.service.DeserializationService;
import com.provectus.kafka.ui.service.MessagesService;
import com.provectus.kafka.ui.util.DynamicConfigOperations;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import javax.annotation.Nullable;
import javax.validation.Valid;
import javax.validation.ValidationException;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.tuple.Pair;
import org.apache.kafka.common.TopicPartition;
import org.springframework.http.ResponseEntity;
import org.springframework.web.bind.annotation.RestController;
import org.springframework.web.server.ServerWebExchange;
@ -45,6 +49,7 @@ public class MessagesController extends AbstractController implements MessagesAp
private final MessagesService messagesService;
private final DeserializationService deserializationService;
private final DynamicConfigOperations dynamicConfigOperations;
@Override
public Mono<ResponseEntity<Void>> deleteTopicMessages(
@ -74,7 +79,6 @@ public class MessagesController extends AbstractController implements MessagesAp
.map(ResponseEntity::ok);
}
@Deprecated
@Override
public Mono<ResponseEntity<Flux<TopicMessageEventDTO>>> getTopicMessages(String clusterName,
String topicName,
@ -87,53 +91,41 @@ public class MessagesController extends AbstractController implements MessagesAp
String keySerde,
String valueSerde,
ServerWebExchange exchange) {
throw new ValidationException("Not supported");
}
@Override
public Mono<ResponseEntity<Flux<TopicMessageEventDTO>>> getTopicMessagesV2(String clusterName, String topicName,
PollingModeDTO mode,
List<Integer> partitions,
Integer limit,
String stringFilter,
String smartFilterId,
Long offset,
Long timestamp,
String keySerde,
String valueSerde,
String cursor,
ServerWebExchange exchange) {
var contextBuilder = AccessContext.builder()
.cluster(clusterName)
.topic(topicName)
.topicActions(MESSAGES_READ)
.operationName("getTopicMessages");
if (StringUtils.isNoneEmpty(q) && MessageFilterTypeDTO.GROOVY_SCRIPT == filterQueryType) {
dynamicConfigOperations.checkIfFilteringGroovyEnabled();
}
if (auditService.isAuditTopic(getCluster(clusterName), topicName)) {
contextBuilder.auditActions(AuditAction.VIEW);
}
var accessContext = contextBuilder.build();
seekType = seekType != null ? seekType : SeekTypeDTO.BEGINNING;
seekDirection = seekDirection != null ? seekDirection : SeekDirectionDTO.FORWARD;
filterQueryType = filterQueryType != null ? filterQueryType : MessageFilterTypeDTO.STRING_CONTAINS;
Flux<TopicMessageEventDTO> messagesFlux;
if (cursor != null) {
messagesFlux = messagesService.loadMessages(getCluster(clusterName), topicName, cursor);
} else {
messagesFlux = messagesService.loadMessages(
getCluster(clusterName),
topicName,
ConsumerPosition.create(mode, topicName, partitions, timestamp, offset),
stringFilter,
smartFilterId,
limit,
keySerde,
valueSerde
);
}
return accessControlService.validateAccess(accessContext)
.then(Mono.just(ResponseEntity.ok(messagesFlux)))
.doOnEach(sig -> auditService.audit(accessContext, sig));
var positions = new ConsumerPosition(
seekType,
topicName,
parseSeekTo(topicName, seekType, seekTo)
);
Mono<ResponseEntity<Flux<TopicMessageEventDTO>>> job = Mono.just(
ResponseEntity.ok(
messagesService.loadMessages(
getCluster(clusterName), topicName, positions, q, filterQueryType,
limit, seekDirection, keySerde, valueSerde)
)
);
var context = contextBuilder.build();
return validateAccess(context)
.then(job)
.doOnEach(sig -> audit(context, sig));
}
@Override
@ -155,6 +147,34 @@ public class MessagesController extends AbstractController implements MessagesAp
).doOnEach(sig -> audit(context, sig));
}
/**
* The format is [partition]::[offset] for specifying offsets
* or [partition]::[timestamp in millis] for specifying timestamps.
*/
@Nullable
private Map<TopicPartition, Long> parseSeekTo(String topic, SeekTypeDTO seekType, List<String> seekTo) {
if (seekTo == null || seekTo.isEmpty()) {
if (seekType == SeekTypeDTO.LATEST || seekType == SeekTypeDTO.BEGINNING) {
return null;
}
throw new ValidationException("seekTo should be set if seekType is " + seekType);
}
return seekTo.stream()
.map(p -> {
String[] split = p.split("::");
if (split.length != 2) {
throw new IllegalArgumentException(
"Wrong seekTo argument format. See API docs for details");
}
return Pair.of(
new TopicPartition(topic, Integer.parseInt(split[0])),
Long.parseLong(split[1])
);
})
.collect(toMap(Pair::getKey, Pair::getValue));
}
@Override
public Mono<ResponseEntity<TopicSerdeSuggestionDTO>> getSerdes(String clusterName,
String topicName,
@ -182,20 +202,7 @@ public class MessagesController extends AbstractController implements MessagesAp
);
}
@Override
public Mono<ResponseEntity<MessageFilterIdDTO>> registerFilter(String clusterName,
String topicName,
Mono<MessageFilterRegistrationDTO> registration,
ServerWebExchange exchange) {
final Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
.cluster(clusterName)
.topic(topicName)
.topicActions(MESSAGES_READ)
.build());
return validateAccess.then(registration)
.map(reg -> messagesService.registerMessageFilter(reg.getFilterCode()))
.map(id -> ResponseEntity.ok(new MessageFilterIdDTO().id(id)));
}
}
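The getTopicMessages/parseSeekTo code restored above expects each seekTo entry in the form [partition]::[offset], or [partition]::[timestamp in millis] when seekType is TIMESTAMP, as the Javadoc states. A hypothetical request illustrating the parameter shape; the cluster name, topic name, and endpoint path are assumptions for the example, not taken from this diff:

# read partition 0 from offset 42 and partition 1 from offset 15
curl 'http://localhost:8080/api/clusters/local/topics/orders/messages?seekType=OFFSET&seekTo=0::42&seekTo=1::15&limit=100'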

View file

@ -15,7 +15,6 @@ import com.provectus.kafka.ui.model.rbac.permission.SchemaAction;
import com.provectus.kafka.ui.service.SchemaRegistryService;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import javax.validation.Valid;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
@ -235,7 +234,7 @@ public class SchemasController extends AbstractController implements SchemasApi
List<String> subjectsToRender = filteredSubjects.stream()
.skip(subjectToSkip)
.limit(pageSize)
.collect(Collectors.toList());
.toList();
return schemaRegistryService.getAllLatestVersionSchemas(getCluster(clusterName), subjectsToRender)
.map(subjs -> subjs.stream().map(kafkaSrMapper::toDto).toList())
.map(subjs -> new SchemaSubjectsResponseDTO().pageCount(totalPages).schemas(subjs));

View file

@ -22,6 +22,7 @@ import com.provectus.kafka.ui.model.TopicConfigDTO;
import com.provectus.kafka.ui.model.TopicCreationDTO;
import com.provectus.kafka.ui.model.TopicDTO;
import com.provectus.kafka.ui.model.TopicDetailsDTO;
import com.provectus.kafka.ui.model.TopicProducerStateDTO;
import com.provectus.kafka.ui.model.TopicUpdateDTO;
import com.provectus.kafka.ui.model.TopicsResponseDTO;
import com.provectus.kafka.ui.model.rbac.AccessContext;
@ -143,7 +144,7 @@ public class TopicsController extends AbstractController implements TopicsApi {
.map(lst -> lst.stream()
.map(InternalTopicConfig::from)
.map(clusterMapper::toTopicConfig)
.collect(toList()))
.toList())
.map(Flux::fromIterable)
.map(ResponseEntity::ok)
).doOnEach(sig -> audit(context, sig));
@ -207,7 +208,7 @@ public class TopicsController extends AbstractController implements TopicsApi {
return topicsService.loadTopics(getCluster(clusterName), topicsPage)
.map(topicsToRender ->
new TopicsResponseDTO()
.topics(topicsToRender.stream().map(clusterMapper::toTopic).collect(toList()))
.topics(topicsToRender.stream().map(clusterMapper::toTopic).toList())
.pageCount(totalPages));
})
.map(ResponseEntity::ok)
@ -327,6 +328,34 @@ public class TopicsController extends AbstractController implements TopicsApi {
.doOnEach(sig -> audit(context, sig));
}
@Override
public Mono<ResponseEntity<Flux<TopicProducerStateDTO>>> getActiveProducerStates(String clusterName,
String topicName,
ServerWebExchange exchange) {
var context = AccessContext.builder()
.cluster(clusterName)
.topic(topicName)
.topicActions(VIEW)
.operationName("getActiveProducerStates")
.build();
Comparator<TopicProducerStateDTO> ordering =
Comparator.comparingInt(TopicProducerStateDTO::getPartition)
.thenComparing(Comparator.comparing(TopicProducerStateDTO::getProducerId).reversed());
Flux<TopicProducerStateDTO> states = topicsService.getActiveProducersState(getCluster(clusterName), topicName)
.flatMapMany(statesMap ->
Flux.fromStream(
statesMap.entrySet().stream()
.flatMap(e -> e.getValue().stream().map(p -> clusterMapper.map(e.getKey().partition(), p)))
.sorted(ordering)));
return validateAccess(context)
.thenReturn(states)
.map(ResponseEntity::ok)
.doOnEach(sig -> audit(context, sig));
}
private Comparator<InternalTopic> getComparatorForTopic(
TopicColumnsToSortDTO orderBy) {
var defaultComparator = Comparator.comparing(InternalTopic::getName);

View file

@ -1,7 +1,6 @@
package com.provectus.kafka.ui.emitter;
import com.provectus.kafka.ui.model.TopicMessageEventDTO;
import jakarta.annotation.Nullable;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.common.utils.Bytes;
import reactor.core.publisher.FluxSink;
@ -22,14 +21,12 @@ abstract class AbstractEmitter implements java.util.function.Consumer<FluxSink<T
return records;
}
protected boolean isSendLimitReached() {
protected boolean sendLimitReached() {
return messagesProcessing.limitReached();
}
protected void send(FluxSink<TopicMessageEventDTO> sink,
Iterable<ConsumerRecord<Bytes, Bytes>> records,
@Nullable Cursor.Tracking cursor) {
messagesProcessing.send(sink, records, cursor);
protected void send(FluxSink<TopicMessageEventDTO> sink, Iterable<ConsumerRecord<Bytes, Bytes>> records) {
messagesProcessing.send(sink, records);
}
protected void sendPhase(FluxSink<TopicMessageEventDTO> sink, String name) {
@ -40,9 +37,8 @@ abstract class AbstractEmitter implements java.util.function.Consumer<FluxSink<T
messagesProcessing.sentConsumingInfo(sink, records);
}
// cursor is null if target partitions were fully polled (no, need to do paging)
protected void sendFinishStatsAndCompleteSink(FluxSink<TopicMessageEventDTO> sink, @Nullable Cursor.Tracking cursor) {
messagesProcessing.sendFinishEvents(sink, cursor);
protected void sendFinishStatsAndCompleteSink(FluxSink<TopicMessageEventDTO> sink) {
messagesProcessing.sendFinishEvent(sink);
sink.complete();
}
}

View file

@ -18,15 +18,18 @@ public class BackwardEmitter extends RangePollingEmitter {
int messagesPerPage,
ConsumerRecordDeserializer deserializer,
Predicate<TopicMessageDTO> filter,
PollingSettings pollingSettings,
Cursor.Tracking cursor) {
PollingSettings pollingSettings) {
super(
consumerSupplier,
consumerPosition,
messagesPerPage,
new MessagesProcessing(deserializer, filter, false, messagesPerPage),
pollingSettings,
cursor
new MessagesProcessing(
deserializer,
filter,
false,
messagesPerPage
),
pollingSettings
);
}

View file

@ -2,8 +2,6 @@ package com.provectus.kafka.ui.emitter;
import com.provectus.kafka.ui.model.TopicMessageConsumingDTO;
import com.provectus.kafka.ui.model.TopicMessageEventDTO;
import com.provectus.kafka.ui.model.TopicMessageNextPageCursorDTO;
import javax.annotation.Nullable;
import reactor.core.publisher.FluxSink;
class ConsumingStats {
@ -28,15 +26,10 @@ class ConsumingStats {
filterApplyErrors++;
}
void sendFinishEvent(FluxSink<TopicMessageEventDTO> sink, @Nullable Cursor.Tracking cursor) {
void sendFinishEvent(FluxSink<TopicMessageEventDTO> sink) {
sink.next(
new TopicMessageEventDTO()
.type(TopicMessageEventDTO.TypeEnum.DONE)
.cursor(
cursor != null
? new TopicMessageNextPageCursorDTO().id(cursor.registerCursor())
: null
)
.consuming(createConsumingStats())
);
}

View file

@ -1,90 +0,0 @@
package com.provectus.kafka.ui.emitter;
import com.google.common.collect.HashBasedTable;
import com.google.common.collect.Table;
import com.provectus.kafka.ui.model.ConsumerPosition;
import com.provectus.kafka.ui.model.PollingModeDTO;
import com.provectus.kafka.ui.model.TopicMessageDTO;
import com.provectus.kafka.ui.serdes.ConsumerRecordDeserializer;
import java.util.HashMap;
import java.util.Map;
import java.util.function.Function;
import java.util.function.Predicate;
import org.apache.kafka.common.TopicPartition;
public record Cursor(ConsumerRecordDeserializer deserializer,
ConsumerPosition consumerPosition,
Predicate<TopicMessageDTO> filter,
int limit) {
public static class Tracking {
private final ConsumerRecordDeserializer deserializer;
private final ConsumerPosition originalPosition;
private final Predicate<TopicMessageDTO> filter;
private final int limit;
private final Function<Cursor, String> registerAction;
//topic -> partition -> offset
private final Table<String, Integer, Long> trackingOffsets = HashBasedTable.create();
public Tracking(ConsumerRecordDeserializer deserializer,
ConsumerPosition originalPosition,
Predicate<TopicMessageDTO> filter,
int limit,
Function<Cursor, String> registerAction) {
this.deserializer = deserializer;
this.originalPosition = originalPosition;
this.filter = filter;
this.limit = limit;
this.registerAction = registerAction;
}
void trackOffset(String topic, int partition, long offset) {
trackingOffsets.put(topic, partition, offset);
}
void initOffsets(Map<TopicPartition, Long> initialSeekOffsets) {
initialSeekOffsets.forEach((tp, off) -> trackOffset(tp.topic(), tp.partition(), off));
}
private Map<TopicPartition, Long> getOffsetsMap(int offsetToAdd) {
Map<TopicPartition, Long> result = new HashMap<>();
trackingOffsets.rowMap()
.forEach((topic, partsMap) ->
partsMap.forEach((p, off) -> result.put(new TopicPartition(topic, p), off + offsetToAdd)));
return result;
}
String registerCursor() {
return registerAction.apply(
new Cursor(
deserializer,
new ConsumerPosition(
switch (originalPosition.pollingMode()) {
case TO_OFFSET, TO_TIMESTAMP, LATEST -> PollingModeDTO.TO_OFFSET;
case FROM_OFFSET, FROM_TIMESTAMP, EARLIEST -> PollingModeDTO.FROM_OFFSET;
case TAILING -> throw new IllegalStateException();
},
originalPosition.topic(),
originalPosition.partitions(),
null,
new ConsumerPosition.Offsets(
null,
getOffsetsMap(
switch (originalPosition.pollingMode()) {
case TO_OFFSET, TO_TIMESTAMP, LATEST -> 0;
// when doing forward polling we need to start from latest msg's offset + 1
case FROM_OFFSET, FROM_TIMESTAMP, EARLIEST -> 1;
case TAILING -> throw new IllegalStateException();
}
)
)
),
filter,
limit
)
);
}
}
}

View file

@ -18,15 +18,18 @@ public class ForwardEmitter extends RangePollingEmitter {
int messagesPerPage,
ConsumerRecordDeserializer deserializer,
Predicate<TopicMessageDTO> filter,
PollingSettings pollingSettings,
Cursor.Tracking cursor) {
PollingSettings pollingSettings) {
super(
consumerSupplier,
consumerPosition,
messagesPerPage,
new MessagesProcessing(deserializer, filter, true, messagesPerPage),
pollingSettings,
cursor
new MessagesProcessing(
deserializer,
filter,
true,
messagesPerPage
),
pollingSettings
);
}

View file

@ -1,6 +1,7 @@
package com.provectus.kafka.ui.emitter;
import com.provectus.kafka.ui.exception.ValidationException;
import com.provectus.kafka.ui.model.MessageFilterTypeDTO;
import com.provectus.kafka.ui.model.TopicMessageDTO;
import groovy.json.JsonSlurper;
import java.util.function.Predicate;
@ -21,16 +22,23 @@ public class MessageFilters {
private MessageFilters() {
}
public static Predicate<TopicMessageDTO> noop() {
return e -> true;
public static Predicate<TopicMessageDTO> createMsgFilter(String query, MessageFilterTypeDTO type) {
switch (type) {
case STRING_CONTAINS:
return containsStringFilter(query);
case GROOVY_SCRIPT:
return groovyScriptFilter(query);
default:
throw new IllegalStateException("Unknown query type: " + type);
}
}
public static Predicate<TopicMessageDTO> containsStringFilter(String string) {
static Predicate<TopicMessageDTO> containsStringFilter(String string) {
return msg -> StringUtils.contains(msg.getKey(), string)
|| StringUtils.contains(msg.getContent(), string);
}
public static Predicate<TopicMessageDTO> groovyScriptFilter(String script) {
static Predicate<TopicMessageDTO> groovyScriptFilter(String script) {
var engine = getGroovyEngine();
var compiledScript = compileScript(engine, script);
var jsonSlurper = new JsonSlurper();

View file

@ -39,9 +39,7 @@ class MessagesProcessing {
return limit != null && sentMessages >= limit;
}
void send(FluxSink<TopicMessageEventDTO> sink,
Iterable<ConsumerRecord<Bytes, Bytes>> polled,
@Nullable Cursor.Tracking cursor) {
void send(FluxSink<TopicMessageEventDTO> sink, Iterable<ConsumerRecord<Bytes, Bytes>> polled) {
sortForSending(polled, ascendingSortBeforeSend)
.forEach(rec -> {
if (!limitReached() && !sink.isCancelled()) {
@ -55,9 +53,6 @@ class MessagesProcessing {
);
sentMessages++;
}
if (cursor != null) {
cursor.trackOffset(rec.topic(), rec.partition(), rec.offset());
}
} catch (Exception e) {
consumingStats.incFilterApplyError();
log.trace("Error applying filter for message {}", topicMessage);
@ -72,9 +67,9 @@ class MessagesProcessing {
}
}
void sendFinishEvents(FluxSink<TopicMessageEventDTO> sink, @Nullable Cursor.Tracking cursor) {
void sendFinishEvent(FluxSink<TopicMessageEventDTO> sink) {
if (!sink.isCancelled()) {
consumingStats.sendFinishEvent(sink, cursor);
consumingStats.sendFinishEvent(sink);
}
}

View file

@ -1,12 +1,10 @@
package com.provectus.kafka.ui.emitter;
import com.google.common.base.Preconditions;
import com.google.common.collect.Sets;
import java.util.Collection;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;
import lombok.Getter;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang3.mutable.MutableLong;
@ -29,7 +27,7 @@ class OffsetsInfo {
this(consumer,
consumer.partitionsFor(topic).stream()
.map(pi -> new TopicPartition(topic, pi.partition()))
.collect(Collectors.toList())
.toList()
);
}
@ -63,8 +61,4 @@ class OffsetsInfo {
return cnt.getValue();
}
public Set<TopicPartition> allTargetPartitions() {
return Sets.union(nonEmptyPartitions, emptyPartitions);
}
}

View file

@ -3,7 +3,6 @@ package com.provectus.kafka.ui.emitter;
import java.time.Duration;
import java.util.Iterator;
import java.util.List;
import java.util.Set;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.common.TopicPartition;
@ -33,10 +32,6 @@ public record PolledRecords(int count,
return records.iterator();
}
public Set<TopicPartition> partitions() {
return records.partitions();
}
private static int calculatePolledRecSize(Iterable<ConsumerRecord<Bytes, Bytes>> recs) {
int polledBytes = 0;
for (ConsumerRecord<Bytes, Bytes> rec : recs) {

View file

@ -17,7 +17,6 @@ import reactor.core.publisher.FluxSink;
abstract class RangePollingEmitter extends AbstractEmitter {
private final Supplier<EnhancedConsumer> consumerSupplier;
private final Cursor.Tracking cursor;
protected final ConsumerPosition consumerPosition;
protected final int messagesPerPage;
@ -25,13 +24,11 @@ abstract class RangePollingEmitter extends AbstractEmitter {
ConsumerPosition consumerPosition,
int messagesPerPage,
MessagesProcessing messagesProcessing,
PollingSettings pollingSettings,
Cursor.Tracking cursor) {
PollingSettings pollingSettings) {
super(messagesProcessing, pollingSettings);
this.consumerPosition = consumerPosition;
this.messagesPerPage = messagesPerPage;
this.consumerSupplier = consumerSupplier;
this.cursor = cursor;
}
protected record FromToOffset(/*inclusive*/ long from, /*exclusive*/ long to) {
@ -49,20 +46,18 @@ abstract class RangePollingEmitter extends AbstractEmitter {
try (EnhancedConsumer consumer = consumerSupplier.get()) {
sendPhase(sink, "Consumer created");
var seekOperations = SeekOperations.create(consumer, consumerPosition);
cursor.initOffsets(seekOperations.getOffsetsForSeek());
TreeMap<TopicPartition, FromToOffset> pollRange = nextPollingRange(new TreeMap<>(), seekOperations);
log.debug("Starting from offsets {}", pollRange);
while (!sink.isCancelled() && !pollRange.isEmpty() && !isSendLimitReached()) {
while (!sink.isCancelled() && !pollRange.isEmpty() && !sendLimitReached()) {
var polled = poll(consumer, sink, pollRange);
send(sink, polled, cursor);
send(sink, polled);
pollRange = nextPollingRange(pollRange, seekOperations);
}
if (sink.isCancelled()) {
log.debug("Polling finished due to sink cancellation");
}
sendFinishStatsAndCompleteSink(sink, pollRange.isEmpty() ? null : cursor);
sendFinishStatsAndCompleteSink(sink);
log.debug("Polling finished");
} catch (InterruptException kafkaInterruptException) {
log.debug("Polling finished due to thread interruption");

View file

@ -1,13 +1,13 @@
package com.provectus.kafka.ui.emitter;
import static com.provectus.kafka.ui.model.PollingModeDTO.TO_TIMESTAMP;
import static java.util.Objects.requireNonNull;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.provectus.kafka.ui.model.ConsumerPosition;
import com.provectus.kafka.ui.model.PollingModeDTO;
import com.provectus.kafka.ui.model.SeekTypeDTO;
import java.util.HashMap;
import java.util.Map;
import java.util.stream.Collectors;
import javax.annotation.Nullable;
import lombok.AccessLevel;
import lombok.RequiredArgsConstructor;
import org.apache.commons.lang3.mutable.MutableLong;
@ -22,11 +22,17 @@ public class SeekOperations {
private final Map<TopicPartition, Long> offsetsForSeek; //only contains non-empty partitions!
public static SeekOperations create(Consumer<?, ?> consumer, ConsumerPosition consumerPosition) {
OffsetsInfo offsetsInfo = consumerPosition.partitions().isEmpty()
? new OffsetsInfo(consumer, consumerPosition.topic())
: new OffsetsInfo(consumer, consumerPosition.partitions());
var offsetsToSeek = getOffsetsForSeek(consumer, offsetsInfo, consumerPosition);
return new SeekOperations(consumer, offsetsInfo, offsetsToSeek);
OffsetsInfo offsetsInfo;
if (consumerPosition.getSeekTo() == null) {
offsetsInfo = new OffsetsInfo(consumer, consumerPosition.getTopic());
} else {
offsetsInfo = new OffsetsInfo(consumer, consumerPosition.getSeekTo().keySet());
}
return new SeekOperations(
consumer,
offsetsInfo,
getOffsetsForSeek(consumer, offsetsInfo, consumerPosition.getSeekType(), consumerPosition.getSeekTo())
);
}
public void assignAndSeekNonEmptyPartitions() {
@ -69,26 +75,27 @@ public class SeekOperations {
@VisibleForTesting
static Map<TopicPartition, Long> getOffsetsForSeek(Consumer<?, ?> consumer,
OffsetsInfo offsetsInfo,
ConsumerPosition position) {
return switch (position.pollingMode()) {
case TAILING -> consumer.endOffsets(offsetsInfo.allTargetPartitions());
case LATEST -> consumer.endOffsets(offsetsInfo.getNonEmptyPartitions());
case EARLIEST -> consumer.beginningOffsets(offsetsInfo.getNonEmptyPartitions());
case FROM_OFFSET, TO_OFFSET -> fixOffsets(offsetsInfo, requireNonNull(position.offsets()));
case FROM_TIMESTAMP, TO_TIMESTAMP ->
offsetsForTimestamp(consumer, position.pollingMode(), offsetsInfo, requireNonNull(position.timestamp()));
};
SeekTypeDTO seekType,
@Nullable Map<TopicPartition, Long> seekTo) {
switch (seekType) {
case LATEST:
return consumer.endOffsets(offsetsInfo.getNonEmptyPartitions());
case BEGINNING:
return consumer.beginningOffsets(offsetsInfo.getNonEmptyPartitions());
case OFFSET:
Preconditions.checkNotNull(seekTo);
return fixOffsets(offsetsInfo, seekTo);
case TIMESTAMP:
Preconditions.checkNotNull(seekTo);
return offsetsForTimestamp(consumer, offsetsInfo, seekTo);
default:
throw new IllegalStateException();
}
}
private static Map<TopicPartition, Long> fixOffsets(OffsetsInfo offsetsInfo,
ConsumerPosition.Offsets positionOffset) {
var offsets = new HashMap<TopicPartition, Long>();
if (positionOffset.offset() != null) {
offsetsInfo.getNonEmptyPartitions().forEach(tp -> offsets.put(tp, positionOffset.offset()));
} else {
offsets.putAll(requireNonNull(positionOffset.tpOffsets()));
offsets.keySet().retainAll(offsetsInfo.getNonEmptyPartitions());
}
private static Map<TopicPartition, Long> fixOffsets(OffsetsInfo offsetsInfo, Map<TopicPartition, Long> offsets) {
offsets = new HashMap<>(offsets);
offsets.keySet().retainAll(offsetsInfo.getNonEmptyPartitions());
Map<TopicPartition, Long> result = new HashMap<>();
offsets.forEach((tp, targetOffset) -> {
@ -105,25 +112,13 @@ public class SeekOperations {
return result;
}
private static Map<TopicPartition, Long> offsetsForTimestamp(Consumer<?, ?> consumer,
PollingModeDTO pollingMode,
OffsetsInfo offsetsInfo,
Long timestamp) {
Map<TopicPartition, Long> timestamps = new HashMap<>();
offsetsInfo.getNonEmptyPartitions().forEach(tp -> timestamps.put(tp, timestamp));
private static Map<TopicPartition, Long> offsetsForTimestamp(Consumer<?, ?> consumer, OffsetsInfo offsetsInfo,
Map<TopicPartition, Long> timestamps) {
timestamps = new HashMap<>(timestamps);
timestamps.keySet().retainAll(offsetsInfo.getNonEmptyPartitions());
Map<TopicPartition, Long> result = new HashMap<>();
consumer.offsetsForTimes(timestamps).forEach((tp, offsetAndTimestamp) -> {
if (offsetAndTimestamp == null) {
if (pollingMode == TO_TIMESTAMP && offsetsInfo.getNonEmptyPartitions().contains(tp)) {
// if no offset was returned this means that *all* timestamps are lower
// than target timestamp. Is case of TO_OFFSET mode we need to read from the ending of tp
result.put(tp, offsetsInfo.getEndOffsets().get(tp));
}
} else {
result.put(tp, offsetAndTimestamp.offset());
}
});
return result;
return consumer.offsetsForTimes(timestamps).entrySet().stream()
.filter(e -> e.getValue() != null)
.collect(Collectors.toMap(Map.Entry::getKey, e -> e.getValue().offset()));
}
}

View file

@ -35,7 +35,7 @@ public class TailingEmitter extends AbstractEmitter {
while (!sink.isCancelled()) {
sendPhase(sink, "Polling");
var polled = poll(sink, consumer);
send(sink, polled, null);
send(sink, polled);
}
sink.complete();
log.debug("Tailing finished");
@ -55,4 +55,5 @@ public class TailingEmitter extends AbstractEmitter {
consumer.assign(seekOffsets.keySet());
seekOffsets.forEach(consumer::seek);
}
}

View file

@ -106,7 +106,7 @@ public class GlobalErrorWebExceptionHandler extends AbstractErrorWebExceptionHan
err.setFieldName(e.getKey());
err.setRestrictions(List.copyOf(e.getValue()));
return err;
}).collect(Collectors.toList());
}).toList();
var message = fieldsErrors.isEmpty()
? exception.getMessage()

View file

@ -30,11 +30,12 @@ import com.provectus.kafka.ui.model.ReplicaDTO;
import com.provectus.kafka.ui.model.TopicConfigDTO;
import com.provectus.kafka.ui.model.TopicDTO;
import com.provectus.kafka.ui.model.TopicDetailsDTO;
import com.provectus.kafka.ui.model.TopicProducerStateDTO;
import com.provectus.kafka.ui.service.metrics.RawMetric;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import org.apache.kafka.clients.admin.ConfigEntry;
import org.apache.kafka.clients.admin.ProducerState;
import org.apache.kafka.common.acl.AccessControlEntry;
import org.apache.kafka.common.acl.AclBinding;
import org.apache.kafka.common.acl.AclOperation;
@ -54,7 +55,7 @@ public interface ClusterMapper {
default ClusterMetricsDTO toClusterMetrics(Metrics metrics) {
return new ClusterMetricsDTO()
.items(metrics.getSummarizedMetrics().map(this::convert).collect(Collectors.toList()));
.items(metrics.getSummarizedMetrics().map(this::convert).toList());
}
private MetricDTO convert(RawMetric rawMetric) {
@ -66,7 +67,7 @@ public interface ClusterMapper {
default BrokerMetricsDTO toBrokerMetrics(List<RawMetric> metrics) {
return new BrokerMetricsDTO()
.metrics(metrics.stream().map(this::convert).collect(Collectors.toList()));
.metrics(metrics.stream().map(this::convert).toList());
}
@Mapping(target = "isSensitive", source = "sensitive")
@ -107,7 +108,7 @@ public interface ClusterMapper {
List<ClusterDTO.FeaturesEnum> toFeaturesEnum(List<ClusterFeature> features);
default List<PartitionDTO> map(Map<Integer, InternalPartition> map) {
return map.values().stream().map(this::toPartition).collect(Collectors.toList());
return map.values().stream().map(this::toPartition).toList();
}
default BrokerDiskUsageDTO map(Integer id, InternalBrokerDiskUsage internalBrokerDiskUsage) {
@ -118,6 +119,17 @@ public interface ClusterMapper {
return brokerDiskUsage;
}
default TopicProducerStateDTO map(int partition, ProducerState state) {
return new TopicProducerStateDTO()
.partition(partition)
.producerId(state.producerId())
.producerEpoch(state.producerEpoch())
.lastSequence(state.lastSequence())
.lastTimestampMs(state.lastTimestamp())
.coordinatorEpoch(state.coordinatorEpoch().stream().boxed().findAny().orElse(null))
.currentTransactionStartOffset(state.currentTransactionStartOffset().stream().boxed().findAny().orElse(null));
}
static KafkaAclDTO.OperationEnum mapAclOperation(AclOperation operation) {
return switch (operation) {
case ALL -> KafkaAclDTO.OperationEnum.ALL;

View file

@ -21,7 +21,7 @@ public class DescribeLogDirsMapper {
return logDirsInfo.entrySet().stream().map(
mapEntry -> mapEntry.getValue().entrySet().stream()
.map(e -> toBrokerLogDirs(mapEntry.getKey(), e.getKey(), e.getValue()))
.collect(Collectors.toList())
.toList()
).flatMap(Collection::stream).collect(Collectors.toList());
}
@ -35,7 +35,7 @@ public class DescribeLogDirsMapper {
var topics = logDirInfo.replicaInfos.entrySet().stream()
.collect(Collectors.groupingBy(e -> e.getKey().topic())).entrySet().stream()
.map(e -> toTopicLogDirs(broker, e.getKey(), e.getValue()))
.collect(Collectors.toList());
.toList();
result.setTopics(topics);
return result;
}
@ -48,7 +48,7 @@ public class DescribeLogDirsMapper {
topic.setPartitions(
partitions.stream().map(
e -> topicPartitionLogDir(
broker, e.getKey().partition(), e.getValue())).collect(Collectors.toList())
broker, e.getKey().partition(), e.getValue())).toList()
);
return topic;
}

View file

@ -1,72 +1,14 @@
package com.provectus.kafka.ui.model;
import com.google.common.base.Preconditions;
import com.provectus.kafka.ui.exception.ValidationException;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.stream.Collectors;
import javax.annotation.Nullable;
import lombok.Value;
import org.apache.kafka.common.TopicPartition;
public record ConsumerPosition(PollingModeDTO pollingMode,
String topic,
List<TopicPartition> partitions, //all partitions if list is empty
@Nullable Long timestamp,
@Nullable Offsets offsets) {
public record Offsets(@Nullable Long offset, //should be applied to all partitions
@Nullable Map<TopicPartition, Long> tpOffsets) {
public Offsets {
// only one of properties should be set
Preconditions.checkArgument((offset == null && tpOffsets != null) || (offset != null && tpOffsets == null));
}
}
public static ConsumerPosition create(PollingModeDTO pollingMode,
String topic,
@Nullable List<Integer> partitions,
@Nullable Long timestamp,
@Nullable Long offset) {
@Nullable var offsets = parseAndValidateOffsets(pollingMode, offset);
var topicPartitions = Optional.ofNullable(partitions).orElse(List.of())
.stream()
.map(p -> new TopicPartition(topic, p))
.collect(Collectors.toList());
// if offsets are specified - inferring partitions list from there
topicPartitions = (offsets != null && offsets.tpOffsets() != null)
? List.copyOf(offsets.tpOffsets().keySet())
: topicPartitions;
return new ConsumerPosition(
pollingMode,
topic,
topicPartitions,
validateTimestamp(pollingMode, timestamp),
offsets
);
}
private static Long validateTimestamp(PollingModeDTO pollingMode, @Nullable Long ts) {
if (pollingMode == PollingModeDTO.FROM_TIMESTAMP || pollingMode == PollingModeDTO.TO_TIMESTAMP) {
if (ts == null) {
throw new ValidationException("timestamp not provided for " + pollingMode);
}
}
return ts;
}
private static Offsets parseAndValidateOffsets(PollingModeDTO pollingMode,
@Nullable Long offset) {
if (pollingMode == PollingModeDTO.FROM_OFFSET || pollingMode == PollingModeDTO.TO_OFFSET) {
if (offset == null) {
throw new ValidationException("offsets not provided for " + pollingMode);
}
return new Offsets(offset, null);
}
return null;
}
@Value
public class ConsumerPosition {
SeekTypeDTO seekType;
String topic;
@Nullable
Map<TopicPartition, Long> seekTo; // null if positioning should apply to all tps
}
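
For context, the record-based ConsumerPosition variant shown first exposes a validating factory. A hypothetical caller (sketch only; type imports are taken from elsewhere in this diff, the topic and values are placeholders):

import com.provectus.kafka.ui.model.ConsumerPosition;
import com.provectus.kafka.ui.model.PollingModeDTO;
import java.util.List;

class ConsumerPositionDemo {
  public static void main(String[] args) {
    var position = ConsumerPosition.create(
        PollingModeDTO.FROM_OFFSET, // FROM_OFFSET/TO_OFFSET require a non-null offset
        "orders",                   // hypothetical topic name
        List.of(0, 1),              // explicit partitions; null or empty means all partitions
        null,                       // timestamp is only required for *_TIMESTAMP modes
        42L);                       // wrapped into Offsets(42, null) by parseAndValidateOffsets
    System.out.println(position.partitions()); // [orders-0, orders-1]
  }
}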

View file

@ -44,7 +44,7 @@ public class InternalLogDirStats {
topicMap.getValue().replicaInfos.entrySet().stream()
.map(e -> Tuples.of(b.getKey(), e.getKey(), e.getValue().size))
)
).collect(toList());
).toList();
partitionsStats = topicPartitions.stream().collect(
groupingBy(

View file

@ -52,6 +52,8 @@ public class AccessContext {
}
public static final class AccessContextBuilder {
private static final String ACTIONS_NOT_PRESENT = "actions not present";
private Collection<ApplicationConfigAction> applicationConfigActions = Collections.emptySet();
private String cluster;
private Collection<ClusterConfigAction> clusterConfigActions = Collections.emptySet();
@ -75,7 +77,7 @@ public class AccessContext {
}
public AccessContextBuilder applicationConfigActions(ApplicationConfigAction... actions) {
Assert.isTrue(actions.length > 0, "actions not present");
Assert.isTrue(actions.length > 0, ACTIONS_NOT_PRESENT);
this.applicationConfigActions = List.of(actions);
return this;
}
@ -86,7 +88,7 @@ public class AccessContext {
}
public AccessContextBuilder clusterConfigActions(ClusterConfigAction... actions) {
Assert.isTrue(actions.length > 0, "actions not present");
Assert.isTrue(actions.length > 0, ACTIONS_NOT_PRESENT);
this.clusterConfigActions = List.of(actions);
return this;
}
@ -97,7 +99,7 @@ public class AccessContext {
}
public AccessContextBuilder topicActions(TopicAction... actions) {
Assert.isTrue(actions.length > 0, "actions not present");
Assert.isTrue(actions.length > 0, ACTIONS_NOT_PRESENT);
this.topicActions = List.of(actions);
return this;
}
@ -108,7 +110,7 @@ public class AccessContext {
}
public AccessContextBuilder consumerGroupActions(ConsumerGroupAction... actions) {
Assert.isTrue(actions.length > 0, "actions not present");
Assert.isTrue(actions.length > 0, ACTIONS_NOT_PRESENT);
this.consumerGroupActions = List.of(actions);
return this;
}
@ -119,7 +121,7 @@ public class AccessContext {
}
public AccessContextBuilder connectActions(ConnectAction... actions) {
Assert.isTrue(actions.length > 0, "actions not present");
Assert.isTrue(actions.length > 0, ACTIONS_NOT_PRESENT);
this.connectActions = List.of(actions);
return this;
}
@ -135,25 +137,25 @@ public class AccessContext {
}
public AccessContextBuilder schemaActions(SchemaAction... actions) {
Assert.isTrue(actions.length > 0, "actions not present");
Assert.isTrue(actions.length > 0, ACTIONS_NOT_PRESENT);
this.schemaActions = List.of(actions);
return this;
}
public AccessContextBuilder ksqlActions(KsqlAction... actions) {
Assert.isTrue(actions.length > 0, "actions not present");
Assert.isTrue(actions.length > 0, ACTIONS_NOT_PRESENT);
this.ksqlActions = List.of(actions);
return this;
}
public AccessContextBuilder aclActions(AclAction... actions) {
Assert.isTrue(actions.length > 0, "actions not present");
Assert.isTrue(actions.length > 0, ACTIONS_NOT_PRESENT);
this.aclActions = List.of(actions);
return this;
}
public AccessContextBuilder auditActions(AuditAction... actions) {
Assert.isTrue(actions.length > 0, "actions not present");
Assert.isTrue(actions.length > 0, ACTIONS_NOT_PRESENT);
this.auditActions = List.of(actions);
return this;
}

View file

@ -23,7 +23,7 @@ import javax.annotation.Nullable;
import lombok.EqualsAndHashCode;
import lombok.Getter;
import lombok.ToString;
import org.apache.commons.collections.CollectionUtils;
import org.apache.commons.collections4.CollectionUtils;
import org.springframework.util.Assert;
@Getter

View file

@ -6,7 +6,6 @@ import com.provectus.kafka.ui.serdes.BuiltInSerde;
import java.util.Base64;
import java.util.Map;
import java.util.Optional;
import org.apache.kafka.common.header.Headers;
public class Base64Serde implements BuiltInSerde {

View file

@ -28,6 +28,23 @@ public class ConsumerOffsetsSerde implements BuiltInSerde {
private static final JsonMapper JSON_MAPPER = createMapper();
private static final String ASSIGNMENT = "assignment";
private static final String CLIENT_HOST = "client_host";
private static final String CLIENT_ID = "client_id";
private static final String COMMIT_TIMESTAMP = "commit_timestamp";
private static final String CURRENT_STATE_TIMESTAMP = "current_state_timestamp";
private static final String GENERATION = "generation";
private static final String LEADER = "leader";
private static final String MEMBERS = "members";
private static final String MEMBER_ID = "member_id";
private static final String METADATA = "metadata";
private static final String OFFSET = "offset";
private static final String PROTOCOL = "protocol";
private static final String PROTOCOL_TYPE = "protocol_type";
private static final String REBALANCE_TIMEOUT = "rebalance_timeout";
private static final String SESSION_TIMEOUT = "session_timeout";
private static final String SUBSCRIPTION = "subscription";
public static final String TOPIC = "__consumer_offsets";
public static String name() {
@ -116,128 +133,128 @@ public class ConsumerOffsetsSerde implements BuiltInSerde {
private Deserializer valueDeserializer() {
final Schema commitOffsetSchemaV0 =
new Schema(
new Field("offset", Type.INT64, ""),
new Field("metadata", Type.STRING, ""),
new Field("commit_timestamp", Type.INT64, "")
new Field(OFFSET, Type.INT64, ""),
new Field(METADATA, Type.STRING, ""),
new Field(COMMIT_TIMESTAMP, Type.INT64, "")
);
final Schema commitOffsetSchemaV1 =
new Schema(
new Field("offset", Type.INT64, ""),
new Field("metadata", Type.STRING, ""),
new Field("commit_timestamp", Type.INT64, ""),
new Field(OFFSET, Type.INT64, ""),
new Field(METADATA, Type.STRING, ""),
new Field(COMMIT_TIMESTAMP, Type.INT64, ""),
new Field("expire_timestamp", Type.INT64, "")
);
final Schema commitOffsetSchemaV2 =
new Schema(
new Field("offset", Type.INT64, ""),
new Field("metadata", Type.STRING, ""),
new Field("commit_timestamp", Type.INT64, "")
new Field(OFFSET, Type.INT64, ""),
new Field(METADATA, Type.STRING, ""),
new Field(COMMIT_TIMESTAMP, Type.INT64, "")
);
final Schema commitOffsetSchemaV3 =
new Schema(
new Field("offset", Type.INT64, ""),
new Field(OFFSET, Type.INT64, ""),
new Field("leader_epoch", Type.INT32, ""),
new Field("metadata", Type.STRING, ""),
new Field("commit_timestamp", Type.INT64, "")
new Field(METADATA, Type.STRING, ""),
new Field(COMMIT_TIMESTAMP, Type.INT64, "")
);
final Schema commitOffsetSchemaV4 = new Schema(
new Field("offset", Type.INT64, ""),
new Field(OFFSET, Type.INT64, ""),
new Field("leader_epoch", Type.INT32, ""),
new Field("metadata", Type.COMPACT_STRING, ""),
new Field("commit_timestamp", Type.INT64, ""),
new Field(METADATA, Type.COMPACT_STRING, ""),
new Field(COMMIT_TIMESTAMP, Type.INT64, ""),
Field.TaggedFieldsSection.of()
);
final Schema metadataSchema0 =
new Schema(
new Field("protocol_type", Type.STRING, ""),
new Field("generation", Type.INT32, ""),
new Field("protocol", Type.NULLABLE_STRING, ""),
new Field("leader", Type.NULLABLE_STRING, ""),
new Field("members", new ArrayOf(new Schema(
new Field("member_id", Type.STRING, ""),
new Field("client_id", Type.STRING, ""),
new Field("client_host", Type.STRING, ""),
new Field("session_timeout", Type.INT32, ""),
new Field("subscription", Type.BYTES, ""),
new Field("assignment", Type.BYTES, "")
new Field(PROTOCOL_TYPE, Type.STRING, ""),
new Field(GENERATION, Type.INT32, ""),
new Field(PROTOCOL, Type.NULLABLE_STRING, ""),
new Field(LEADER, Type.NULLABLE_STRING, ""),
new Field(MEMBERS, new ArrayOf(new Schema(
new Field(MEMBER_ID, Type.STRING, ""),
new Field(CLIENT_ID, Type.STRING, ""),
new Field(CLIENT_HOST, Type.STRING, ""),
new Field(SESSION_TIMEOUT, Type.INT32, ""),
new Field(SUBSCRIPTION, Type.BYTES, ""),
new Field(ASSIGNMENT, Type.BYTES, "")
)), "")
);
final Schema metadataSchema1 =
new Schema(
new Field("protocol_type", Type.STRING, ""),
new Field("generation", Type.INT32, ""),
new Field("protocol", Type.NULLABLE_STRING, ""),
new Field("leader", Type.NULLABLE_STRING, ""),
new Field("members", new ArrayOf(new Schema(
new Field("member_id", Type.STRING, ""),
new Field("client_id", Type.STRING, ""),
new Field("client_host", Type.STRING, ""),
new Field("rebalance_timeout", Type.INT32, ""),
new Field("session_timeout", Type.INT32, ""),
new Field("subscription", Type.BYTES, ""),
new Field("assignment", Type.BYTES, "")
new Field(PROTOCOL_TYPE, Type.STRING, ""),
new Field(GENERATION, Type.INT32, ""),
new Field(PROTOCOL, Type.NULLABLE_STRING, ""),
new Field(LEADER, Type.NULLABLE_STRING, ""),
new Field(MEMBERS, new ArrayOf(new Schema(
new Field(MEMBER_ID, Type.STRING, ""),
new Field(CLIENT_ID, Type.STRING, ""),
new Field(CLIENT_HOST, Type.STRING, ""),
new Field(REBALANCE_TIMEOUT, Type.INT32, ""),
new Field(SESSION_TIMEOUT, Type.INT32, ""),
new Field(SUBSCRIPTION, Type.BYTES, ""),
new Field(ASSIGNMENT, Type.BYTES, "")
)), "")
);
final Schema metadataSchema2 =
new Schema(
new Field("protocol_type", Type.STRING, ""),
new Field("generation", Type.INT32, ""),
new Field("protocol", Type.NULLABLE_STRING, ""),
new Field("leader", Type.NULLABLE_STRING, ""),
new Field("current_state_timestamp", Type.INT64, ""),
new Field("members", new ArrayOf(new Schema(
new Field("member_id", Type.STRING, ""),
new Field("client_id", Type.STRING, ""),
new Field("client_host", Type.STRING, ""),
new Field("rebalance_timeout", Type.INT32, ""),
new Field("session_timeout", Type.INT32, ""),
new Field("subscription", Type.BYTES, ""),
new Field("assignment", Type.BYTES, "")
new Field(PROTOCOL_TYPE, Type.STRING, ""),
new Field(GENERATION, Type.INT32, ""),
new Field(PROTOCOL, Type.NULLABLE_STRING, ""),
new Field(LEADER, Type.NULLABLE_STRING, ""),
new Field(CURRENT_STATE_TIMESTAMP, Type.INT64, ""),
new Field(MEMBERS, new ArrayOf(new Schema(
new Field(MEMBER_ID, Type.STRING, ""),
new Field(CLIENT_ID, Type.STRING, ""),
new Field(CLIENT_HOST, Type.STRING, ""),
new Field(REBALANCE_TIMEOUT, Type.INT32, ""),
new Field(SESSION_TIMEOUT, Type.INT32, ""),
new Field(SUBSCRIPTION, Type.BYTES, ""),
new Field(ASSIGNMENT, Type.BYTES, "")
)), "")
);
final Schema metadataSchema3 =
new Schema(
new Field("protocol_type", Type.STRING, ""),
new Field("generation", Type.INT32, ""),
new Field("protocol", Type.NULLABLE_STRING, ""),
new Field("leader", Type.NULLABLE_STRING, ""),
new Field("current_state_timestamp", Type.INT64, ""),
new Field("members", new ArrayOf(new Schema(
new Field("member_id", Type.STRING, ""),
new Field(PROTOCOL_TYPE, Type.STRING, ""),
new Field(GENERATION, Type.INT32, ""),
new Field(PROTOCOL, Type.NULLABLE_STRING, ""),
new Field(LEADER, Type.NULLABLE_STRING, ""),
new Field(CURRENT_STATE_TIMESTAMP, Type.INT64, ""),
new Field(MEMBERS, new ArrayOf(new Schema(
new Field(MEMBER_ID, Type.STRING, ""),
new Field("group_instance_id", Type.NULLABLE_STRING, ""),
new Field("client_id", Type.STRING, ""),
new Field("client_host", Type.STRING, ""),
new Field("rebalance_timeout", Type.INT32, ""),
new Field("session_timeout", Type.INT32, ""),
new Field("subscription", Type.BYTES, ""),
new Field("assignment", Type.BYTES, "")
new Field(CLIENT_ID, Type.STRING, ""),
new Field(CLIENT_HOST, Type.STRING, ""),
new Field(REBALANCE_TIMEOUT, Type.INT32, ""),
new Field(SESSION_TIMEOUT, Type.INT32, ""),
new Field(SUBSCRIPTION, Type.BYTES, ""),
new Field(ASSIGNMENT, Type.BYTES, "")
)), "")
);
final Schema metadataSchema4 =
new Schema(
new Field("protocol_type", Type.COMPACT_STRING, ""),
new Field("generation", Type.INT32, ""),
new Field("protocol", Type.COMPACT_NULLABLE_STRING, ""),
new Field("leader", Type.COMPACT_NULLABLE_STRING, ""),
new Field("current_state_timestamp", Type.INT64, ""),
new Field("members", new CompactArrayOf(new Schema(
new Field("member_id", Type.COMPACT_STRING, ""),
new Field(PROTOCOL_TYPE, Type.COMPACT_STRING, ""),
new Field(GENERATION, Type.INT32, ""),
new Field(PROTOCOL, Type.COMPACT_NULLABLE_STRING, ""),
new Field(LEADER, Type.COMPACT_NULLABLE_STRING, ""),
new Field(CURRENT_STATE_TIMESTAMP, Type.INT64, ""),
new Field(MEMBERS, new CompactArrayOf(new Schema(
new Field(MEMBER_ID, Type.COMPACT_STRING, ""),
new Field("group_instance_id", Type.COMPACT_NULLABLE_STRING, ""),
new Field("client_id", Type.COMPACT_STRING, ""),
new Field("client_host", Type.COMPACT_STRING, ""),
new Field("rebalance_timeout", Type.INT32, ""),
new Field("session_timeout", Type.INT32, ""),
new Field("subscription", Type.COMPACT_BYTES, ""),
new Field("assignment", Type.COMPACT_BYTES, ""),
new Field(CLIENT_ID, Type.COMPACT_STRING, ""),
new Field(CLIENT_HOST, Type.COMPACT_STRING, ""),
new Field(REBALANCE_TIMEOUT, Type.INT32, ""),
new Field(SESSION_TIMEOUT, Type.INT32, ""),
new Field(SUBSCRIPTION, Type.COMPACT_BYTES, ""),
new Field(ASSIGNMENT, Type.COMPACT_BYTES, ""),
Field.TaggedFieldsSection.of()
)), ""),
Field.TaggedFieldsSection.of()
@ -249,7 +266,7 @@ public class ConsumerOffsetsSerde implements BuiltInSerde {
short version = bb.getShort();
// ideally, we should distinguish if value is commit or metadata
// by checking record's key, but our current serde structure doesn't allow that.
// so, we trying to parse into metadata first and after into commit msg
// so, we are trying to parse into metadata first and after into commit msg
try {
result = toJson(
switch (version) {
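
The comment above describes the deserializer's strategy: since the record key is not available at this point, the value bytes are parsed as group metadata first and, if that fails, re-parsed as an offset commit. A self-contained sketch of that fallback pattern (parseMetadata/parseCommit are stand-in names, not the real schema readers):

import java.nio.ByteBuffer;

class FallbackParseDemo {
  // Stand-ins for the real schema-based parsers; bodies are illustrative only.
  static String parseMetadata(ByteBuffer bb) { throw new IllegalStateException("not a metadata record"); }
  static String parseCommit(ByteBuffer bb) { return "{\"offset\": " + bb.getLong() + "}"; }

  static String parse(ByteBuffer bb) {
    bb.mark();                    // remember the read position
    try {
      return parseMetadata(bb);   // optimistic: try the group-metadata layout first
    } catch (RuntimeException notMetadata) {
      bb.reset();                 // rewind and re-read the bytes as an offset-commit value
      return parseCommit(bb);
    }
  }

  public static void main(String[] args) {
    System.out.println(parse(ByteBuffer.allocate(8).putLong(0, 42L))); // {"offset": 42}
  }
}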

View file

@ -2,7 +2,6 @@ package com.provectus.kafka.ui.serdes.builtin;
import com.google.common.primitives.Ints;
import com.provectus.kafka.ui.serde.api.DeserializeResult;
import com.provectus.kafka.ui.serde.api.PropertyResolver;
import com.provectus.kafka.ui.serde.api.SchemaDescription;
import com.provectus.kafka.ui.serdes.BuiltInSerde;
import java.util.Map;

View file

@ -1,46 +0,0 @@
package com.provectus.kafka.ui.serdes.builtin.sr;
import com.provectus.kafka.ui.util.jsonschema.JsonAvroConversion;
import io.confluent.kafka.schemaregistry.ParsedSchema;
import io.confluent.kafka.schemaregistry.avro.AvroSchema;
import io.confluent.kafka.schemaregistry.client.SchemaMetadata;
import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient;
import io.confluent.kafka.serializers.AbstractKafkaSchemaSerDeConfig;
import io.confluent.kafka.serializers.KafkaAvroSerializer;
import io.confluent.kafka.serializers.KafkaAvroSerializerConfig;
import java.util.Map;
import org.apache.kafka.common.serialization.Serializer;
class AvroSchemaRegistrySerializer extends SchemaRegistrySerializer<Object> {
AvroSchemaRegistrySerializer(String topic, boolean isKey,
SchemaRegistryClient client,
SchemaMetadata schema) {
super(topic, isKey, client, schema);
}
@Override
protected Serializer<Object> createSerializer(SchemaRegistryClient client) {
var serializer = new KafkaAvroSerializer(client);
serializer.configure(
Map.of(
"schema.registry.url", "wontbeused",
AbstractKafkaSchemaSerDeConfig.AUTO_REGISTER_SCHEMAS, false,
KafkaAvroSerializerConfig.AVRO_USE_LOGICAL_TYPE_CONVERTERS_CONFIG, true,
AbstractKafkaSchemaSerDeConfig.USE_LATEST_VERSION, true
),
isKey
);
return serializer;
}
@Override
protected Object serialize(String value, ParsedSchema schema) {
try {
return JsonAvroConversion.convertJsonToAvro(value, ((AvroSchema) schema).rawSchema());
} catch (Throwable e) {
throw new RuntimeException("Failed to serialize record for topic " + topic, e);
}
}
}

View file

@ -1,79 +0,0 @@
package com.provectus.kafka.ui.serdes.builtin.sr;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.provectus.kafka.ui.exception.ValidationException;
import com.provectus.kafka.ui.util.annotation.KafkaClientInternalsDependant;
import io.confluent.kafka.schemaregistry.ParsedSchema;
import io.confluent.kafka.schemaregistry.client.SchemaMetadata;
import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient;
import io.confluent.kafka.schemaregistry.json.JsonSchema;
import io.confluent.kafka.serializers.AbstractKafkaSchemaSerDeConfig;
import io.confluent.kafka.serializers.json.KafkaJsonSchemaSerializer;
import java.util.Map;
import org.apache.kafka.common.serialization.Serializer;
class JsonSchemaSchemaRegistrySerializer extends SchemaRegistrySerializer<JsonNode> {
private static final ObjectMapper MAPPER = new ObjectMapper();
JsonSchemaSchemaRegistrySerializer(String topic,
boolean isKey,
SchemaRegistryClient client,
SchemaMetadata schema) {
super(topic, isKey, client, schema);
}
@Override
protected Serializer<JsonNode> createSerializer(SchemaRegistryClient client) {
var serializer = new KafkaJsonSchemaSerializerWithoutSchemaInfer(client);
serializer.configure(
Map.of(
"schema.registry.url", "wontbeused",
AbstractKafkaSchemaSerDeConfig.AUTO_REGISTER_SCHEMAS, false,
AbstractKafkaSchemaSerDeConfig.USE_LATEST_VERSION, true
),
isKey
);
return serializer;
}
@Override
protected JsonNode serialize(String value, ParsedSchema schema) {
try {
JsonNode json = MAPPER.readTree(value);
((JsonSchema) schema).validate(json);
return json;
} catch (JsonProcessingException e) {
throw new ValidationException(String.format("'%s' is not valid json", value));
} catch (org.everit.json.schema.ValidationException e) {
throw new ValidationException(
String.format("'%s' does not fit schema: %s", value, e.getAllMessages()));
}
}
@KafkaClientInternalsDependant
private class KafkaJsonSchemaSerializerWithoutSchemaInfer
extends KafkaJsonSchemaSerializer<JsonNode> {
KafkaJsonSchemaSerializerWithoutSchemaInfer(SchemaRegistryClient client) {
super(client);
}
/**
* Need to override original method because it tries to infer schema from input
* by checking 'schema' json field or @Schema annotation on input class, which is not
* possible in our case. So, we just skip all infer logic and pass schema directly.
*/
@Override
public byte[] serialize(String topic, JsonNode rec) {
return super.serializeImpl(
super.getSubjectName(topic, isKey, rec, schema),
rec,
(JsonSchema) schema
);
}
}
}

View file

@ -1,50 +0,0 @@
package com.provectus.kafka.ui.serdes.builtin.sr;
import com.google.protobuf.DynamicMessage;
import com.google.protobuf.Message;
import com.google.protobuf.util.JsonFormat;
import io.confluent.kafka.schemaregistry.ParsedSchema;
import io.confluent.kafka.schemaregistry.client.SchemaMetadata;
import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient;
import io.confluent.kafka.schemaregistry.protobuf.ProtobufSchema;
import io.confluent.kafka.serializers.AbstractKafkaSchemaSerDeConfig;
import io.confluent.kafka.serializers.protobuf.KafkaProtobufSerializer;
import java.util.Map;
import lombok.SneakyThrows;
import org.apache.kafka.common.serialization.Serializer;
class ProtobufSchemaRegistrySerializer extends SchemaRegistrySerializer<Message> {
@SneakyThrows
public ProtobufSchemaRegistrySerializer(String topic, boolean isKey,
SchemaRegistryClient client, SchemaMetadata schema) {
super(topic, isKey, client, schema);
}
@Override
protected Serializer<Message> createSerializer(SchemaRegistryClient client) {
var serializer = new KafkaProtobufSerializer<>(client);
serializer.configure(
Map.of(
"schema.registry.url", "wontbeused",
AbstractKafkaSchemaSerDeConfig.AUTO_REGISTER_SCHEMAS, false,
AbstractKafkaSchemaSerDeConfig.USE_LATEST_VERSION, true
),
isKey
);
return serializer;
}
@Override
protected Message serialize(String value, ParsedSchema schema) {
ProtobufSchema protobufSchema = (ProtobufSchema) schema;
DynamicMessage.Builder builder = protobufSchema.newMessageBuilder();
try {
JsonFormat.parser().merge(value, builder);
return builder.build();
} catch (Throwable e) {
throw new RuntimeException("Failed to serialize record for topic " + topic, e);
}
}
}

View file

@ -1,5 +1,8 @@
package com.provectus.kafka.ui.serdes.builtin.sr;
import static com.provectus.kafka.ui.serdes.builtin.sr.Serialize.serializeAvro;
import static com.provectus.kafka.ui.serdes.builtin.sr.Serialize.serializeJson;
import static com.provectus.kafka.ui.serdes.builtin.sr.Serialize.serializeProto;
import static io.confluent.kafka.serializers.AbstractKafkaSchemaSerDeConfig.BASIC_AUTH_CREDENTIALS_SOURCE;
import static io.confluent.kafka.serializers.AbstractKafkaSchemaSerDeConfig.USER_INFO_CONFIG;
@ -7,7 +10,6 @@ import com.google.common.annotations.VisibleForTesting;
import com.provectus.kafka.ui.exception.ValidationException;
import com.provectus.kafka.ui.serde.api.DeserializeResult;
import com.provectus.kafka.ui.serde.api.PropertyResolver;
import com.provectus.kafka.ui.serde.api.RecordHeaders;
import com.provectus.kafka.ui.serde.api.SchemaDescription;
import com.provectus.kafka.ui.serdes.BuiltInSerde;
import com.provectus.kafka.ui.util.jsonschema.AvroJsonSchemaConverter;
@ -32,17 +34,21 @@ import java.util.Map;
import java.util.Optional;
import java.util.concurrent.Callable;
import javax.annotation.Nullable;
import lombok.RequiredArgsConstructor;
import lombok.SneakyThrows;
import org.apache.kafka.common.config.SslConfigs;
public class SchemaRegistrySerde implements BuiltInSerde {
private static final byte SR_PAYLOAD_MAGIC_BYTE = 0x0;
private static final int SR_PAYLOAD_PREFIX_LENGTH = 5;
public static String name() {
return "SchemaRegistry";
}
private static final String SCHEMA_REGISTRY = "schemaRegistry";
private SchemaRegistryClient schemaRegistryClient;
private List<String> schemaRegistryUrls;
private String valueSchemaNameTemplate;
@ -54,7 +60,7 @@ public class SchemaRegistrySerde implements BuiltInSerde {
@Override
public boolean canBeAutoConfigured(PropertyResolver kafkaClusterProperties,
PropertyResolver globalProperties) {
return kafkaClusterProperties.getListProperty("schemaRegistry", String.class)
return kafkaClusterProperties.getListProperty(SCHEMA_REGISTRY, String.class)
.filter(lst -> !lst.isEmpty())
.isPresent();
}
@ -62,7 +68,7 @@ public class SchemaRegistrySerde implements BuiltInSerde {
@Override
public void autoConfigure(PropertyResolver kafkaClusterProperties,
PropertyResolver globalProperties) {
var urls = kafkaClusterProperties.getListProperty("schemaRegistry", String.class)
var urls = kafkaClusterProperties.getListProperty(SCHEMA_REGISTRY, String.class)
.filter(lst -> !lst.isEmpty())
.orElseThrow(() -> new ValidationException("No urls provided for schema registry"));
configure(
@ -88,7 +94,7 @@ public class SchemaRegistrySerde implements BuiltInSerde {
PropertyResolver kafkaClusterProperties,
PropertyResolver globalProperties) {
var urls = serdeProperties.getListProperty("url", String.class)
.or(() -> kafkaClusterProperties.getListProperty("schemaRegistry", String.class))
.or(() -> kafkaClusterProperties.getListProperty(SCHEMA_REGISTRY, String.class))
.filter(lst -> !lst.isEmpty())
.orElseThrow(() -> new ValidationException("No urls provided for schema registry"));
configure(
@ -219,8 +225,8 @@ public class SchemaRegistrySerde implements BuiltInSerde {
.convert(basePath, ((AvroSchema) parsedSchema).rawSchema())
.toJson();
case JSON ->
//need to use confluent JsonSchema since it includes resolved references
((JsonSchema) parsedSchema).rawSchema().toString();
//need to use confluent JsonSchema since it includes resolved references
((JsonSchema) parsedSchema).rawSchema().toString();
};
}
@ -252,35 +258,27 @@ public class SchemaRegistrySerde implements BuiltInSerde {
@Override
public Serializer serializer(String topic, Target type) {
String subject = schemaSubject(topic, type);
var schema = getSchemaBySubject(subject)
.orElseThrow(() -> new ValidationException(String.format("No schema for subject '%s' found", subject)));
boolean isKey = type == Target.KEY;
SchemaType schemaType = SchemaType.fromString(schema.getSchemaType())
.orElseThrow(() -> new IllegalStateException("Unknown schema type: " + schema.getSchemaType()));
SchemaMetadata meta = getSchemaBySubject(subject)
.orElseThrow(() -> new ValidationException(
String.format("No schema for subject '%s' found", subject)));
ParsedSchema schema = getSchemaById(meta.getId())
.orElseThrow(() -> new IllegalStateException(
String.format("No schema found for id %s, subject '%s'", meta.getId(), subject)));
SchemaType schemaType = SchemaType.fromString(meta.getSchemaType())
.orElseThrow(() -> new IllegalStateException("Unknown schema type: " + meta.getSchemaType()));
return switch (schemaType) {
case PROTOBUF -> new ProtobufSchemaRegistrySerializer(topic, isKey, schemaRegistryClient, schema);
case AVRO -> new AvroSchemaRegistrySerializer(topic, isKey, schemaRegistryClient, schema);
case JSON -> new JsonSchemaSchemaRegistrySerializer(topic, isKey, schemaRegistryClient, schema);
case PROTOBUF -> input ->
serializeProto(schemaRegistryClient, topic, type, (ProtobufSchema) schema, meta.getId(), input);
case AVRO -> input ->
serializeAvro((AvroSchema) schema, meta.getId(), input);
case JSON -> input ->
serializeJson((JsonSchema) schema, meta.getId(), input);
};
}
@Override
public Deserializer deserializer(String topic, Target type) {
return new SrDeserializer(topic);
}
///--------------------------------------------------------------
private static final byte SR_RECORD_MAGIC_BYTE = (byte) 0;
private static final int SR_RECORD_PREFIX_LENGTH = 5;
@RequiredArgsConstructor
private class SrDeserializer implements Deserializer {
private final String topic;
@Override
public DeserializeResult deserialize(RecordHeaders headers, byte[] data) {
return (headers, data) -> {
var schemaId = extractSchemaIdFromMsg(data);
SchemaType format = getMessageFormatBySchemaId(schemaId);
MessageFormatter formatter = schemaRegistryFormatters.get(format);
@ -292,7 +290,7 @@ public class SchemaRegistrySerde implements BuiltInSerde {
"type", format.name()
)
);
}
};
}
private SchemaType getMessageFormatBySchemaId(int schemaId) {
@ -304,7 +302,7 @@ public class SchemaRegistrySerde implements BuiltInSerde {
private int extractSchemaIdFromMsg(byte[] data) {
ByteBuffer buffer = ByteBuffer.wrap(data);
if (buffer.remaining() > SR_RECORD_PREFIX_LENGTH && buffer.get() == SR_RECORD_MAGIC_BYTE) {
if (buffer.remaining() >= SR_PAYLOAD_PREFIX_LENGTH && buffer.get() == SR_PAYLOAD_MAGIC_BYTE) {
return buffer.getInt();
}
throw new ValidationException(

View file

@ -1,34 +0,0 @@
package com.provectus.kafka.ui.serdes.builtin.sr;
import com.provectus.kafka.ui.serde.api.Serde;
import io.confluent.kafka.schemaregistry.ParsedSchema;
import io.confluent.kafka.schemaregistry.client.SchemaMetadata;
import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient;
import lombok.SneakyThrows;
import org.apache.kafka.common.serialization.Serializer;
abstract class SchemaRegistrySerializer<T> implements Serde.Serializer {
protected final Serializer<T> serializer;
protected final String topic;
protected final boolean isKey;
protected final ParsedSchema schema;
@SneakyThrows
protected SchemaRegistrySerializer(String topic, boolean isKey, SchemaRegistryClient client,
SchemaMetadata schema) {
this.topic = topic;
this.isKey = isKey;
this.serializer = createSerializer(client);
this.schema = client.getSchemaById(schema.getId());
}
protected abstract Serializer<T> createSerializer(SchemaRegistryClient client);
@Override
public byte[] serialize(String input) {
final T read = this.serialize(input, schema);
return this.serializer.serialize(topic, read);
}
protected abstract T serialize(String value, ParsedSchema schema);
}

View file

@ -0,0 +1,126 @@
package com.provectus.kafka.ui.serdes.builtin.sr;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.base.Preconditions;
import com.google.protobuf.DynamicMessage;
import com.google.protobuf.Message;
import com.google.protobuf.util.JsonFormat;
import com.provectus.kafka.ui.exception.ValidationException;
import com.provectus.kafka.ui.serde.api.Serde;
import com.provectus.kafka.ui.util.annotation.KafkaClientInternalsDependant;
import com.provectus.kafka.ui.util.jsonschema.JsonAvroConversion;
import io.confluent.kafka.schemaregistry.avro.AvroSchema;
import io.confluent.kafka.schemaregistry.avro.AvroSchemaUtils;
import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient;
import io.confluent.kafka.schemaregistry.json.JsonSchema;
import io.confluent.kafka.schemaregistry.json.jackson.Jackson;
import io.confluent.kafka.schemaregistry.protobuf.MessageIndexes;
import io.confluent.kafka.schemaregistry.protobuf.ProtobufSchema;
import io.confluent.kafka.serializers.protobuf.AbstractKafkaProtobufSerializer;
import io.confluent.kafka.serializers.subject.DefaultReferenceSubjectNameStrategy;
import java.io.ByteArrayOutputStream;
import java.nio.ByteBuffer;
import java.util.HashMap;
import lombok.SneakyThrows;
import org.apache.avro.Schema;
import org.apache.avro.io.BinaryEncoder;
import org.apache.avro.io.DatumWriter;
import org.apache.avro.io.EncoderFactory;
final class Serialize {
private static final byte MAGIC = 0x0;
private static final ObjectMapper JSON_SERIALIZE_MAPPER = Jackson.newObjectMapper(); //from confluent package
private Serialize() {
}
@KafkaClientInternalsDependant("AbstractKafkaJsonSchemaSerializer::serializeImpl")
@SneakyThrows
static byte[] serializeJson(JsonSchema schema, int schemaId, String value) {
JsonNode json;
try {
json = JSON_SERIALIZE_MAPPER.readTree(value);
} catch (JsonProcessingException e) {
throw new ValidationException(String.format("'%s' is not valid json", value));
}
try {
schema.validate(json);
} catch (org.everit.json.schema.ValidationException e) {
throw new ValidationException(
String.format("'%s' does not fit schema: %s", value, e.getAllMessages()));
}
try (var out = new ByteArrayOutputStream()) {
out.write(MAGIC);
out.write(schemaId(schemaId));
out.write(JSON_SERIALIZE_MAPPER.writeValueAsBytes(json));
return out.toByteArray();
}
}
@KafkaClientInternalsDependant("AbstractKafkaProtobufSerializer::serializeImpl")
@SneakyThrows
static byte[] serializeProto(SchemaRegistryClient srClient,
String topic,
Serde.Target target,
ProtobufSchema schema,
int schemaId,
String input) {
// flags are tuned to match the ProtobufSerializer defaults
boolean normalizeSchema = false;
boolean autoRegisterSchema = false;
boolean useLatestVersion = true;
boolean latestCompatStrict = true;
boolean skipKnownTypes = true;
schema = AbstractKafkaProtobufSerializer.resolveDependencies(
srClient, normalizeSchema, autoRegisterSchema, useLatestVersion, latestCompatStrict,
new HashMap<>(), skipKnownTypes, new DefaultReferenceSubjectNameStrategy(),
topic, target == Serde.Target.KEY, schema
);
DynamicMessage.Builder builder = schema.newMessageBuilder();
JsonFormat.parser().merge(input, builder);
Message message = builder.build();
MessageIndexes indexes = schema.toMessageIndexes(message.getDescriptorForType().getFullName(), normalizeSchema);
try (var out = new ByteArrayOutputStream()) {
out.write(MAGIC);
out.write(schemaId(schemaId));
out.write(indexes.toByteArray());
message.writeTo(out);
return out.toByteArray();
}
}
@KafkaClientInternalsDependant("AbstractKafkaAvroSerializer::serializeImpl")
@SneakyThrows
static byte[] serializeAvro(AvroSchema schema, int schemaId, String input) {
var avroObject = JsonAvroConversion.convertJsonToAvro(input, schema.rawSchema());
try (var out = new ByteArrayOutputStream()) {
out.write(MAGIC);
out.write(schemaId(schemaId));
Schema rawSchema = schema.rawSchema();
if (rawSchema.getType().equals(Schema.Type.BYTES)) {
Preconditions.checkState(
avroObject instanceof ByteBuffer,
"Unrecognized bytes object of type: " + avroObject.getClass().getName()
);
out.write(((ByteBuffer) avroObject).array());
} else {
boolean useLogicalTypeConverters = true;
BinaryEncoder encoder = EncoderFactory.get().directBinaryEncoder(out, null);
DatumWriter<Object> writer =
(DatumWriter<Object>) AvroSchemaUtils.getDatumWriter(avroObject, rawSchema, useLogicalTypeConverters);
writer.write(avroObject, encoder);
encoder.flush();
}
return out.toByteArray();
}
}
private static byte[] schemaId(int id) {
return ByteBuffer.allocate(Integer.BYTES).putInt(id).array();
}
}
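
All three serialize* helpers above frame the payload in the Confluent wire format: one magic byte (0x0) followed by a 4-byte big-endian schema id, then the encoded record. A minimal sketch of reading that prefix back, mirroring the extractSchemaIdFromMsg check earlier in this diff (constant names here are chosen for the sketch):

import java.nio.ByteBuffer;

class WireFormatDemo {
  private static final byte MAGIC = 0x0;
  private static final int PREFIX_LENGTH = 5; // magic byte + 4-byte schema id

  static int schemaId(byte[] payload) {
    ByteBuffer buffer = ByteBuffer.wrap(payload);
    if (buffer.remaining() >= PREFIX_LENGTH && buffer.get() == MAGIC) {
      return buffer.getInt();
    }
    throw new IllegalArgumentException("Not a Schema Registry framed payload");
  }

  public static void main(String[] args) {
    byte[] framed = ByteBuffer.allocate(6).put(MAGIC).putInt(123).put((byte) 7).array();
    System.out.println(schemaId(framed)); // 123
  }
}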

View file

@ -1,13 +1,8 @@
package com.provectus.kafka.ui.service;
import com.google.common.base.Charsets;
import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import com.google.common.hash.Hashing;
import com.google.common.util.concurrent.RateLimiter;
import com.provectus.kafka.ui.config.ClustersProperties;
import com.provectus.kafka.ui.emitter.BackwardEmitter;
import com.provectus.kafka.ui.emitter.Cursor;
import com.provectus.kafka.ui.emitter.ForwardEmitter;
import com.provectus.kafka.ui.emitter.MessageFilters;
import com.provectus.kafka.ui.emitter.TailingEmitter;
@ -16,12 +11,12 @@ import com.provectus.kafka.ui.exception.ValidationException;
import com.provectus.kafka.ui.model.ConsumerPosition;
import com.provectus.kafka.ui.model.CreateTopicMessageDTO;
import com.provectus.kafka.ui.model.KafkaCluster;
import com.provectus.kafka.ui.model.PollingModeDTO;
import com.provectus.kafka.ui.model.MessageFilterTypeDTO;
import com.provectus.kafka.ui.model.SeekDirectionDTO;
import com.provectus.kafka.ui.model.SmartFilterTestExecutionDTO;
import com.provectus.kafka.ui.model.SmartFilterTestExecutionResultDTO;
import com.provectus.kafka.ui.model.TopicMessageDTO;
import com.provectus.kafka.ui.model.TopicMessageEventDTO;
import com.provectus.kafka.ui.serdes.ConsumerRecordDeserializer;
import com.provectus.kafka.ui.serdes.ProducerRecordCreator;
import com.provectus.kafka.ui.util.SslPropertiesUtil;
import java.time.Instant;
@ -32,12 +27,12 @@ import java.util.Map;
import java.util.Optional;
import java.util.Properties;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ThreadLocalRandom;
import java.util.function.Predicate;
import java.util.function.UnaryOperator;
import java.util.stream.Collectors;
import javax.annotation.Nullable;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang3.StringUtils;
import org.apache.kafka.clients.admin.OffsetSpec;
import org.apache.kafka.clients.admin.TopicDescription;
import org.apache.kafka.clients.producer.KafkaProducer;
@ -55,11 +50,8 @@ import reactor.core.scheduler.Schedulers;
@Slf4j
public class MessagesService {
private static final long SALT_FOR_HASHING = ThreadLocalRandom.current().nextLong();
private static final int DEFAULT_MAX_PAGE_SIZE = 500;
private static final int DEFAULT_PAGE_SIZE = 100;
// limiting UI messages rate to 20/sec in tailing mode
private static final int TAILING_UI_MESSAGE_THROTTLE_RATE = 20;
@ -69,12 +61,6 @@ public class MessagesService {
private final int maxPageSize;
private final int defaultPageSize;
private final Cache<String, Predicate<TopicMessageDTO>> registeredFilters = CacheBuilder.newBuilder()
.maximumSize(PollingCursorsStorage.MAX_SIZE)
.build();
private final PollingCursorsStorage cursorsStorage = new PollingCursorsStorage();
public MessagesService(AdminClientService adminClientService,
DeserializationService deserializationService,
ConsumerGroupService consumerGroupService,
@ -100,7 +86,10 @@ public class MessagesService {
public static SmartFilterTestExecutionResultDTO execSmartFilterTest(SmartFilterTestExecutionDTO execData) {
Predicate<TopicMessageDTO> predicate;
try {
predicate = MessageFilters.groovyScriptFilter(execData.getFilterCode());
predicate = MessageFilters.createMsgFilter(
execData.getFilterCode(),
MessageFilterTypeDTO.GROOVY_SCRIPT
);
} catch (Exception e) {
log.info("Smart filter '{}' compilation error", execData.getFilterCode(), e);
return new SmartFilterTestExecutionResultDTO()
@ -208,103 +197,67 @@ public class MessagesService {
return new KafkaProducer<>(properties);
}
public Flux<TopicMessageEventDTO> loadMessages(KafkaCluster cluster,
String topic,
public Flux<TopicMessageEventDTO> loadMessages(KafkaCluster cluster, String topic,
ConsumerPosition consumerPosition,
@Nullable String containsStringFilter,
@Nullable String filterId,
@Nullable Integer limit,
@Nullable String query,
MessageFilterTypeDTO filterQueryType,
@Nullable Integer pageSize,
SeekDirectionDTO seekDirection,
@Nullable String keySerde,
@Nullable String valueSerde) {
return loadMessages(
cluster,
topic,
deserializationService.deserializerFor(cluster, topic, keySerde, valueSerde),
consumerPosition,
getMsgFilter(containsStringFilter, filterId),
fixPageSize(limit)
);
}
public Flux<TopicMessageEventDTO> loadMessages(KafkaCluster cluster, String topic, String cursorId) {
Cursor cursor = cursorsStorage.getCursor(cursorId)
.orElseThrow(() -> new ValidationException("Next page cursor not found. Maybe it was evicted from cache."));
return loadMessages(
cluster,
topic,
cursor.deserializer(),
cursor.consumerPosition(),
cursor.filter(),
cursor.limit()
);
}
private Flux<TopicMessageEventDTO> loadMessages(KafkaCluster cluster,
String topic,
ConsumerRecordDeserializer deserializer,
ConsumerPosition consumerPosition,
Predicate<TopicMessageDTO> filter,
int limit) {
return withExistingTopic(cluster, topic)
.flux()
.publishOn(Schedulers.boundedElastic())
.flatMap(td -> loadMessagesImpl(cluster, deserializer, consumerPosition, filter, limit));
.flatMap(td -> loadMessagesImpl(cluster, topic, consumerPosition, query,
filterQueryType, fixPageSize(pageSize), seekDirection, keySerde, valueSerde));
}
private int fixPageSize(@Nullable Integer pageSize) {
return Optional.ofNullable(pageSize)
.filter(ps -> ps > 0 && ps <= maxPageSize)
.orElse(defaultPageSize);
}
private Flux<TopicMessageEventDTO> loadMessagesImpl(KafkaCluster cluster,
ConsumerRecordDeserializer deserializer,
String topic,
ConsumerPosition consumerPosition,
Predicate<TopicMessageDTO> filter,
int limit) {
var emitter = switch (consumerPosition.pollingMode()) {
case TO_OFFSET, TO_TIMESTAMP, LATEST -> new BackwardEmitter(
@Nullable String query,
MessageFilterTypeDTO filterQueryType,
int limit,
SeekDirectionDTO seekDirection,
@Nullable String keySerde,
@Nullable String valueSerde) {
var deserializer = deserializationService.deserializerFor(cluster, topic, keySerde, valueSerde);
var filter = getMsgFilter(query, filterQueryType);
var emitter = switch (seekDirection) {
case FORWARD -> new ForwardEmitter(
() -> consumerGroupService.createConsumer(cluster),
consumerPosition,
limit,
deserializer,
filter,
cluster.getPollingSettings(),
cursorsStorage.createNewCursor(deserializer, consumerPosition, filter, limit)
consumerPosition, limit, deserializer, filter, cluster.getPollingSettings()
);
case FROM_OFFSET, FROM_TIMESTAMP, EARLIEST -> new ForwardEmitter(
case BACKWARD -> new BackwardEmitter(
() -> consumerGroupService.createConsumer(cluster),
consumerPosition,
limit,
deserializer,
filter,
cluster.getPollingSettings(),
cursorsStorage.createNewCursor(deserializer, consumerPosition, filter, limit)
consumerPosition, limit, deserializer, filter, cluster.getPollingSettings()
);
case TAILING -> new TailingEmitter(
() -> consumerGroupService.createConsumer(cluster),
consumerPosition,
deserializer,
filter,
cluster.getPollingSettings()
consumerPosition, deserializer, filter, cluster.getPollingSettings()
);
};
return Flux.create(emitter)
.map(throttleUiPublish(consumerPosition.pollingMode()));
.map(throttleUiPublish(seekDirection));
}
private Predicate<TopicMessageDTO> getMsgFilter(@Nullable String containsStrFilter,
@Nullable String smartFilterId) {
Predicate<TopicMessageDTO> messageFilter = MessageFilters.noop();
if (containsStrFilter != null) {
messageFilter = messageFilter.and(MessageFilters.containsStringFilter(containsStrFilter));
private Predicate<TopicMessageDTO> getMsgFilter(String query,
MessageFilterTypeDTO filterQueryType) {
if (StringUtils.isEmpty(query)) {
return evt -> true;
}
if (smartFilterId != null) {
var registered = registeredFilters.getIfPresent(smartFilterId);
if (registered == null) {
throw new ValidationException("No filter was registered with id " + smartFilterId);
}
messageFilter = messageFilter.and(registered);
}
return messageFilter;
return MessageFilters.createMsgFilter(query, filterQueryType);
}
private <T> UnaryOperator<T> throttleUiPublish(PollingModeDTO pollingMode) {
if (pollingMode == PollingModeDTO.TAILING) {
private <T> UnaryOperator<T> throttleUiPublish(SeekDirectionDTO seekDirection) {
if (seekDirection == SeekDirectionDTO.TAILING) {
RateLimiter rateLimiter = RateLimiter.create(TAILING_UI_MESSAGE_THROTTLE_RATE);
return m -> {
rateLimiter.acquire(1);
@ -316,22 +269,4 @@ public class MessagesService {
return UnaryOperator.identity();
}
private int fixPageSize(@Nullable Integer pageSize) {
return Optional.ofNullable(pageSize)
.filter(ps -> ps > 0 && ps <= maxPageSize)
.orElse(defaultPageSize);
}
public String registerMessageFilter(String groovyCode) {
String saltedCode = groovyCode + SALT_FOR_HASHING;
String filterId = Hashing.sha256()
.hashString(saltedCode, Charsets.UTF_8)
.toString()
.substring(0, 8);
if (registeredFilters.getIfPresent(filterId) == null) {
registeredFilters.put(filterId, MessageFilters.groovyScriptFilter(groovyCode));
}
return filterId;
}
}
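
registerMessageFilter above derives a short filter id by hashing the Groovy code together with a per-process random salt, so re-registering identical code returns the same id. A standalone sketch of that derivation (the fixed salt and sample filter code below are placeholders):

import com.google.common.base.Charsets;
import com.google.common.hash.Hashing;

class FilterIdDemo {
  public static void main(String[] args) {
    long salt = 12345L;                  // the real code uses a random SALT_FOR_HASHING per process
    String groovyCode = "value != null"; // hypothetical filter code
    String filterId = Hashing.sha256()
        .hashString(groovyCode + salt, Charsets.UTF_8)
        .toString()
        .substring(0, 8);
    System.out.println(filterId); // same code + same salt always yields the same 8-char id
  }
}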

View file

@ -1,45 +0,0 @@
package com.provectus.kafka.ui.service;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import com.provectus.kafka.ui.emitter.Cursor;
import com.provectus.kafka.ui.model.ConsumerPosition;
import com.provectus.kafka.ui.model.TopicMessageDTO;
import com.provectus.kafka.ui.serdes.ConsumerRecordDeserializer;
import java.util.Map;
import java.util.Optional;
import java.util.function.Predicate;
import org.apache.commons.lang3.RandomStringUtils;
public class PollingCursorsStorage {
public static final int MAX_SIZE = 10_000;
private final Cache<String, Cursor> cursorsCache = CacheBuilder.newBuilder()
.maximumSize(MAX_SIZE)
.build();
public Cursor.Tracking createNewCursor(ConsumerRecordDeserializer deserializer,
ConsumerPosition originalPosition,
Predicate<TopicMessageDTO> filter,
int limit) {
return new Cursor.Tracking(deserializer, originalPosition, filter, limit, this::register);
}
public Optional<Cursor> getCursor(String id) {
return Optional.ofNullable(cursorsCache.getIfPresent(id));
}
public String register(Cursor cursor) {
var id = RandomStringUtils.random(8, true, true);
cursorsCache.put(id, cursor);
return id;
}
@VisibleForTesting
public Map<String, Cursor> asMap() {
return cursorsCache.asMap();
}
}

View file

@ -31,6 +31,7 @@ import java.util.function.BiFunction;
import java.util.function.Function;
import java.util.function.Predicate;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import java.util.stream.Stream;
import javax.annotation.Nullable;
import lombok.AccessLevel;
@ -55,6 +56,7 @@ import org.apache.kafka.clients.admin.NewPartitionReassignment;
import org.apache.kafka.clients.admin.NewPartitions;
import org.apache.kafka.clients.admin.NewTopic;
import org.apache.kafka.clients.admin.OffsetSpec;
import org.apache.kafka.clients.admin.ProducerState;
import org.apache.kafka.clients.admin.RecordsToDelete;
import org.apache.kafka.clients.admin.TopicDescription;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
@ -658,6 +660,21 @@ public class ReactiveAdminClient implements Closeable {
return toMono(client.alterReplicaLogDirs(replicaAssignment).all());
}
// returns tp -> list of active producer states (if any)
public Mono<Map<TopicPartition, List<ProducerState>>> getActiveProducersState(String topic) {
return describeTopic(topic)
.map(td -> client.describeProducers(
IntStream.range(0, td.partitions().size())
.mapToObj(i -> new TopicPartition(topic, i))
.toList()
).all()
)
.flatMap(ReactiveAdminClient::toMono)
.map(map -> map.entrySet().stream()
.filter(e -> !e.getValue().activeProducers().isEmpty()) // skipping partitions without producers
.collect(toMap(Map.Entry::getKey, e -> e.getValue().activeProducers())));
}
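
getActiveProducersState wraps AdminClient#describeProducers for every partition of the topic and drops partitions with no active producers. A plain-AdminClient sketch of the same call, with the bootstrap server and topic as placeholder assumptions:

import java.util.List;
import java.util.Map;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.common.TopicPartition;

class ActiveProducersDemo {
  public static void main(String[] args) throws Exception {
    try (Admin admin = Admin.create(
        Map.<String, Object>of(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"))) {
      var result = admin.describeProducers(List.of(new TopicPartition("orders", 0)));
      result.all().get().forEach((tp, state) ->
          state.activeProducers().forEach(p ->
              System.out.printf("%s producerId=%d epoch=%d%n", tp, p.producerId(), p.producerEpoch())));
    }
  }
}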
private Mono<Void> incrementalAlterConfig(String topicName,
List<ConfigEntry> currentConfigs,
Map<String, String> newConfigs) {

View file

@ -39,6 +39,7 @@ import org.apache.kafka.clients.admin.ConfigEntry;
import org.apache.kafka.clients.admin.NewPartitionReassignment;
import org.apache.kafka.clients.admin.NewPartitions;
import org.apache.kafka.clients.admin.OffsetSpec;
import org.apache.kafka.clients.admin.ProducerState;
import org.apache.kafka.clients.admin.TopicDescription;
import org.apache.kafka.common.Node;
import org.apache.kafka.common.TopicPartition;
@ -459,6 +460,11 @@ public class TopicsService {
);
}
public Mono<Map<TopicPartition, List<ProducerState>>> getActiveProducersState(KafkaCluster cluster, String topic) {
return adminClientService.get(cluster)
.flatMap(ac -> ac.getActiveProducersState(topic));
}
private Mono<List<String>> filterExisting(KafkaCluster cluster, Collection<String> topics) {
return adminClientService.get(cluster)
.flatMap(ac -> ac.listTopics(true))

View file

@ -1,6 +1,6 @@
package com.provectus.kafka.ui.service.analyze;
import static com.provectus.kafka.ui.model.PollingModeDTO.EARLIEST;
import static com.provectus.kafka.ui.model.SeekTypeDTO.BEGINNING;
import com.provectus.kafka.ui.emitter.EnhancedConsumer;
import com.provectus.kafka.ui.emitter.SeekOperations;
@ -14,7 +14,6 @@ import java.io.Closeable;
import java.time.Duration;
import java.time.Instant;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import lombok.RequiredArgsConstructor;
@ -105,8 +104,7 @@ public class TopicAnalysisService {
consumer.partitionsFor(topicId.topicName)
.forEach(tp -> partitionStats.put(tp.partition(), new TopicAnalysisStats()));
var seekOperations =
SeekOperations.create(consumer, new ConsumerPosition(EARLIEST, topicId.topicName, List.of(), null, null));
var seekOperations = SeekOperations.create(consumer, new ConsumerPosition(BEGINNING, topicId.topicName, null));
long summaryOffsetsRange = seekOperations.summaryOffsetsRange();
seekOperations.assignAndSeekNonEmptyPartitions();

View file

@ -8,7 +8,7 @@ import java.util.Objects;
import java.util.function.Function;
import java.util.stream.Stream;
import javax.annotation.Nullable;
import org.apache.commons.collections.CollectionUtils;
import org.apache.commons.collections4.CollectionUtils;
import org.opendatadiscovery.oddrn.JdbcUrlParser;
import org.opendatadiscovery.oddrn.model.HivePath;
import org.opendatadiscovery.oddrn.model.MysqlPath;

View file

@ -11,6 +11,9 @@ import org.apache.kafka.common.Node;
class WellKnownMetrics {
private static final String BROKER_TOPIC_METRICS = "BrokerTopicMetrics";
private static final String FIFTEEN_MINUTE_RATE = "FifteenMinuteRate";
// per broker
final Map<Integer, BigDecimal> brokerBytesInFifteenMinuteRate = new HashMap<>();
final Map<Integer, BigDecimal> brokerBytesOutFifteenMinuteRate = new HashMap<>();
@ -36,15 +39,15 @@ class WellKnownMetrics {
if (!brokerBytesInFifteenMinuteRate.containsKey(node.id())
&& rawMetric.labels().size() == 1
&& "BytesInPerSec".equalsIgnoreCase(rawMetric.labels().get("name"))
&& containsIgnoreCase(name, "BrokerTopicMetrics")
&& endsWithIgnoreCase(name, "FifteenMinuteRate")) {
&& containsIgnoreCase(name, BROKER_TOPIC_METRICS)
&& endsWithIgnoreCase(name, FIFTEEN_MINUTE_RATE)) {
brokerBytesInFifteenMinuteRate.put(node.id(), rawMetric.value());
}
if (!brokerBytesOutFifteenMinuteRate.containsKey(node.id())
&& rawMetric.labels().size() == 1
&& "BytesOutPerSec".equalsIgnoreCase(rawMetric.labels().get("name"))
&& containsIgnoreCase(name, "BrokerTopicMetrics")
&& endsWithIgnoreCase(name, "FifteenMinuteRate")) {
&& containsIgnoreCase(name, BROKER_TOPIC_METRICS)
&& endsWithIgnoreCase(name, FIFTEEN_MINUTE_RATE)) {
brokerBytesOutFifteenMinuteRate.put(node.id(), rawMetric.value());
}
}
@ -53,8 +56,8 @@ class WellKnownMetrics {
String name = rawMetric.name();
String topic = rawMetric.labels().get("topic");
if (topic != null
&& containsIgnoreCase(name, "BrokerTopicMetrics")
&& endsWithIgnoreCase(name, "FifteenMinuteRate")) {
&& containsIgnoreCase(name, BROKER_TOPIC_METRICS)
&& endsWithIgnoreCase(name, FIFTEEN_MINUTE_RATE)) {
String nameProperty = rawMetric.labels().get("name");
if ("BytesInPerSec".equalsIgnoreCase(nameProperty)) {
bytesInFifteenMinuteRate.compute(topic, (k, v) -> v == null ? rawMetric.value() : v.add(rawMetric.value()));

View file

@ -33,7 +33,7 @@ import java.util.stream.Collectors;
import javax.annotation.Nullable;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.collections.CollectionUtils;
import org.apache.commons.collections4.CollectionUtils;
import org.apache.commons.lang3.StringUtils;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.core.env.Environment;
@ -52,6 +52,7 @@ import reactor.core.publisher.Mono;
public class AccessControlService {
private static final String ACCESS_DENIED = "Access denied";
private static final String ACTIONS_ARE_EMPTY = "actions are empty";
@Nullable
private final InMemoryReactiveClientRegistrationRepository clientRegistrationRepository;
@ -206,7 +207,7 @@ public class AccessControlService {
if (context.getTopic() == null && context.getTopicActions().isEmpty()) {
return true;
}
Assert.isTrue(!context.getTopicActions().isEmpty(), "actions are empty");
Assert.isTrue(!context.getTopicActions().isEmpty(), ACTIONS_ARE_EMPTY);
Set<String> requiredActions = context.getTopicActions()
.stream()
@ -243,7 +244,7 @@ public class AccessControlService {
if (context.getConsumerGroup() == null && context.getConsumerGroupActions().isEmpty()) {
return true;
}
Assert.isTrue(!context.getConsumerGroupActions().isEmpty(), "actions are empty");
Assert.isTrue(!context.getConsumerGroupActions().isEmpty(), ACTIONS_ARE_EMPTY);
Set<String> requiredActions = context.getConsumerGroupActions()
.stream()
@ -276,7 +277,7 @@ public class AccessControlService {
if (context.getSchema() == null && context.getSchemaActions().isEmpty()) {
return true;
}
Assert.isTrue(!context.getSchemaActions().isEmpty(), "actions are empty");
Assert.isTrue(!context.getSchemaActions().isEmpty(), ACTIONS_ARE_EMPTY);
Set<String> requiredActions = context.getSchemaActions()
.stream()
@ -309,7 +310,7 @@ public class AccessControlService {
if (context.getConnect() == null && context.getConnectActions().isEmpty()) {
return true;
}
Assert.isTrue(!context.getConnectActions().isEmpty(), "actions are empty");
Assert.isTrue(!context.getConnectActions().isEmpty(), ACTIONS_ARE_EMPTY);
Set<String> requiredActions = context.getConnectActions()
.stream()

View file

@ -59,8 +59,8 @@ public class CognitoAuthorityExtractor implements ProviderAuthorityExtractor {
.stream()
.filter(s -> s.getProvider().equals(Provider.OAUTH_COGNITO))
.filter(s -> s.getType().equals("group"))
.anyMatch(subject -> Stream.of(groups)
.map(Object::toString)
.anyMatch(subject -> groups
.stream()
.anyMatch(cognitoGroup -> cognitoGroup.equals(subject.getValue()))
))
.map(Role::getName)

View file

@ -18,6 +18,10 @@ import lombok.RequiredArgsConstructor;
@RequiredArgsConstructor(access = PRIVATE)
public class ApplicationMetrics {
// kafka-ui specific metrics prefix. Added to make it easier to distinguish kui metrics from
// other metrics exposed by Spring Boot (like http stats, jvm, etc.)
private static final String COMMON_PREFIX = "kui_";
private final String clusterName;
private final MeterRegistry registry;
@ -40,7 +44,7 @@ public class ApplicationMetrics {
}
private Counter polledRecords(String topic) {
return Counter.builder("topic_records_polled")
return Counter.builder(COMMON_PREFIX + "topic_records_polled")
.description("Number of records polled from topic")
.tag("cluster", clusterName)
.tag("topic", topic)
@ -48,7 +52,7 @@ public class ApplicationMetrics {
}
private DistributionSummary polledBytes(String topic) {
return DistributionSummary.builder("topic_polled_bytes")
return DistributionSummary.builder(COMMON_PREFIX + "topic_polled_bytes")
.description("Bytes polled from kafka topic")
.tag("cluster", clusterName)
.tag("topic", topic)
@ -56,7 +60,7 @@ public class ApplicationMetrics {
}
private Timer pollTimer(String topic) {
return Timer.builder("topic_poll_time")
return Timer.builder(COMMON_PREFIX + "topic_poll_time")
.description("Time spend in polling for topic")
.tag("cluster", clusterName)
.tag("topic", topic)
@ -64,7 +68,7 @@ public class ApplicationMetrics {
}
private Counter pollThrottlingActivations() {
return Counter.builder("poll_throttling_activations")
return Counter.builder(COMMON_PREFIX + "poll_throttling_activations")
.description("Number of poll throttling activations")
.tag("cluster", clusterName)
.register(registry);
@ -72,7 +76,7 @@ public class ApplicationMetrics {
public AtomicInteger activeConsumers() {
var count = new AtomicInteger();
Gauge.builder("active_consumers", () -> count)
Gauge.builder(COMMON_PREFIX + "active_consumers", () -> count)
.description("Number of active consumers")
.tag("cluster", clusterName)
.register(registry);

View file

@ -45,6 +45,7 @@ import reactor.core.publisher.Mono;
public class DynamicConfigOperations {
static final String DYNAMIC_CONFIG_ENABLED_ENV_PROPERTY = "dynamic.config.enabled";
static final String FILTERING_GROOVY_ENABLED_PROPERTY = "filtering.groovy.enabled";
static final String DYNAMIC_CONFIG_PATH_ENV_PROPERTY = "dynamic.config.path";
static final String DYNAMIC_CONFIG_PATH_ENV_PROPERTY_DEFAULT = "/etc/kafkaui/dynamic_config.yaml";
@ -64,6 +65,10 @@ public class DynamicConfigOperations {
return "true".equalsIgnoreCase(ctx.getEnvironment().getProperty(DYNAMIC_CONFIG_ENABLED_ENV_PROPERTY));
}
public boolean filteringGroovyEnabled() {
return "true".equalsIgnoreCase(ctx.getEnvironment().getProperty(FILTERING_GROOVY_ENABLED_PROPERTY));
}
private Path dynamicConfigFilePath() {
return Paths.get(
Optional.ofNullable(ctx.getEnvironment().getProperty(DYNAMIC_CONFIG_PATH_ENV_PROPERTY))
@ -147,6 +152,14 @@ public class DynamicConfigOperations {
.onErrorMap(th -> new FileUploadException(targetFilePath, th));
}
public void checkIfFilteringGroovyEnabled() {
if (!filteringGroovyEnabled()) {
throw new ValidationException(
"Groovy filters is not allowed. "
+ "Set filtering.groovy.enabled property to 'true' to enabled it.");
}
}
private void checkIfDynamicConfigEnabled() {
if (!dynamicConfigEnabled()) {
throw new ValidationException(

View file

@ -5,4 +5,5 @@ package com.provectus.kafka.ui.util.annotation;
* should be marked with this annotation to make further update process easier.
*/
public @interface KafkaClientInternalsDependant {
String value() default "";
}

View file

@ -43,6 +43,8 @@ public class JsonAvroConversion {
private static final JsonMapper MAPPER = new JsonMapper();
private static final Schema NULL_SCHEMA = Schema.create(Schema.Type.NULL);
private static final String FORMAT = "format";
private static final String DATE_TIME = "date-time";
// converts json into Object that is expected input for KafkaAvroSerializer
// (with AVRO_USE_LOGICAL_TYPE_CONVERTERS flag enabled!)
@ -347,7 +349,7 @@ public class JsonAvroConversion {
new SimpleFieldSchema(
new SimpleJsonType(
JsonType.Type.STRING,
Map.of("format", new TextNode("uuid"))))
Map.of(FORMAT, new TextNode("uuid"))))
),
DECIMAL("decimal",
@ -385,7 +387,7 @@ public class JsonAvroConversion {
new SimpleFieldSchema(
new SimpleJsonType(
JsonType.Type.STRING,
Map.of("format", new TextNode("date"))))
Map.of(FORMAT, new TextNode("date"))))
),
TIME_MILLIS("time-millis",
@ -406,7 +408,7 @@ public class JsonAvroConversion {
new SimpleFieldSchema(
new SimpleJsonType(
JsonType.Type.STRING,
Map.of("format", new TextNode("time"))))
Map.of(FORMAT, new TextNode("time"))))
),
TIME_MICROS("time-micros",
@ -427,7 +429,7 @@ public class JsonAvroConversion {
new SimpleFieldSchema(
new SimpleJsonType(
JsonType.Type.STRING,
Map.of("format", new TextNode("time"))))
Map.of(FORMAT, new TextNode("time"))))
),
TIMESTAMP_MILLIS("timestamp-millis",
@ -448,7 +450,7 @@ public class JsonAvroConversion {
new SimpleFieldSchema(
new SimpleJsonType(
JsonType.Type.STRING,
Map.of("format", new TextNode("date-time"))))
Map.of(FORMAT, new TextNode(DATE_TIME))))
),
TIMESTAMP_MICROS("timestamp-micros",
@ -473,7 +475,7 @@ public class JsonAvroConversion {
new SimpleFieldSchema(
new SimpleJsonType(
JsonType.Type.STRING,
Map.of("format", new TextNode("date-time"))))
Map.of(FORMAT, new TextNode(DATE_TIME))))
),
LOCAL_TIMESTAMP_MILLIS("local-timestamp-millis",
@ -491,7 +493,7 @@ public class JsonAvroConversion {
new SimpleFieldSchema(
new SimpleJsonType(
JsonType.Type.STRING,
Map.of("format", new TextNode("date-time"))))
Map.of(FORMAT, new TextNode(DATE_TIME))))
),
LOCAL_TIMESTAMP_MICROS("local-timestamp-micros",
@ -508,7 +510,7 @@ public class JsonAvroConversion {
new SimpleFieldSchema(
new SimpleJsonType(
JsonType.Type.STRING,
Map.of("format", new TextNode("date-time"))))
Map.of(FORMAT, new TextNode(DATE_TIME))))
);
private final String name;
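
The enum above maps Avro logical types to JSON string schemas carrying a "format" hint (such as "uuid", "date", "time", "date-time"). As a small hedged illustration of what the "date-time" representation means for a timestamp-millis value (the sample epoch value below is made up, and the project-specific SimpleJsonType/SimpleFieldSchema classes are not reproduced):

import java.time.Instant;

public class TimestampMillisSketch {

  public static void main(String[] args) {
    // An Avro timestamp-millis value is a long count of milliseconds since the epoch;
    // on the JSON side it surfaces as an ISO-8601 "date-time" formatted string.
    long timestampMillis = 1_694_522_400_000L; // assumed sample value
    System.out.println(Instant.ofEpochMilli(timestampMillis)); // 2023-09-12T12:40:00Z
  }
}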

View file

@ -37,6 +37,9 @@ import reactor.util.function.Tuples;
public class ProtobufSchemaConverter implements JsonSchemaConverter<Descriptors.Descriptor> {
private static final String MAXIMUM = "maximum";
private static final String MINIMUM = "minimum";
private final Set<String> simpleTypesWrapperNames = Set.of(
BoolValue.getDescriptor().getFullName(),
Int32Value.getDescriptor().getFullName(),
@ -156,15 +159,15 @@ public class ProtobufSchemaConverter implements JsonSchemaConverter<Descriptors.
case INT32, FIXED32, SFIXED32, SINT32 -> new SimpleJsonType(
JsonType.Type.INTEGER,
Map.of(
"maximum", IntNode.valueOf(Integer.MAX_VALUE),
"minimum", IntNode.valueOf(Integer.MIN_VALUE)
MAXIMUM, IntNode.valueOf(Integer.MAX_VALUE),
MINIMUM, IntNode.valueOf(Integer.MIN_VALUE)
)
);
case UINT32 -> new SimpleJsonType(
JsonType.Type.INTEGER,
Map.of(
"maximum", LongNode.valueOf(UnsignedInteger.MAX_VALUE.longValue()),
"minimum", IntNode.valueOf(0)
MAXIMUM, LongNode.valueOf(UnsignedInteger.MAX_VALUE.longValue()),
MINIMUM, IntNode.valueOf(0)
)
);
//TODO: actually all *64 types will be printed with quotes (as strings),
@ -173,15 +176,15 @@ public class ProtobufSchemaConverter implements JsonSchemaConverter<Descriptors.
case INT64, FIXED64, SFIXED64, SINT64 -> new SimpleJsonType(
JsonType.Type.INTEGER,
Map.of(
"maximum", LongNode.valueOf(Long.MAX_VALUE),
"minimum", LongNode.valueOf(Long.MIN_VALUE)
MAXIMUM, LongNode.valueOf(Long.MAX_VALUE),
MINIMUM, LongNode.valueOf(Long.MIN_VALUE)
)
);
case UINT64 -> new SimpleJsonType(
JsonType.Type.INTEGER,
Map.of(
"maximum", new BigIntegerNode(UnsignedLong.MAX_VALUE.bigIntegerValue()),
"minimum", LongNode.valueOf(0)
MAXIMUM, new BigIntegerNode(UnsignedLong.MAX_VALUE.bigIntegerValue()),
MINIMUM, LongNode.valueOf(0)
)
);
case MESSAGE, GROUP -> new SimpleJsonType(JsonType.Type.OBJECT);
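
For the bounds used above, unsigned 32-bit values exceed Integer.MAX_VALUE and unsigned 64-bit values exceed Long.MAX_VALUE, which is why the converter switches to LongNode and BigIntegerNode for the UINT32 and UINT64 cases. A small hedged sketch printing those limits with the same Guava and Jackson types (class name is illustrative only):

import com.fasterxml.jackson.databind.node.BigIntegerNode;
import com.fasterxml.jackson.databind.node.IntNode;
import com.fasterxml.jackson.databind.node.LongNode;
import com.google.common.primitives.UnsignedInteger;
import com.google.common.primitives.UnsignedLong;

public class UnsignedBoundsSketch {

  public static void main(String[] args) {
    // int32 bounds fit in an IntNode.
    System.out.println(IntNode.valueOf(Integer.MAX_VALUE));          // 2147483647
    // uint32 upper bound does not fit in an int, so it becomes a LongNode.
    System.out.println(LongNode.valueOf(UnsignedInteger.MAX_VALUE.longValue())); // 4294967295
    // uint64 upper bound does not fit in a long, so it becomes a BigIntegerNode.
    System.out.println(new BigIntegerNode(UnsignedLong.MAX_VALUE.bigIntegerValue())); // 18446744073709551615
  }
}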

View file

@ -56,7 +56,7 @@ public class KafkaConsumerTests extends AbstractIntegrationTest {
}
long count = webTestClient.get()
.uri("/api/clusters/{clusterName}/topics/{topicName}/messages/v2?m=EARLIEST", LOCAL, topicName)
.uri("/api/clusters/{clusterName}/topics/{topicName}/messages", LOCAL, topicName)
.accept(TEXT_EVENT_STREAM)
.exchange()
.expectStatus()

View file

@ -1,195 +0,0 @@
package com.provectus.kafka.ui.emitter;
import static org.assertj.core.api.Assertions.assertThat;
import com.provectus.kafka.ui.AbstractIntegrationTest;
import com.provectus.kafka.ui.model.ConsumerPosition;
import com.provectus.kafka.ui.model.PollingModeDTO;
import com.provectus.kafka.ui.model.TopicMessageEventDTO;
import com.provectus.kafka.ui.producer.KafkaTestProducer;
import com.provectus.kafka.ui.serde.api.Serde;
import com.provectus.kafka.ui.serdes.ConsumerRecordDeserializer;
import com.provectus.kafka.ui.serdes.PropertyResolverImpl;
import com.provectus.kafka.ui.serdes.builtin.StringSerde;
import com.provectus.kafka.ui.service.PollingCursorsStorage;
import com.provectus.kafka.ui.util.ApplicationMetrics;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.UUID;
import java.util.function.Consumer;
import org.apache.kafka.clients.admin.NewTopic;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.TopicPartition;
import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
import reactor.core.publisher.Flux;
import reactor.test.StepVerifier;
class CursorTest extends AbstractIntegrationTest {
static final String TOPIC = CursorTest.class.getSimpleName() + "_" + UUID.randomUUID();
static final int MSGS_IN_PARTITION = 20;
static final int PAGE_SIZE = (MSGS_IN_PARTITION / 2) + 1; // to poll the full data set in 2 iterations
final PollingCursorsStorage cursorsStorage = new PollingCursorsStorage();
@BeforeAll
static void setup() {
createTopic(new NewTopic(TOPIC, 1, (short) 1));
try (var producer = KafkaTestProducer.forKafka(kafka)) {
for (int i = 0; i < MSGS_IN_PARTITION; i++) {
producer.send(new ProducerRecord<>(TOPIC, "msg_" + i));
}
}
}
@AfterAll
static void cleanup() {
deleteTopic(TOPIC);
}
@Test
void backwardEmitter() {
var consumerPosition = new ConsumerPosition(PollingModeDTO.LATEST, TOPIC, List.of(), null, null);
var emitter = createBackwardEmitter(consumerPosition);
emitMessages(emitter, PAGE_SIZE);
var cursor = assertCursor(
PollingModeDTO.TO_OFFSET,
offsets -> assertThat(offsets)
.hasSize(1)
.containsEntry(new TopicPartition(TOPIC, 0), 9L)
);
// polling remaining records using registered cursor
emitter = createBackwardEmitterWithCursor(cursor);
emitMessages(emitter, MSGS_IN_PARTITION - PAGE_SIZE);
//checking no new cursors registered
assertThat(cursorsStorage.asMap()).hasSize(1).containsValue(cursor);
}
@Test
void forwardEmitter() {
var consumerPosition = new ConsumerPosition(PollingModeDTO.EARLIEST, TOPIC, List.of(), null, null);
var emitter = createForwardEmitter(consumerPosition);
emitMessages(emitter, PAGE_SIZE);
var cursor = assertCursor(
PollingModeDTO.FROM_OFFSET,
offsets -> assertThat(offsets)
.hasSize(1)
.containsEntry(new TopicPartition(TOPIC, 0), 11L)
);
//polling remaining records using registered cursor
emitter = createForwardEmitterWithCursor(cursor);
emitMessages(emitter, MSGS_IN_PARTITION - PAGE_SIZE);
//checking no new cursors registered
assertThat(cursorsStorage.asMap()).hasSize(1).containsValue(cursor);
}
private Cursor assertCursor(PollingModeDTO expectedMode,
Consumer<Map<TopicPartition, Long>> offsetsAssert) {
Cursor registeredCursor = cursorsStorage.asMap().values().stream().findFirst().orElse(null);
assertThat(registeredCursor).isNotNull();
assertThat(registeredCursor.limit()).isEqualTo(PAGE_SIZE);
assertThat(registeredCursor.deserializer()).isNotNull();
assertThat(registeredCursor.filter()).isNotNull();
var cursorPosition = registeredCursor.consumerPosition();
assertThat(cursorPosition).isNotNull();
assertThat(cursorPosition.topic()).isEqualTo(TOPIC);
assertThat(cursorPosition.partitions()).isEqualTo(List.of());
assertThat(cursorPosition.pollingMode()).isEqualTo(expectedMode);
offsetsAssert.accept(cursorPosition.offsets().tpOffsets());
return registeredCursor;
}
private void emitMessages(AbstractEmitter emitter, int expectedCnt) {
StepVerifier.create(
Flux.create(emitter)
.filter(e -> e.getType() == TopicMessageEventDTO.TypeEnum.MESSAGE)
.map(e -> e.getMessage().getContent())
)
.expectNextCount(expectedCnt)
.verifyComplete();
}
private BackwardEmitter createBackwardEmitter(ConsumerPosition position) {
return new BackwardEmitter(
this::createConsumer,
position,
PAGE_SIZE,
createRecordsDeserializer(),
m -> true,
PollingSettings.createDefault(),
createCursor(position)
);
}
private BackwardEmitter createBackwardEmitterWithCursor(Cursor cursor) {
return new BackwardEmitter(
this::createConsumer,
cursor.consumerPosition(),
cursor.limit(),
cursor.deserializer(),
cursor.filter(),
PollingSettings.createDefault(),
createCursor(cursor.consumerPosition())
);
}
private ForwardEmitter createForwardEmitterWithCursor(Cursor cursor) {
return new ForwardEmitter(
this::createConsumer,
cursor.consumerPosition(),
cursor.limit(),
cursor.deserializer(),
cursor.filter(),
PollingSettings.createDefault(),
createCursor(cursor.consumerPosition())
);
}
private ForwardEmitter createForwardEmitter(ConsumerPosition position) {
return new ForwardEmitter(
this::createConsumer,
position,
PAGE_SIZE,
createRecordsDeserializer(),
m -> true,
PollingSettings.createDefault(),
createCursor(position)
);
}
private Cursor.Tracking createCursor(ConsumerPosition position) {
return cursorsStorage.createNewCursor(createRecordsDeserializer(), position, m -> true, PAGE_SIZE);
}
private EnhancedConsumer createConsumer() {
Properties props = new Properties();
props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, kafka.getBootstrapServers());
props.put(ConsumerConfig.GROUP_ID_CONFIG, UUID.randomUUID().toString());
props.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, PAGE_SIZE - 1); // to check multiple polls
return new EnhancedConsumer(props, PollingThrottler.noop(), ApplicationMetrics.noop());
}
private static ConsumerRecordDeserializer createRecordsDeserializer() {
Serde s = new StringSerde();
s.configure(PropertyResolverImpl.empty(), PropertyResolverImpl.empty(), PropertyResolverImpl.empty());
return new ConsumerRecordDeserializer(
StringSerde.name(),
s.deserializer(null, Serde.Target.KEY),
StringSerde.name(),
s.deserializer(null, Serde.Target.VALUE),
StringSerde.name(),
s.deserializer(null, Serde.Target.KEY),
s.deserializer(null, Serde.Target.VALUE),
msg -> msg
);
}
}

View file

@ -1,13 +1,8 @@
package com.provectus.kafka.ui.emitter;
import static com.provectus.kafka.ui.model.PollingModeDTO.EARLIEST;
import static com.provectus.kafka.ui.model.PollingModeDTO.LATEST;
import static com.provectus.kafka.ui.model.PollingModeDTO.TAILING;
import static org.assertj.core.api.Assertions.assertThat;
import com.provectus.kafka.ui.model.ConsumerPosition;
import com.provectus.kafka.ui.model.PollingModeDTO;
import java.util.List;
import com.provectus.kafka.ui.model.SeekTypeDTO;
import java.util.Map;
import java.util.stream.Collectors;
import java.util.stream.Stream;
@ -19,8 +14,6 @@ import org.apache.kafka.common.utils.Bytes;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Nested;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.CsvSource;
class SeekOperationsTest {
@ -47,22 +40,13 @@ class SeekOperationsTest {
@Nested
class GetOffsetsForSeek {
@Test
void tailing() {
var offsets = SeekOperations.getOffsetsForSeek(
consumer,
new OffsetsInfo(consumer, topic),
new ConsumerPosition(TAILING, topic, List.of(), null, null)
);
assertThat(offsets).containsExactlyInAnyOrderEntriesOf(Map.of(tp0, 0L, tp1, 10L, tp2, 20L, tp3, 30L));
}
@Test
void latest() {
var offsets = SeekOperations.getOffsetsForSeek(
consumer,
new OffsetsInfo(consumer, topic),
new ConsumerPosition(LATEST, topic, List.of(), null, null)
SeekTypeDTO.LATEST,
null
);
assertThat(offsets).containsExactlyInAnyOrderEntriesOf(Map.of(tp2, 20L, tp3, 30L));
}
@ -72,38 +56,33 @@ class SeekOperationsTest {
var offsets = SeekOperations.getOffsetsForSeek(
consumer,
new OffsetsInfo(consumer, topic),
new ConsumerPosition(EARLIEST, topic, List.of(), null, null)
SeekTypeDTO.BEGINNING,
null
);
assertThat(offsets).containsExactlyInAnyOrderEntriesOf(Map.of(tp2, 0L, tp3, 25L));
}
@ParameterizedTest
@CsvSource({"TO_OFFSET", "FROM_OFFSET"})
void offsets(PollingModeDTO mode) {
@Test
void offsets() {
var offsets = SeekOperations.getOffsetsForSeek(
consumer,
new OffsetsInfo(consumer, topic),
new ConsumerPosition(
mode, topic, List.of(tp1, tp2, tp3), null,
new ConsumerPosition.Offsets(null, Map.of(tp1, 10L, tp2, 10L, tp3, 26L))
)
SeekTypeDTO.OFFSET,
Map.of(tp1, 10L, tp2, 10L, tp3, 26L)
);
assertThat(offsets).containsExactlyInAnyOrderEntriesOf(Map.of(tp2, 10L, tp3, 26L));
}
@ParameterizedTest
@CsvSource({"TO_OFFSET", "FROM_OFFSET"})
void offsetsWithBoundsFixing(PollingModeDTO mode) {
@Test
void offsetsWithBoundsFixing() {
var offsets = SeekOperations.getOffsetsForSeek(
consumer,
new OffsetsInfo(consumer, topic),
new ConsumerPosition(
mode, topic, List.of(tp1, tp2, tp3), null,
new ConsumerPosition.Offsets(null, Map.of(tp1, 10L, tp2, 21L, tp3, 24L))
)
SeekTypeDTO.OFFSET,
Map.of(tp1, 10L, tp2, 21L, tp3, 24L)
);
assertThat(offsets).containsExactlyInAnyOrderEntriesOf(Map.of(tp2, 20L, tp3, 25L));
}
}
}
}

View file

@ -4,9 +4,10 @@ import static org.assertj.core.api.Assertions.assertThat;
import com.provectus.kafka.ui.AbstractIntegrationTest;
import com.provectus.kafka.ui.model.ConsumerPosition;
import com.provectus.kafka.ui.model.PollingModeDTO;
import com.provectus.kafka.ui.model.MessageFilterTypeDTO;
import com.provectus.kafka.ui.model.SeekDirectionDTO;
import com.provectus.kafka.ui.model.SeekTypeDTO;
import com.provectus.kafka.ui.model.TopicMessageEventDTO;
import com.provectus.kafka.ui.serdes.builtin.StringSerde;
import com.provectus.kafka.ui.service.ClustersStorage;
import com.provectus.kafka.ui.service.MessagesService;
import java.time.Duration;
@ -110,12 +111,13 @@ class TailingEmitterTest extends AbstractIntegrationTest {
return applicationContext.getBean(MessagesService.class)
.loadMessages(cluster, topicName,
new ConsumerPosition(PollingModeDTO.TAILING, topic, List.of(), null, null),
new ConsumerPosition(SeekTypeDTO.LATEST, topic, null),
query,
null,
MessageFilterTypeDTO.STRING_CONTAINS,
0,
StringSerde.name(),
StringSerde.name());
SeekDirectionDTO.TAILING,
"String",
"String");
}
private List<TopicMessageEventDTO> startTailing(String filterQuery) {

View file

@ -8,24 +8,19 @@ import com.provectus.kafka.ui.exception.TopicNotFoundException;
import com.provectus.kafka.ui.model.ConsumerPosition;
import com.provectus.kafka.ui.model.CreateTopicMessageDTO;
import com.provectus.kafka.ui.model.KafkaCluster;
import com.provectus.kafka.ui.model.PollingModeDTO;
import com.provectus.kafka.ui.model.SeekDirectionDTO;
import com.provectus.kafka.ui.model.SeekTypeDTO;
import com.provectus.kafka.ui.model.SmartFilterTestExecutionDTO;
import com.provectus.kafka.ui.model.TopicMessageDTO;
import com.provectus.kafka.ui.model.TopicMessageEventDTO;
import com.provectus.kafka.ui.producer.KafkaTestProducer;
import com.provectus.kafka.ui.serdes.builtin.StringSerde;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.UUID;
import java.util.concurrent.atomic.AtomicReference;
import org.apache.kafka.clients.admin.NewTopic;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.CsvSource;
import org.springframework.beans.factory.annotation.Autowired;
import reactor.core.publisher.Flux;
import reactor.test.StepVerifier;
@ -40,8 +35,6 @@ class MessagesServiceTest extends AbstractIntegrationTest {
KafkaCluster cluster;
Set<String> createdTopics = new HashSet<>();
@BeforeEach
void init() {
cluster = applicationContext
@ -50,11 +43,6 @@ class MessagesServiceTest extends AbstractIntegrationTest {
.get();
}
@AfterEach
void deleteCreatedTopics() {
createdTopics.forEach(MessagesServiceTest::deleteTopic);
}
@Test
void deleteTopicMessagesReturnsExceptionWhenTopicNotFound() {
StepVerifier.create(messagesService.deleteTopicMessages(cluster, NON_EXISTING_TOPIC, List.of()))
@ -72,9 +60,7 @@ class MessagesServiceTest extends AbstractIntegrationTest {
@Test
void loadMessagesReturnsExceptionWhenTopicNotFound() {
StepVerifier.create(messagesService
.loadMessages(cluster, NON_EXISTING_TOPIC,
new ConsumerPosition(PollingModeDTO.TAILING, NON_EXISTING_TOPIC, List.of(), null, null),
null, null, 1, "String", "String"))
.loadMessages(cluster, NON_EXISTING_TOPIC, null, null, null, 1, null, "String", "String"))
.expectError(TopicNotFoundException.class)
.verify();
}
@ -82,84 +68,32 @@ class MessagesServiceTest extends AbstractIntegrationTest {
@Test
void maskingAppliedOnConfiguredClusters() throws Exception {
String testTopic = MASKED_TOPICS_PREFIX + UUID.randomUUID();
createTopicWithCleanup(new NewTopic(testTopic, 1, (short) 1));
try (var producer = KafkaTestProducer.forKafka(kafka)) {
createTopic(new NewTopic(testTopic, 1, (short) 1));
producer.send(testTopic, "message1");
producer.send(testTopic, "message2").get();
Flux<TopicMessageDTO> msgsFlux = messagesService.loadMessages(
cluster,
testTopic,
new ConsumerPosition(SeekTypeDTO.BEGINNING, testTopic, null),
null,
null,
100,
SeekDirectionDTO.FORWARD,
StringSerde.name(),
StringSerde.name()
).filter(evt -> evt.getType() == TopicMessageEventDTO.TypeEnum.MESSAGE)
.map(TopicMessageEventDTO::getMessage);
// both messages should be masked
StepVerifier.create(msgsFlux)
.expectNextMatches(msg -> msg.getContent().equals("***"))
.expectNextMatches(msg -> msg.getContent().equals("***"))
.verifyComplete();
} finally {
deleteTopic(testTopic);
}
Flux<TopicMessageDTO> msgsFlux = messagesService.loadMessages(
cluster,
testTopic,
new ConsumerPosition(PollingModeDTO.EARLIEST, testTopic, List.of(), null, null),
null,
null,
100,
StringSerde.name(),
StringSerde.name()
).filter(evt -> evt.getType() == TopicMessageEventDTO.TypeEnum.MESSAGE)
.map(TopicMessageEventDTO::getMessage);
// both messages should be masked
StepVerifier.create(msgsFlux)
.expectNextMatches(msg -> msg.getContent().equals("***"))
.expectNextMatches(msg -> msg.getContent().equals("***"))
.verifyComplete();
}
@ParameterizedTest
@CsvSource({"EARLIEST", "LATEST"})
void cursorIsRegisteredAfterPollingIsDoneAndCanBeUsedForNextPagePolling(PollingModeDTO mode) {
String testTopic = MessagesServiceTest.class.getSimpleName() + UUID.randomUUID();
createTopicWithCleanup(new NewTopic(testTopic, 5, (short) 1));
int msgsToGenerate = 100;
int pageSize = (msgsToGenerate / 2) + 1;
try (var producer = KafkaTestProducer.forKafka(kafka)) {
for (int i = 0; i < msgsToGenerate; i++) {
producer.send(testTopic, "message_" + i);
}
}
var cursorIdCatcher = new AtomicReference<String>();
Flux<String> msgsFlux = messagesService.loadMessages(
cluster, testTopic,
new ConsumerPosition(mode, testTopic, List.of(), null, null),
null, null, pageSize, StringSerde.name(), StringSerde.name())
.doOnNext(evt -> {
if (evt.getType() == TopicMessageEventDTO.TypeEnum.DONE) {
assertThat(evt.getCursor()).isNotNull();
cursorIdCatcher.set(evt.getCursor().getId());
}
})
.filter(evt -> evt.getType() == TopicMessageEventDTO.TypeEnum.MESSAGE)
.map(evt -> evt.getMessage().getContent());
StepVerifier.create(msgsFlux)
.expectNextCount(pageSize)
.verifyComplete();
assertThat(cursorIdCatcher.get()).isNotNull();
Flux<String> remainingMsgs = messagesService.loadMessages(cluster, testTopic, cursorIdCatcher.get())
.doOnNext(evt -> {
if (evt.getType() == TopicMessageEventDTO.TypeEnum.DONE) {
assertThat(evt.getCursor()).isNull();
}
})
.filter(evt -> evt.getType() == TopicMessageEventDTO.TypeEnum.MESSAGE)
.map(evt -> evt.getMessage().getContent());
StepVerifier.create(remainingMsgs)
.expectNextCount(msgsToGenerate - pageSize)
.verifyComplete();
}
private void createTopicWithCleanup(NewTopic newTopic) {
createTopic(newTopic);
createdTopics.add(newTopic.name());
}
@Test

View file

@ -1,16 +1,13 @@
package com.provectus.kafka.ui.service;
import static com.provectus.kafka.ui.model.PollingModeDTO.EARLIEST;
import static com.provectus.kafka.ui.model.PollingModeDTO.FROM_OFFSET;
import static com.provectus.kafka.ui.model.PollingModeDTO.FROM_TIMESTAMP;
import static com.provectus.kafka.ui.model.PollingModeDTO.LATEST;
import static com.provectus.kafka.ui.model.PollingModeDTO.TO_OFFSET;
import static com.provectus.kafka.ui.model.PollingModeDTO.TO_TIMESTAMP;
import static com.provectus.kafka.ui.model.SeekTypeDTO.BEGINNING;
import static com.provectus.kafka.ui.model.SeekTypeDTO.LATEST;
import static com.provectus.kafka.ui.model.SeekTypeDTO.OFFSET;
import static com.provectus.kafka.ui.model.SeekTypeDTO.TIMESTAMP;
import static org.assertj.core.api.Assertions.assertThat;
import com.provectus.kafka.ui.AbstractIntegrationTest;
import com.provectus.kafka.ui.emitter.BackwardEmitter;
import com.provectus.kafka.ui.emitter.Cursor;
import com.provectus.kafka.ui.emitter.EnhancedConsumer;
import com.provectus.kafka.ui.emitter.ForwardEmitter;
import com.provectus.kafka.ui.emitter.PollingSettings;
@ -46,7 +43,6 @@ import org.apache.kafka.common.header.internals.RecordHeader;
import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
import org.mockito.Mockito;
import reactor.core.publisher.Flux;
import reactor.core.publisher.FluxSink;
import reactor.test.StepVerifier;
@ -61,18 +57,16 @@ class RecordEmitterTest extends AbstractIntegrationTest {
static final String EMPTY_TOPIC = TOPIC + "_empty";
static final List<Record> SENT_RECORDS = new ArrayList<>();
static final ConsumerRecordDeserializer RECORD_DESERIALIZER = createRecordsDeserializer();
static final Cursor.Tracking CURSOR_MOCK = Mockito.mock(Cursor.Tracking.class);
static final Predicate<TopicMessageDTO> NOOP_FILTER = m -> true;
@BeforeAll
static void generateMsgs() throws Exception {
createTopic(new NewTopic(TOPIC, PARTITIONS, (short) 1));
createTopic(new NewTopic(EMPTY_TOPIC, PARTITIONS, (short) 1));
long startTs = System.currentTimeMillis();
try (var producer = KafkaTestProducer.forKafka(kafka)) {
for (int partition = 0; partition < PARTITIONS; partition++) {
for (int i = 0; i < MSGS_PER_PARTITION; i++) {
long ts = (startTs += 100);
long ts = System.currentTimeMillis() + i;
var value = "msg_" + partition + "_" + i;
var metadata = producer.send(
new ProducerRecord<>(
@ -121,22 +115,20 @@ class RecordEmitterTest extends AbstractIntegrationTest {
void pollNothingOnEmptyTopic() {
var forwardEmitter = new ForwardEmitter(
this::createConsumer,
new ConsumerPosition(EARLIEST, EMPTY_TOPIC, List.of(), null, null),
new ConsumerPosition(BEGINNING, EMPTY_TOPIC, null),
100,
RECORD_DESERIALIZER,
NOOP_FILTER,
PollingSettings.createDefault(),
CURSOR_MOCK
PollingSettings.createDefault()
);
var backwardEmitter = new BackwardEmitter(
this::createConsumer,
new ConsumerPosition(EARLIEST, EMPTY_TOPIC, List.of(), null, null),
new ConsumerPosition(BEGINNING, EMPTY_TOPIC, null),
100,
RECORD_DESERIALIZER,
NOOP_FILTER,
PollingSettings.createDefault(),
CURSOR_MOCK
PollingSettings.createDefault()
);
StepVerifier.create(Flux.create(forwardEmitter))
@ -156,22 +148,20 @@ class RecordEmitterTest extends AbstractIntegrationTest {
void pollFullTopicFromBeginning() {
var forwardEmitter = new ForwardEmitter(
this::createConsumer,
new ConsumerPosition(EARLIEST, TOPIC, List.of(), null, null),
new ConsumerPosition(BEGINNING, TOPIC, null),
PARTITIONS * MSGS_PER_PARTITION,
RECORD_DESERIALIZER,
NOOP_FILTER,
PollingSettings.createDefault(),
CURSOR_MOCK
PollingSettings.createDefault()
);
var backwardEmitter = new BackwardEmitter(
this::createConsumer,
new ConsumerPosition(LATEST, TOPIC, List.of(), null, null),
new ConsumerPosition(LATEST, TOPIC, null),
PARTITIONS * MSGS_PER_PARTITION,
RECORD_DESERIALIZER,
NOOP_FILTER,
PollingSettings.createDefault(),
CURSOR_MOCK
PollingSettings.createDefault()
);
List<String> expectedValues = SENT_RECORDS.stream().map(Record::getValue).collect(Collectors.toList());
@ -190,24 +180,20 @@ class RecordEmitterTest extends AbstractIntegrationTest {
var forwardEmitter = new ForwardEmitter(
this::createConsumer,
new ConsumerPosition(FROM_OFFSET, TOPIC, List.copyOf(targetOffsets.keySet()), null,
new ConsumerPosition.Offsets(null, targetOffsets)),
new ConsumerPosition(OFFSET, TOPIC, targetOffsets),
PARTITIONS * MSGS_PER_PARTITION,
RECORD_DESERIALIZER,
NOOP_FILTER,
PollingSettings.createDefault(),
CURSOR_MOCK
PollingSettings.createDefault()
);
var backwardEmitter = new BackwardEmitter(
this::createConsumer,
new ConsumerPosition(TO_OFFSET, TOPIC, List.copyOf(targetOffsets.keySet()), null,
new ConsumerPosition.Offsets(null, targetOffsets)),
new ConsumerPosition(OFFSET, TOPIC, targetOffsets),
PARTITIONS * MSGS_PER_PARTITION,
RECORD_DESERIALIZER,
NOOP_FILTER,
PollingSettings.createDefault(),
CURSOR_MOCK
PollingSettings.createDefault()
);
var expectedValues = SENT_RECORDS.stream()
@ -227,45 +213,50 @@ class RecordEmitterTest extends AbstractIntegrationTest {
@Test
void pollWithTimestamps() {
var tsStats = SENT_RECORDS.stream().mapToLong(Record::getTimestamp).summaryStatistics();
//choosing ts in the middle
long targetTimestamp = tsStats.getMin() + ((tsStats.getMax() - tsStats.getMin()) / 2);
Map<TopicPartition, Long> targetTimestamps = new HashMap<>();
final Map<TopicPartition, List<Record>> perPartition =
SENT_RECORDS.stream().collect(Collectors.groupingBy((r) -> r.tp));
for (int i = 0; i < PARTITIONS; i++) {
final List<Record> records = perPartition.get(new TopicPartition(TOPIC, i));
int randRecordIdx = ThreadLocalRandom.current().nextInt(records.size());
log.info("partition: {} position: {}", i, randRecordIdx);
targetTimestamps.put(
new TopicPartition(TOPIC, i),
records.get(randRecordIdx).getTimestamp()
);
}
var forwardEmitter = new ForwardEmitter(
this::createConsumer,
new ConsumerPosition(FROM_TIMESTAMP, TOPIC, List.of(), targetTimestamp, null),
new ConsumerPosition(TIMESTAMP, TOPIC, targetTimestamps),
PARTITIONS * MSGS_PER_PARTITION,
RECORD_DESERIALIZER,
NOOP_FILTER,
PollingSettings.createDefault(),
CURSOR_MOCK
);
expectEmitter(
forwardEmitter,
SENT_RECORDS.stream()
.filter(r -> r.getTimestamp() >= targetTimestamp)
.map(Record::getValue)
.collect(Collectors.toList())
PollingSettings.createDefault()
);
var backwardEmitter = new BackwardEmitter(
this::createConsumer,
new ConsumerPosition(TO_TIMESTAMP, TOPIC, List.of(), targetTimestamp, null),
new ConsumerPosition(TIMESTAMP, TOPIC, targetTimestamps),
PARTITIONS * MSGS_PER_PARTITION,
RECORD_DESERIALIZER,
NOOP_FILTER,
PollingSettings.createDefault(),
CURSOR_MOCK
PollingSettings.createDefault()
);
expectEmitter(
backwardEmitter,
SENT_RECORDS.stream()
.filter(r -> r.getTimestamp() < targetTimestamp)
.map(Record::getValue)
.collect(Collectors.toList())
);
var expectedValues = SENT_RECORDS.stream()
.filter(r -> r.getTimestamp() >= targetTimestamps.get(r.getTp()))
.map(Record::getValue)
.collect(Collectors.toList());
expectEmitter(forwardEmitter, expectedValues);
expectedValues = SENT_RECORDS.stream()
.filter(r -> r.getTimestamp() < targetTimestamps.get(r.getTp()))
.map(Record::getValue)
.collect(Collectors.toList());
expectEmitter(backwardEmitter, expectedValues);
}
@Test
@ -278,13 +269,11 @@ class RecordEmitterTest extends AbstractIntegrationTest {
var backwardEmitter = new BackwardEmitter(
this::createConsumer,
new ConsumerPosition(TO_OFFSET, TOPIC, List.copyOf(targetOffsets.keySet()), null,
new ConsumerPosition.Offsets(null, targetOffsets)),
new ConsumerPosition(OFFSET, TOPIC, targetOffsets),
numMessages,
RECORD_DESERIALIZER,
NOOP_FILTER,
PollingSettings.createDefault(),
CURSOR_MOCK
PollingSettings.createDefault()
);
var expectedValues = SENT_RECORDS.stream()
@ -307,13 +296,11 @@ class RecordEmitterTest extends AbstractIntegrationTest {
var backwardEmitter = new BackwardEmitter(
this::createConsumer,
new ConsumerPosition(TO_OFFSET, TOPIC, List.copyOf(offsets.keySet()), null,
new ConsumerPosition.Offsets(null, offsets)),
new ConsumerPosition(OFFSET, TOPIC, offsets),
100,
RECORD_DESERIALIZER,
NOOP_FILTER,
PollingSettings.createDefault(),
CURSOR_MOCK
PollingSettings.createDefault()
);
expectEmitter(backwardEmitter,

View file

@ -7,7 +7,8 @@ import com.provectus.kafka.ui.AbstractIntegrationTest;
import com.provectus.kafka.ui.model.ConsumerPosition;
import com.provectus.kafka.ui.model.CreateTopicMessageDTO;
import com.provectus.kafka.ui.model.KafkaCluster;
import com.provectus.kafka.ui.model.PollingModeDTO;
import com.provectus.kafka.ui.model.SeekDirectionDTO;
import com.provectus.kafka.ui.model.SeekTypeDTO;
import com.provectus.kafka.ui.model.TopicMessageDTO;
import com.provectus.kafka.ui.model.TopicMessageEventDTO;
import com.provectus.kafka.ui.serdes.builtin.Int32Serde;
@ -19,7 +20,6 @@ import io.confluent.kafka.schemaregistry.avro.AvroSchema;
import io.confluent.kafka.schemaregistry.json.JsonSchema;
import io.confluent.kafka.schemaregistry.protobuf.ProtobufSchema;
import java.time.Duration;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.UUID;
@ -500,10 +500,15 @@ public class SendAndReadTests extends AbstractIntegrationTest {
TopicMessageDTO polled = messagesService.loadMessages(
targetCluster,
topic,
new ConsumerPosition(PollingModeDTO.EARLIEST, topic, List.of(), null, null),
new ConsumerPosition(
SeekTypeDTO.BEGINNING,
topic,
Map.of(new TopicPartition(topic, 0), 0L)
),
null,
null,
1,
SeekDirectionDTO.FORWARD,
msgToSend.getKeySerde().get(),
msgToSend.getValueSerde().get()
).filter(e -> e.getType().equals(TopicMessageEventDTO.TypeEnum.MESSAGE))

View file

@ -763,12 +763,12 @@ paths:
404:
description: Not found
/api/clusters/{clusterName}/topics/{topicName}/smartfilters:
post:
/api/clusters/{clusterName}/topics/{topicName}/activeproducers:
get:
tags:
- Messages
summary: registerFilter
operationId: registerFilter
- Topics
summary: get producer states for topic
operationId: getActiveProducerStates
parameters:
- name: clusterName
in: path
@ -780,101 +780,15 @@ paths:
required: true
schema:
type: string
requestBody:
content:
application/json:
schema:
$ref: '#/components/schemas/MessageFilterRegistration'
responses:
200:
description: OK
content:
application/json:
schema:
$ref: '#/components/schemas/MessageFilterId'
/api/clusters/{clusterName}/topics/{topicName}/messages/v2:
get:
tags:
- Messages
summary: getTopicMessagesV2
operationId: getTopicMessagesV2
parameters:
- name: clusterName
in: path
required: true
schema:
type: string
- name: topicName
in: path
required: true
schema:
type: string
- name: mode
in: query
description: Messages polling mode
required: true
schema:
$ref: "#/components/schemas/PollingMode"
- name: partitions
in: query
schema:
type: array
description: List of target partitions (all partitions if not provided)
items:
type: integer
- name: limit
in: query
description: Max number of messages that can be returned
schema:
type: integer
- name: stringFilter
in: query
description: query string for substring (contains) filtering
schema:
type: string
- name: smartFilterId
in: query
description: filter id that was registered beforehand
schema:
type: string
- name: offset
in: query
description: message offset to read from / to
schema:
type: integer
format: int64
- name: timestamp
in: query
description: timestamp (in ms) to read from / to
schema:
type: integer
format: int64
- name: keySerde
in: query
description: "Serde that should be used for deserialization. Will be chosen automatically if not set."
schema:
type: string
- name: valueSerde
in: query
description: "Serde that should be used for deserialization. Will be chosen automatically if not set."
schema:
type: string
- name: cursor
in: query
description: "id of the cursor for pagination"
schema:
type: string
responses:
200:
description: OK
content:
text/event-stream:
schema:
type: array
items:
$ref: '#/components/schemas/TopicMessageEvent'
$ref: '#/components/schemas/TopicProducerState'
/api/clusters/{clusterName}/topics/{topicName}/consumer-groups:
get:
@ -2732,6 +2646,31 @@ components:
- PROTOBUF
- UNKNOWN
TopicProducerState:
type: object
properties:
partition:
type: integer
format: int32
producerId:
type: integer
format: int64
producerEpoch:
type: integer
format: int32
lastSequence:
type: integer
format: int32
lastTimestampMs:
type: integer
format: int64
coordinatorEpoch:
type: integer
format: int32
currentTransactionStartOffset:
type: integer
format: int64
ConsumerGroup:
discriminator:
propertyName: inherit
@ -2844,14 +2783,13 @@ components:
- MESSAGE
- CONSUMING
- DONE
- EMIT_THROTTLING
message:
$ref: "#/components/schemas/TopicMessage"
phase:
$ref: "#/components/schemas/TopicMessagePhase"
consuming:
$ref: "#/components/schemas/TopicMessageConsuming"
cursor:
$ref: "#/components/schemas/TopicMessageNextPageCursor"
TopicMessagePhase:
type: object
@ -2881,11 +2819,6 @@ components:
filterApplyErrors:
type: integer
TopicMessageNextPageCursor:
type: object
properties:
id:
type: string
TopicMessage:
type: object
@ -2958,29 +2891,6 @@ components:
- TIMESTAMP
- LATEST
MessageFilterRegistration:
type: object
properties:
filterCode:
type: string
MessageFilterId:
type: object
properties:
id:
type: string
PollingMode:
type: string
enum:
- FROM_OFFSET
- TO_OFFSET
- FROM_TIMESTAMP
- TO_TIMESTAMP
- LATEST
- EARLIEST
- TAILING
MessageFilterType:
type: string
enum:

View file

@ -19,7 +19,7 @@
<selenium.version>4.8.1</selenium.version>
<selenide.version>6.12.3</selenide.version>
<testng.version>7.7.1</testng.version>
<allure.version>2.22.2</allure.version>
<allure.version>2.23.0</allure.version>
<qase.io.version>3.0.5</qase.io.version>
<aspectj.version>1.9.9.1</aspectj.version>
<assertj.version>3.24.2</assertj.version>

View file

@ -10,25 +10,27 @@ import lombok.experimental.Accessors;
@Accessors(chain = true)
public class Schema {
private static final String USER_DIR = "user.dir";
private String name, valuePath;
private SchemaType type;
public static Schema createSchemaAvro() {
return new Schema().setName("schema_avro-" + randomAlphabetic(5))
.setType(SchemaType.AVRO)
.setValuePath(System.getProperty("user.dir") + "/src/main/resources/testData/schemas/schema_avro_value.json");
.setValuePath(System.getProperty(USER_DIR) + "/src/main/resources/testData/schemas/schema_avro_value.json");
}
public static Schema createSchemaJson() {
return new Schema().setName("schema_json-" + randomAlphabetic(5))
.setType(SchemaType.JSON)
.setValuePath(System.getProperty("user.dir") + "/src/main/resources/testData/schemas/schema_json_Value.json");
.setValuePath(System.getProperty(USER_DIR) + "/src/main/resources/testData/schemas/schema_json_Value.json");
}
public static Schema createSchemaProtobuf() {
return new Schema().setName("schema_protobuf-" + randomAlphabetic(5))
.setType(SchemaType.PROTOBUF)
.setValuePath(
System.getProperty("user.dir") + "/src/main/resources/testData/schemas/schema_protobuf_value.txt");
System.getProperty(USER_DIR) + "/src/main/resources/testData/schemas/schema_protobuf_value.txt");
}
}

View file

@ -16,6 +16,8 @@ import java.util.stream.Stream;
public class BrokersConfigTab extends BasePage {
protected List<SelenideElement> editBtn = $$x("//button[@aria-label='editAction']");
protected SelenideElement searchByKeyField = $x("//input[@placeholder='Search by Key or Value']");
protected SelenideElement sourceInfoIcon = $x("//div[text()='Source']/..//div/div[@class]");
protected SelenideElement sourceInfoTooltip = $x("//div[text()='Source']/..//div/div[@style]");
protected ElementsCollection editBtns = $$x("//button[@aria-label='editAction']");

View file

@ -19,6 +19,8 @@ import io.qameta.allure.Step;
public class TopicCreateEditForm extends BasePage {
private static final String RETENTION_BYTES = "retentionBytes";
protected SelenideElement timeToRetainField = $x("//input[@id='timeToRetain']");
protected SelenideElement partitionsField = $x("//input[@name='partitions']");
protected SelenideElement nameField = $(id("topicFormName"));
@ -138,12 +140,12 @@ public class TopicCreateEditForm extends BasePage {
@Step
public TopicCreateEditForm selectRetentionBytes(String visibleValue) {
return selectFromDropDownByVisibleText("retentionBytes", visibleValue);
return selectFromDropDownByVisibleText(RETENTION_BYTES, visibleValue);
}
@Step
public TopicCreateEditForm selectRetentionBytes(Long optionValue) {
return selectFromDropDownByOptionValue("retentionBytes", optionValue.toString());
return selectFromDropDownByOptionValue(RETENTION_BYTES, optionValue.toString());
}
@Step
@ -202,7 +204,7 @@ public class TopicCreateEditForm extends BasePage {
@Step
public String getMaxSizeOnDisk() {
return new KafkaUiSelectElement("retentionBytes").getCurrentValue();
return new KafkaUiSelectElement(RETENTION_BYTES).getCurrentValue();
}
@Step

View file

@ -1 +1 @@
v16.15.0
v18.17.1

View file

@ -86,7 +86,7 @@
"eslint": "^8.3.0",
"eslint-config-airbnb": "^19.0.4",
"eslint-config-airbnb-typescript": "^17.0.0",
"eslint-config-prettier": "^8.5.0",
"eslint-config-prettier": "^9.0.0",
"eslint-import-resolver-node": "^0.3.6",
"eslint-import-resolver-typescript": "^3.2.7",
"eslint-plugin-import": "^2.26.0",
@ -106,7 +106,7 @@
"vite-plugin-ejs": "^1.6.4"
},
"engines": {
"node": "v16.15.0",
"pnpm": "^7.4.0"
"node": "v18.17.1",
"pnpm": "^8.6.12"
}
}

File diff suppressed because it is too large

View file

@ -34,14 +34,19 @@ const Configs: React.FC = () => {
const getData = () => {
return data
.filter(
(item) =>
item.name.toLocaleLowerCase().indexOf(keyword.toLocaleLowerCase()) >
-1
)
.filter((item) => {
const nameMatch = item.name
.toLocaleLowerCase()
.includes(keyword.toLocaleLowerCase());
return nameMatch
? true
: item.value &&
item.value
.toLocaleLowerCase()
.includes(keyword.toLocaleLowerCase()); // fall back to matching the keyword against item.value when the name does not match and a value exists
})
.sort((a, b) => {
if (a.source === b.source) return 0;
return a.source === ConfigSource.DYNAMIC_BROKER_CONFIG ? -1 : 1;
});
};
@ -95,7 +100,7 @@ const Configs: React.FC = () => {
<S.SearchWrapper>
<Search
onChange={setKeyword}
placeholder="Search by Key"
placeholder="Search by Key or Value"
value={keyword}
/>
</S.SearchWrapper>

View file

@ -13,7 +13,7 @@ import { brokersPayload } from 'lib/fixtures/brokers';
import { clusterStatsPayload } from 'lib/fixtures/clusters';
const clusterName = 'local';
const brokerId = 1;
const brokerId = 200;
const activeClassName = 'is-active';
const brokerLogdir = {
pageText: 'brokerLogdir',

View file

@ -73,13 +73,13 @@ const BrokersList: React.FC = () => {
header: 'Broker ID',
accessorKey: 'brokerId',
// eslint-disable-next-line react/no-unstable-nested-components
cell: ({ row: { id }, getValue }) => (
cell: ({ getValue }) => (
<S.RowCell>
<LinkCell
value={`${getValue<string | number>()}`}
to={encodeURIComponent(`${getValue<string | number>()}`)}
/>
{id === String(activeControllers) && (
{getValue<string | number>() === activeControllers && (
<Tooltip
value={<CheckMarkRoundIcon />}
content="Active Controller"

View file

@ -56,11 +56,11 @@ describe('BrokersList Component', () => {
});
it('opens broker when row clicked', async () => {
renderComponent();
await userEvent.click(screen.getByRole('cell', { name: '0' }));
await userEvent.click(screen.getByRole('cell', { name: '100' }));
await waitFor(() =>
expect(mockedUsedNavigate).toBeCalledWith(
clusterBrokerPath(clusterName, '0')
clusterBrokerPath(clusterName, '100')
)
);
});
@ -124,6 +124,39 @@ describe('BrokersList Component', () => {
});
});
describe('BrokersList', () => {
describe('when the brokers are loaded', () => {
const testActiveControllers = 0;
beforeEach(() => {
(useBrokers as jest.Mock).mockImplementation(() => ({
data: brokersPayload,
}));
(useClusterStats as jest.Mock).mockImplementation(() => ({
data: clusterStatsPayload,
}));
});
it(`Indicates correct active controller`, async () => {
renderComponent();
await waitFor(() =>
expect(screen.getByRole('tooltip')).toBeInTheDocument()
);
});
it(`Correct display even if there is no active controller: ${testActiveControllers}`, async () => {
(useClusterStats as jest.Mock).mockImplementation(() => ({
data: {
...clusterStatsPayload,
activeControllers: testActiveControllers,
},
}));
renderComponent();
await waitFor(() =>
expect(screen.queryByRole('tooltip')).not.toBeInTheDocument()
);
});
});
});
describe('when diskUsage is empty', () => {
beforeEach(() => {
(useBrokers as jest.Mock).mockImplementation(() => ({
@ -157,11 +190,11 @@ describe('BrokersList Component', () => {
});
it('opens broker when row clicked', async () => {
renderComponent();
await userEvent.click(screen.getByRole('cell', { name: '1' }));
await userEvent.click(screen.getByRole('cell', { name: '100' }));
await waitFor(() =>
expect(mockedUsedNavigate).toBeCalledWith(
clusterBrokerPath(clusterName, '1')
clusterBrokerPath(clusterName, '100')
)
);
});

View file

@ -15,7 +15,7 @@ enum Filters {
PARTITION_COUNT = 'partitionCount',
REPLICATION_FACTOR = 'replicationFactor',
INSYNC_REPLICAS = 'inSyncReplicas',
CLEANUP_POLICY = 'Delete',
CLEANUP_POLICY = 'cleanUpPolicy',
}
const New: React.FC = () => {

View file

@ -60,16 +60,16 @@ describe('New', () => {
await userEvent.clear(screen.getByPlaceholderText('Topic Name'));
await userEvent.tab();
await expect(
screen.getByText('name is a required field')
screen.getByText('Topic Name is required')
).toBeInTheDocument();
await userEvent.type(
screen.getByLabelText('Number of partitions *'),
screen.getByLabelText('Number of Partitions *'),
minValue
);
await userEvent.clear(screen.getByLabelText('Number of partitions *'));
await userEvent.clear(screen.getByLabelText('Number of Partitions *'));
await userEvent.tab();
await expect(
screen.getByText('Number of partitions is required and must be a number')
screen.getByText('Number of Partitions is required and must be a number')
).toBeInTheDocument();
expect(createTopicMock).not.toHaveBeenCalled();
@ -89,7 +89,7 @@ describe('New', () => {
renderComponent(clusterTopicNewPath(clusterName));
await userEvent.type(screen.getByPlaceholderText('Topic Name'), topicName);
await userEvent.type(
screen.getByLabelText('Number of partitions *'),
screen.getByLabelText('Number of Partitions *'),
minValue
);
await userEvent.click(screen.getByText('Create topic'));

View file

@ -44,9 +44,11 @@ const Metrics: React.FC = () => {
if (data.progress) {
return (
<S.ProgressContainer>
<S.ProgressPct>
{Math.floor(data.progress.completenessPercent || 0)}%
</S.ProgressPct>
<S.ProgressBarWrapper>
<ProgressBar completed={data.progress.completenessPercent || 0} />
<span> {Math.floor(data.progress.completenessPercent || 0)} %</span>
</S.ProgressBarWrapper>
<ActionButton
onClick={async () => {

View file

@ -42,3 +42,10 @@ export const ProgressBarWrapper = styled.div`
align-items: center;
width: 280px;
`;
export const ProgressPct = styled.span`
font-size: 15px;
font-weight: bold;
line-height: 1.5;
color: ${({ theme }) => theme.statictics.progressPctColor};
`;

View file

@ -1,4 +1,5 @@
import styled from 'styled-components';
import Input from 'components/common/Input/Input';
export const Column = styled.div`
display: flex;
@ -16,6 +17,10 @@ export const CustomParamsHeading = styled.h4`
color: ${({ theme }) => theme.heading.h4};
`;
export const MessageSizeInput = styled(Input)`
min-width: 195px;
`;
export const Label = styled.div`
display: flex;
gap: 16px;

View file

@ -109,12 +109,12 @@ const TopicForm: React.FC<Props> = ({
{!isEditing && (
<div>
<InputLabel htmlFor="topicFormNumberOfPartitions">
Number of partitions *
Number of Partitions *
</InputLabel>
<Input
id="topicFormNumberOfPartitions"
type="number"
placeholder="Number of partitions"
placeholder="Number of Partitions"
min="1"
name="partitions"
positiveOnly
@ -228,7 +228,7 @@ const TopicForm: React.FC<Props> = ({
<InputLabel htmlFor="topicFormMaxMessageBytes">
Maximum message size in bytes
</InputLabel>
<Input
<S.MessageSizeInput
id="topicFormMaxMessageBytes"
type="number"
placeholder="Maximum message size"

View file

@ -37,7 +37,7 @@ describe('TopicForm', () => {
expectByRoleAndNameToBeInDocument('textbox', 'Topic Name *');
expectByRoleAndNameToBeInDocument('spinbutton', 'Number of partitions *');
expectByRoleAndNameToBeInDocument('spinbutton', 'Number of Partitions *');
expectByRoleAndNameToBeInDocument('spinbutton', 'Replication Factor');
expectByRoleAndNameToBeInDocument('spinbutton', 'Min In Sync Replicas');

View file

@ -7,6 +7,7 @@ const CheckMarkRoundIcon: React.FC = () => {
height="14"
viewBox="0 0 14 14"
fill="none"
role="tooltip"
xmlns="http://www.w3.org/2000/svg"
>
<path

View file

@ -1,8 +1,8 @@
import { BrokerConfig, BrokersLogdirs, ConfigSource } from 'generated-sources';
export const brokersPayload = [
{ id: 1, host: 'b-1.test.kafka.amazonaws.com', port: 9092 },
{ id: 2, host: 'b-2.test.kafka.amazonaws.com', port: 9092 },
{ id: 100, host: 'b-1.test.kafka.amazonaws.com', port: 9092 },
{ id: 200, host: 'b-2.test.kafka.amazonaws.com', port: 9092 },
];
const partition = {

View file

@ -32,15 +32,15 @@ export const clustersPayload: Cluster[] = [
export const clusterStatsPayload = {
brokerCount: 2,
activeControllers: 1,
activeControllers: 100,
onlinePartitionCount: 138,
offlinePartitionCount: 0,
inSyncReplicasCount: 239,
outOfSyncReplicasCount: 0,
underReplicatedPartitionCount: 0,
diskUsage: [
{ brokerId: 0, segmentSize: 334567, segmentCount: 245 },
{ brokerId: 1, segmentSize: 12345678, segmentCount: 121 },
{ brokerId: 100, segmentSize: 334567, segmentCount: 245 },
{ brokerId: 200, segmentSize: 12345678, segmentCount: 121 },
],
version: '2.2.1',
};

View file

@ -13,10 +13,10 @@ describe('dateTimeHelpers', () => {
it('should output the correct date', () => {
const date = new Date();
expect(formatTimestamp(date)).toBe(
date.toLocaleString([], { hour12: false })
date.toLocaleString([], { hourCycle: 'h23' })
);
expect(formatTimestamp(date.getTime())).toBe(
date.toLocaleString([], { hour12: false })
date.toLocaleString([], { hourCycle: 'h23' })
);
});
});

Some files were not shown because too many files have changed in this diff