Merge branch 'master' of github.com:provectus/kafka-ui into ISSUE_754_acl

Conflicts:
	documentation/compose/kafka-ui-sasl.yaml
	kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/rbac/AccessContext.java
	kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/FeatureService.java
	kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/ReactiveAdminClient.java
	kafka-ui-contract/src/main/resources/swagger/kafka-ui-api.yaml

Commit 4173f78c4a — 354 changed files with 14004 additions and 8642 deletions.
.github/workflows/aws_publisher.yaml (vendored, 2 changes)
@@ -31,7 +31,7 @@ jobs:
           echo "Packer will be triggered in this dir $WORK_DIR"

       - name: Configure AWS credentials for Kafka-UI account
-        uses: aws-actions/configure-aws-credentials@v1
+        uses: aws-actions/configure-aws-credentials@v1-node16
         with:
           aws-access-key-id: ${{ secrets.AWS_AMI_PUBLISH_KEY_ID }}
           aws-secret-access-key: ${{ secrets.AWS_AMI_PUBLISH_KEY_SECRET }}
.github/workflows/backend.yml (vendored, 2 changes)
@@ -1,4 +1,4 @@
-name: backend
+name: Backend build and test
 on:
   push:
     branches:
.github/workflows/block_merge.yml (vendored, 2 changes)
@@ -6,7 +6,7 @@ jobs:
   block_merge:
     runs-on: ubuntu-latest
     steps:
-      - uses: mheap/github-action-required-labels@v2
+      - uses: mheap/github-action-required-labels@v3
         with:
           mode: exactly
           count: 0
.github/workflows/branch-deploy.yml (vendored, 6 changes)
@@ -1,4 +1,4 @@
-name: DeployFromBranch
+name: Feature testing init
 on:
   workflow_dispatch:

@@ -10,6 +10,8 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v3
+        with:
+          ref: ${{ github.event.pull_request.head.sha }}
       - name: get branch name
         id: extract_branch
         run: |

@@ -43,7 +45,7 @@ jobs:
           restore-keys: |
             ${{ runner.os }}-buildx-
       - name: Configure AWS credentials for Kafka-UI account
-        uses: aws-actions/configure-aws-credentials@v1
+        uses: aws-actions/configure-aws-credentials@v1-node16
         with:
           aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
           aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
.github/workflows/branch-remove.yml (vendored, 2 changes)
@@ -1,4 +1,4 @@
-name: RemoveCustomDeployment
+name: Feature testing destroy
 on:
   workflow_dispatch:
   pull_request:
.github/workflows/build-public-image.yml (vendored, 4 changes)
@@ -9,6 +9,8 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v3
+        with:
+          ref: ${{ github.event.pull_request.head.sha }}
       - name: get branch name
         id: extract_branch
         run: |

@@ -40,7 +42,7 @@ jobs:
           restore-keys: |
             ${{ runner.os }}-buildx-
       - name: Configure AWS credentials for Kafka-UI account
-        uses: aws-actions/configure-aws-credentials@v1
+        uses: aws-actions/configure-aws-credentials@v1-node16
         with:
           aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
           aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
@@ -1,4 +1,4 @@
-name: prepare-helm-release
+name: Prepare helm release
 on:
   repository_dispatch:
     types: [prepare-helm-release]
.github/workflows/cve.yaml (vendored, 2 changes)
@@ -55,7 +55,7 @@ jobs:
           cache-to: type=local,dest=/tmp/.buildx-cache

       - name: Run CVE checks
-        uses: aquasecurity/trivy-action@0.8.0
+        uses: aquasecurity/trivy-action@0.9.1
         with:
           image-ref: "provectuslabs/kafka-ui:${{ steps.build.outputs.version }}"
           format: "table"
.github/workflows/delete-public-image.yml (vendored, 2 changes)
@@ -15,7 +15,7 @@ jobs:
           tag='${{ github.event.pull_request.number }}'
           echo "tag=${tag}" >> $GITHUB_OUTPUT
       - name: Configure AWS credentials for Kafka-UI account
-        uses: aws-actions/configure-aws-credentials@v1
+        uses: aws-actions/configure-aws-credentials@v1-node16
         with:
           aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
           aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
.github/workflows/documentation.yaml (vendored, 2 changes)
@@ -1,4 +1,4 @@
-name: Documentation
+name: Documentation URLs linter
 on:
   pull_request:
     types:
.github/workflows/e2e-automation.yml (vendored, new file, 83 lines)
@@ -0,0 +1,83 @@
name: E2E Automation suite
on:
  workflow_dispatch:
    inputs:
      test_suite:
        description: 'Select test suite to run'
        default: 'regression'
        required: true
        type: choice
        options:
          - regression
          - sanity
          - smoke
      qase_token:
        description: 'Set Qase token to enable integration'
        required: false
        type: string

jobs:
  build-and-test:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
        with:
          ref: ${{ github.sha }}
      - name: Set up environment
        id: set_env_values
        run: |
          cat "./kafka-ui-e2e-checks/.env.ci" >> "./kafka-ui-e2e-checks/.env"
      - name: Pull with Docker
        id: pull_chrome
        run: |
          docker pull selenium/standalone-chrome:103.0
      - name: Set up JDK
        uses: actions/setup-java@v3
        with:
          java-version: '17'
          distribution: 'zulu'
          cache: 'maven'
      - name: Build with Maven
        id: build_app
        run: |
          ./mvnw -B -ntp versions:set -DnewVersion=${{ github.sha }}
          ./mvnw -B -V -ntp clean install -Pprod -Dmaven.test.skip=true ${{ github.event.inputs.extraMavenOptions }}
      - name: Compose with Docker
        id: compose_app
        # use the following command until #819 will be fixed
        run: |
          docker-compose -f ./documentation/compose/e2e-tests.yaml up -d
      - name: Run test suite
        run: |
          ./mvnw -B -ntp versions:set -DnewVersion=${{ github.sha }}
          ./mvnw -B -V -ntp -DQASEIO_API_TOKEN=${{ github.event.inputs.qase_token }} -Dsurefire.suiteXmlFiles='src/test/resources/${{ github.event.inputs.test_suite }}.xml' -Dsuite=${{ github.event.inputs.test_suite }} -f 'kafka-ui-e2e-checks' test -Pprod
      - name: Generate Allure report
        uses: simple-elf/allure-report-action@master
        if: always()
        id: allure-report
        with:
          allure_results: ./kafka-ui-e2e-checks/allure-results
          gh_pages: allure-results
          allure_report: allure-report
          subfolder: allure-results
          report_url: "http://kafkaui-allure-reports.s3-website.eu-central-1.amazonaws.com"
      - uses: jakejarvis/s3-sync-action@master
        if: always()
        env:
          AWS_S3_BUCKET: 'kafkaui-allure-reports'
          AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
          AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
          AWS_REGION: 'eu-central-1'
          SOURCE_DIR: 'allure-history/allure-results'
      - name: Deploy report to Amazon S3
        if: always()
        uses: Sibz/github-status-action@v1.1.6
        with:
          authToken: ${{secrets.GITHUB_TOKEN}}
          context: "Test report"
          state: "success"
          sha: ${{ github.sha }}
          target_url: http://kafkaui-allure-reports.s3-website.eu-central-1.amazonaws.com/${{ github.run_number }}
      - name: Dump Docker logs on failure
        if: failure()
        uses: jwalton/gh-docker-logs@v2.2.1
.github/workflows/e2e-checks.yaml (vendored, 18 changes)
@@ -1,7 +1,7 @@
-name: e2e-checks
+name: E2E PR health check
 on:
   pull_request_target:
-    types: ["opened", "edited", "reopened", "synchronize"]
+    types: [ "opened", "edited", "reopened", "synchronize" ]
     paths:
       - "kafka-ui-api/**"
       - "kafka-ui-contract/**"

@@ -15,6 +15,12 @@ jobs:
       - uses: actions/checkout@v3
         with:
           ref: ${{ github.event.pull_request.head.sha }}
+      - name: Configure AWS credentials for Kafka-UI account
+        uses: aws-actions/configure-aws-credentials@v1-node16
+        with:
+          aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
+          aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
+          aws-region: eu-central-1
       - name: Set the values
        id: set_env_values
        run: |

@@ -33,7 +39,7 @@ jobs:
         id: build_app
         run: |
           ./mvnw -B -ntp versions:set -DnewVersion=${{ github.event.pull_request.head.sha }}
-          ./mvnw -B -V -ntp clean package -Pprod -Dmaven.test.skip=true ${{ github.event.inputs.extraMavenOptions }}
+          ./mvnw -B -V -ntp clean install -Pprod -Dmaven.test.skip=true ${{ github.event.inputs.extraMavenOptions }}
       - name: compose app
         id: compose_app
         # use the following command until #819 will be fixed

@@ -42,7 +48,7 @@ jobs:
       - name: e2e run
         run: |
           ./mvnw -B -ntp versions:set -DnewVersion=${{ github.event.pull_request.head.sha }}
-          ./mvnw -B -V -ntp -DQASEIO_API_TOKEN=${{ secrets.QASEIO_API_TOKEN }} -pl '!kafka-ui-api' test -Pprod
+          ./mvnw -B -V -ntp -Dsurefire.suiteXmlFiles='src/test/resources/smoke.xml' -f 'kafka-ui-e2e-checks' test -Pprod
       - name: Generate allure report
         uses: simple-elf/allure-report-action@master
         if: always()

@@ -57,8 +63,6 @@ jobs:
         if: always()
         env:
           AWS_S3_BUCKET: 'kafkaui-allure-reports'
-          AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
-          AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
           AWS_REGION: 'eu-central-1'
           SOURCE_DIR: 'allure-history/allure-results'
       - name: Post the link to allure report

@@ -66,7 +70,7 @@ jobs:
         uses: Sibz/github-status-action@v1.1.6
         with:
           authToken: ${{secrets.GITHUB_TOKEN}}
-          context: "Test report"
+          context: "Click Details button to open Allure report"
           state: "success"
           sha: ${{ github.event.pull_request.head.sha || github.sha }}
           target_url: http://kafkaui-allure-reports.s3-website.eu-central-1.amazonaws.com/${{ github.run_number }}
.github/workflows/e2e-manual.yml (vendored, new file, 43 lines)
@@ -0,0 +1,43 @@
name: E2E Manual suite
on:
  workflow_dispatch:
    inputs:
      test_suite:
        description: 'Select test suite to run'
        default: 'manual'
        required: true
        type: choice
        options:
          - manual
          - qase
      qase_token:
        description: 'Set Qase token to enable integration'
        required: true
        type: string

jobs:
  build-and-test:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
        with:
          ref: ${{ github.sha }}
      - name: Set up environment
        id: set_env_values
        run: |
          cat "./kafka-ui-e2e-checks/.env.ci" >> "./kafka-ui-e2e-checks/.env"
      - name: Set up JDK
        uses: actions/setup-java@v3
        with:
          java-version: '17'
          distribution: 'zulu'
          cache: 'maven'
      - name: Build with Maven
        id: build_app
        run: |
          ./mvnw -B -ntp versions:set -DnewVersion=${{ github.sha }}
          ./mvnw -B -V -ntp clean install -Pprod -Dmaven.test.skip=true ${{ github.event.inputs.extraMavenOptions }}
      - name: Run test suite
        run: |
          ./mvnw -B -ntp versions:set -DnewVersion=${{ github.sha }}
          ./mvnw -B -V -ntp -DQASEIO_API_TOKEN=${{ github.event.inputs.qase_token }} -Dsurefire.suiteXmlFiles='src/test/resources/${{ github.event.inputs.test_suite }}.xml' -Dsuite=${{ github.event.inputs.test_suite }} -f 'kafka-ui-e2e-checks' test -Pprod
.github/workflows/e2e-weekly.yml (vendored, new file, 70 lines)
@@ -0,0 +1,70 @@
name: E2E Weekly suite
on:
  schedule:
    - cron: '0 1 * * 1'

jobs:
  build-and-test:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
        with:
          ref: ${{ github.sha }}
      - name: Set up environment
        id: set_env_values
        run: |
          cat "./kafka-ui-e2e-checks/.env.ci" >> "./kafka-ui-e2e-checks/.env"
      - name: Pull with Docker
        id: pull_chrome
        run: |
          docker pull selenium/standalone-chrome:103.0
      - name: Set up JDK
        uses: actions/setup-java@v3
        with:
          java-version: '17'
          distribution: 'zulu'
          cache: 'maven'
      - name: Build with Maven
        id: build_app
        run: |
          ./mvnw -B -ntp versions:set -DnewVersion=${{ github.sha }}
          ./mvnw -B -V -ntp clean install -Pprod -Dmaven.test.skip=true ${{ github.event.inputs.extraMavenOptions }}
      - name: Compose with Docker
        id: compose_app
        # use the following command until #819 will be fixed
        run: |
          docker-compose -f ./documentation/compose/e2e-tests.yaml up -d
      - name: Run test suite
        run: |
          ./mvnw -B -ntp versions:set -DnewVersion=${{ github.sha }}
          ./mvnw -B -V -ntp -DQASEIO_API_TOKEN=${{ secrets.QASEIO_API_TOKEN }} -Dsurefire.suiteXmlFiles='src/test/resources/sanity.xml' -Dsuite=weekly -f 'kafka-ui-e2e-checks' test -Pprod
      - name: Generate Allure report
        uses: simple-elf/allure-report-action@master
        if: always()
        id: allure-report
        with:
          allure_results: ./kafka-ui-e2e-checks/allure-results
          gh_pages: allure-results
          allure_report: allure-report
          subfolder: allure-results
          report_url: "http://kafkaui-allure-reports.s3-website.eu-central-1.amazonaws.com"
      - uses: jakejarvis/s3-sync-action@master
        if: always()
        env:
          AWS_S3_BUCKET: 'kafkaui-allure-reports'
          AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
          AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
          AWS_REGION: 'eu-central-1'
          SOURCE_DIR: 'allure-history/allure-results'
      - name: Deploy report to Amazon S3
        if: always()
        uses: Sibz/github-status-action@v1.1.6
        with:
          authToken: ${{secrets.GITHUB_TOKEN}}
          context: "Test report"
          state: "success"
          sha: ${{ github.sha }}
          target_url: http://kafkaui-allure-reports.s3-website.eu-central-1.amazonaws.com/${{ github.run_number }}
      - name: Dump Docker logs on failure
        if: failure()
        uses: jwalton/gh-docker-logs@v2.2.1
.github/workflows/frontend.yaml (vendored, 4 changes)
@@ -1,4 +1,4 @@
-name: frontend
+name: Frontend build and test
 on:
   push:
     branches:

@@ -24,7 +24,7 @@ jobs:
         with:
           version: 7.4.0
       - name: Install node
-        uses: actions/setup-node@v3.5.1
+        uses: actions/setup-node@v3.6.0
         with:
           node-version: "16.15.0"
           cache: "pnpm"
.github/workflows/helm.yaml (vendored, 2 changes)
@@ -1,4 +1,4 @@
-name: Helm
+name: Helm linter
 on:
   pull_request:
     types: ["opened", "edited", "reopened", "synchronize"]
.github/workflows/master.yaml (vendored, 4 changes)
@@ -1,4 +1,4 @@
-name: Master
+name: Master branch build & deploy
 on:
   workflow_dispatch:
   push:

@@ -9,6 +9,8 @@ jobs:
     runs-on: ubuntu-latest
     steps:
      - uses: actions/checkout@v3
+       with:
+         ref: ${{ github.event.pull_request.head.sha }}

      - name: Set up JDK
        uses: actions/setup-java@v3
.github/workflows/pr-checks.yaml (vendored, 2 changes)
@@ -7,7 +7,7 @@ jobs:
   task-check:
     runs-on: ubuntu-latest
     steps:
-      - uses: kentaro-m/task-completed-checker-action@v0.1.0
+      - uses: kentaro-m/task-completed-checker-action@v0.1.1
        with:
          repo-token: "${{ secrets.GITHUB_TOKEN }}"
      - uses: dekinderfiets/pr-description-enforcer@0.0.1
.github/workflows/release-serde-api.yaml (vendored, 2 changes)
@@ -1,4 +1,4 @@
-name: Release-serde-api
+name: Release serde api
 on: workflow_dispatch

 jobs:
.github/workflows/release.yaml (vendored, 3 changes)
@@ -12,6 +12,7 @@ jobs:
       - uses: actions/checkout@v3
         with:
           fetch-depth: 0
+          ref: ${{ github.event.pull_request.head.sha }}

      - run: |
          git config user.name github-actions

@@ -33,7 +34,7 @@ jobs:
           echo "version=${VERSION}" >> $GITHUB_OUTPUT

       - name: Upload files to a GitHub release
-        uses: svenstaro/upload-release-action@2.3.0
+        uses: svenstaro/upload-release-action@2.5.0
         with:
           repo_token: ${{ secrets.GITHUB_TOKEN }}
           file: kafka-ui-api/target/kafka-ui-api-${{ steps.build.outputs.version }}.jar
@@ -12,6 +12,8 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v3
+        with:
+          ref: ${{ github.event.pull_request.head.sha }}
       - name: get branch name
         id: extract_branch
         run: |

@@ -45,7 +47,7 @@ jobs:
           restore-keys: |
             ${{ runner.os }}-buildx-
       - name: Configure AWS credentials for Kafka-UI account
-        uses: aws-actions/configure-aws-credentials@v1
+        uses: aws-actions/configure-aws-credentials@v1-node16
         with:
           aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
           aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
.github/workflows/stale.yaml (vendored, 2 changes)
@@ -7,7 +7,7 @@ jobs:
   stale:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/stale@v6
+      - uses: actions/stale@v7
        with:
          days-before-issue-stale: 7
          days-before-issue-close: 3
.github/workflows/terraform-deploy.yml (vendored, 2 changes)
@@ -1,4 +1,4 @@
-name: terraform_deploy
+name: Terraform deploy
 on:
   workflow_dispatch:
     inputs:
README.md (18 changes)
@@ -185,32 +185,30 @@ For example, if you want to use an environment variable to set the `name` parameter
|`KAFKA_CLUSTERS_0_KSQLDBSERVERAUTH_PASSWORD` | KSQL DB server's basic authentication password
|`KAFKA_CLUSTERS_0_KSQLDBSERVERSSL_KEYSTORELOCATION` |Path to the JKS keystore to communicate to KSQL DB
|`KAFKA_CLUSTERS_0_KSQLDBSERVERSSL_KEYSTOREPASSWORD` |Password of the JKS keystore for KSQL DB
|`KAFKA_CLUSTERS_0_KSQLDBSERVERSSL_TRUSTSTORELOCATION` |Path to the JKS truststore to communicate to KSQL DB
|`KAFKA_CLUSTERS_0_KSQLDBSERVERSSL_TRUSTSTOREPASSWORD` |Password of the JKS truststore for KSQL DB
|`KAFKA_CLUSTERS_0_PROPERTIES_SECURITY_PROTOCOL` |Security protocol to connect to the brokers. For SSL connection use "SSL", for plaintext connection don't set this environment variable
|`KAFKA_CLUSTERS_0_SCHEMAREGISTRY` |SchemaRegistry's address
|`KAFKA_CLUSTERS_0_SCHEMAREGISTRYAUTH_USERNAME` |SchemaRegistry's basic authentication username
|`KAFKA_CLUSTERS_0_SCHEMAREGISTRYAUTH_PASSWORD` |SchemaRegistry's basic authentication password
|`KAFKA_CLUSTERS_0_SCHEMAREGISTRYSSL_KEYSTORELOCATION` |Path to the JKS keystore to communicate to SchemaRegistry
|`KAFKA_CLUSTERS_0_SCHEMAREGISTRYSSL_KEYSTOREPASSWORD` |Password of the JKS keystore for SchemaRegistry
|`KAFKA_CLUSTERS_0_SCHEMAREGISTRYSSL_TRUSTSTORELOCATION` |Path to the JKS truststore to communicate to SchemaRegistry
|`KAFKA_CLUSTERS_0_SCHEMAREGISTRYSSL_TRUSTSTOREPASSWORD` |Password of the JKS truststore for SchemaRegistry
|`KAFKA_CLUSTERS_0_METRICS_SSL` |Enable SSL for Metrics (for PROMETHEUS metrics type). Default: false.
|`KAFKA_CLUSTERS_0_METRICS_USERNAME` |Username for Metrics authentication
|`KAFKA_CLUSTERS_0_METRICS_PASSWORD` |Password for Metrics authentication
|`KAFKA_CLUSTERS_0_METRICS_KEYSTORELOCATION` |Path to the JKS keystore to communicate to metrics source (JMX/PROMETHEUS). For advanced setup, see `kafka-ui-jmx-secured.yml`
|`KAFKA_CLUSTERS_0_METRICS_KEYSTOREPASSWORD` |Password of the JKS metrics keystore
|`KAFKA_CLUSTERS_0_SCHEMANAMETEMPLATE` |How keys are saved to schemaRegistry
|`KAFKA_CLUSTERS_0_METRICS_PORT` |Open metrics port of a broker
|`KAFKA_CLUSTERS_0_METRICS_TYPE` |Type of metrics retriever to use. Valid values are JMX (default) or PROMETHEUS. If Prometheus, then metrics are read from prometheus-jmx-exporter instead of jmx
|`KAFKA_CLUSTERS_0_READONLY` |Enable read-only mode. Default: false
|`KAFKA_CLUSTERS_0_DISABLELOGDIRSCOLLECTION` |Disable collecting segments information. It should be true for confluent cloud. Default: false
|`KAFKA_CLUSTERS_0_KAFKACONNECT_0_NAME` |Given name for the Kafka Connect cluster
|`KAFKA_CLUSTERS_0_KAFKACONNECT_0_ADDRESS` |Address of the Kafka Connect service endpoint
|`KAFKA_CLUSTERS_0_KAFKACONNECT_0_USERNAME`| Kafka Connect cluster's basic authentication username
|`KAFKA_CLUSTERS_0_KAFKACONNECT_0_PASSWORD`| Kafka Connect cluster's basic authentication password
|`KAFKA_CLUSTERS_0_KAFKACONNECT_0_KEYSTORELOCATION`| Path to the JKS keystore to communicate to Kafka Connect
|`KAFKA_CLUSTERS_0_KAFKACONNECT_0_KEYSTOREPASSWORD`| Password of the JKS keystore for Kafka Connect
|`KAFKA_CLUSTERS_0_KAFKACONNECT_0_TRUSTSTORELOCATION`| Path to the JKS truststore to communicate to Kafka Connect
|`KAFKA_CLUSTERS_0_KAFKACONNECT_0_TRUSTSTOREPASSWORD`| Password of the JKS truststore for Kafka Connect
|`KAFKA_CLUSTERS_0_METRICS_SSL` |Enable SSL for Metrics? `true` or `false`. For advanced setup, see `kafka-ui-jmx-secured.yml`
|`KAFKA_CLUSTERS_0_METRICS_USERNAME` |Username for Metrics authentication
|`KAFKA_CLUSTERS_0_METRICS_PASSWORD` |Password for Metrics authentication
|`KAFKA_CLUSTERS_0_POLLING_THROTTLE_RATE` |Max traffic rate (bytes/sec) that kafka-ui allowed to reach when polling messages from the cluster. Default: 0 (not limited)
|`KAFKA_CLUSTERS_0_SSL_TRUSTSTORELOCATION`| Path to the JKS truststore to communicate to Kafka Connect, SchemaRegistry, KSQL, Metrics
|`KAFKA_CLUSTERS_0_SSL_TRUSTSTOREPASSWORD`| Password of the JKS truststore for Kafka Connect, SchemaRegistry, KSQL, Metrics
|`TOPIC_RECREATE_DELAY_SECONDS` |Time delay between topic deletion and topic creation attempts for topic recreate functionality. Default: 1
|`TOPIC_RECREATE_MAXRETRIES` |Number of attempts of topic creation after topic deletion for topic recreate functionality. Default: 15
|`DYNAMIC_CONFIG_ENABLED`|Allow to change application config in runtime. Default: false.
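The cluster-wide truststore and throttle variables listed above complement the per-component settings. A minimal sketch of how they fit into a compose service, assuming a broker at kafka0:29092 and a truststore mounted at /kafka.truststore.jks (both placeholders):

```yaml
services:
  kafka-ui:
    image: provectuslabs/kafka-ui:latest
    environment:
      KAFKA_CLUSTERS_0_NAME: local
      KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka0:29092        # placeholder broker address
      KAFKA_CLUSTERS_0_SSL_TRUSTSTORELOCATION: /kafka.truststore.jks
      KAFKA_CLUSTERS_0_SSL_TRUSTSTOREPASSWORD: "secret"      # placeholder password
      KAFKA_CLUSTERS_0_POLLING_THROTTLE_RATE: 1048576        # ~1 MB/s polling cap; 0 (default) = unlimited
      DYNAMIC_CONFIG_ENABLED: 'true'
```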
@@ -2,6 +2,6 @@ apiVersion: v2
 name: kafka-ui
 description: A Helm chart for kafka-UI
 type: application
-version: 0.5.1
-appVersion: v0.5.0
+version: 0.6.0
+appVersion: v0.6.0
 icon: https://github.com/provectus/kafka-ui/raw/master/documentation/images/kafka-ui-logo.png
@@ -68,6 +68,11 @@ This allows us to check if the registry of the image is specified or not.
 */}}
 {{- define "kafka-ui.imageName" -}}
 {{- $registryName := .Values.image.registry -}}
+{{- if .Values.global }}
+{{- if .Values.global.imageRegistry }}
+{{- $registryName = .Values.global.imageRegistry -}}
+{{- end -}}
+{{- end -}}
 {{- $repository := .Values.image.repository -}}
 {{- $tag := .Values.image.tag | default .Chart.AppVersion -}}
 {{- if $registryName }}
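With the new `global.imageRegistry` lookup, a parent chart or shared values file can override the registry for this chart. A hedged values sketch (the registry host is a placeholder):

```yaml
image:
  registry: docker.io
  repository: provectuslabs/kafka-ui
  tag: ""                               # falls back to .Chart.AppVersion (v0.6.0)

global:
  imageRegistry: registry.example.com   # placeholder; takes precedence over image.registry when set
```

The rendered image name would then be registry.example.com/provectuslabs/kafka-ui:v0.6.0.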
@@ -35,7 +35,7 @@ spec:
   {{- if and ($.Capabilities.APIVersions.Has "networking.k8s.io/v1") $isHigher1p19 -}}
   {{- range .Values.ingress.precedingPaths }}
   - path: {{ .path }}
-    pathType: Prefix
+    pathType: {{ .Values.ingress.pathType }}
     backend:
       service:
         name: {{ .serviceName }}

@@ -47,13 +47,13 @@ spec:
           name: {{ $fullName }}
           port:
             number: {{ $svcPort }}
-      pathType: Prefix
+      pathType: {{ .Values.ingress.pathType }}
       {{- if .Values.ingress.path }}
       path: {{ .Values.ingress.path }}
       {{- end }}
   {{- range .Values.ingress.succeedingPaths }}
   - path: {{ .path }}
-    pathType: Prefix
+    pathType: {{ .Values.ingress.pathType }}
     backend:
       service:
         name: {{ .serviceName }}
@@ -1,3 +1,4 @@
+{{- if .Values.envs.secret -}}
 apiVersion: v1
 kind: Secret
 metadata:

@@ -9,3 +10,4 @@ data:
 {{- range $key, $val := .Values.envs.secret }}
 {{ $key }}: {{ $val | b64enc | quote }}
 {{- end -}}
+{{- end}}
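Wrapping the manifest in `{{- if .Values.envs.secret -}}` means the Secret is rendered only when secret envs are supplied, for example (key names taken from the README table, values are placeholders):

```yaml
envs:
  secret:
    KAFKA_CLUSTERS_0_SCHEMAREGISTRYAUTH_PASSWORD: "sr-pass"
    KAFKA_CLUSTERS_0_KAFKACONNECT_0_PASSWORD: "connect-pass"
```

Each value is base64-encoded by the template through `b64enc`.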
@@ -111,6 +111,9 @@ ingress:
   # The path for the Ingress
   path: "/"

+  # The path type for the Ingress
+  pathType: "Prefix"
+
   # The hostname for the Ingress
   host: ""
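Because `pathType` is now read from values, it can be overridden per installation instead of being hard-coded to `Prefix`. An illustrative override (the `enabled` flag and host are assumptions, not shown in this hunk):

```yaml
ingress:
  enabled: true
  path: "/"
  pathType: "ImplementationSpecific"   # replaces the default "Prefix" in all three path blocks of the template
  host: "kafka-ui.example.com"
```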
documentation/compose/jaas/client.properties (0 changes; mode: normal file → executable file)
documentation/compose/jaas/kafka_connect.jaas (0 changes; mode: normal file → executable file)
documentation/compose/jaas/kafka_connect.password (0 changes; mode: normal file → executable file)

@@ -11,4 +11,4 @@ KafkaClient {
   user_admin="admin-secret";
 };

-Client {};
+Client {};

documentation/compose/jaas/schema_registry.jaas (0 changes; mode: normal file → executable file)
documentation/compose/jaas/schema_registry.password (0 changes; mode: normal file → executable file)
@@ -15,27 +15,25 @@ services:
      KAFKA_CLUSTERS_0_NAME: local
      KAFKA_CLUSTERS_0_PROPERTIES_SECURITY_PROTOCOL: SSL
      KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka0:29092 # SSL LISTENER!
      KAFKA_CLUSTERS_0_PROPERTIES_SSL_TRUSTSTORE_LOCATION: /kafka.truststore.jks
      KAFKA_CLUSTERS_0_PROPERTIES_SSL_TRUSTSTORE_PASSWORD: secret
      KAFKA_CLUSTERS_0_PROPERTIES_SSL_KEYSTORE_LOCATION: /kafka.keystore.jks
      KAFKA_CLUSTERS_0_PROPERTIES_SSL_KEYSTORE_PASSWORD: secret
      KAFKA_CLUSTERS_0_PROPERTIES_SSL_ENDPOINT_IDENTIFICATION_ALGORITHM: '' # DISABLE COMMON NAME VERIFICATION

      KAFKA_CLUSTERS_0_SCHEMAREGISTRY: https://schemaregistry0:8085
      KAFKA_CLUSTERS_0_SCHEMAREGISTRYSSL_KEYSTORELOCATION: /kafka.keystore.jks
      KAFKA_CLUSTERS_0_SCHEMAREGISTRYSSL_KEYSTOREPASSWORD: "secret"
      KAFKA_CLUSTERS_0_SCHEMAREGISTRYSSL_TRUSTSTORELOCATION: /kafka.truststore.jks
      KAFKA_CLUSTERS_0_SCHEMAREGISTRYSSL_TRUSTSTOREPASSWORD: "secret"

      KAFKA_CLUSTERS_0_KSQLDBSERVER: https://ksqldb0:8088
      KAFKA_CLUSTERS_0_KSQLDBSERVERSSL_KEYSTORELOCATION: /kafka.keystore.jks
      KAFKA_CLUSTERS_0_KSQLDBSERVERSSL_KEYSTOREPASSWORD: "secret"
      KAFKA_CLUSTERS_0_KSQLDBSERVERSSL_TRUSTSTORELOCATION: /kafka.truststore.jks
      KAFKA_CLUSTERS_0_KSQLDBSERVERSSL_TRUSTSTOREPASSWORD: "secret"

      KAFKA_CLUSTERS_0_KAFKACONNECT_0_NAME: local
      KAFKA_CLUSTERS_0_KAFKACONNECT_0_ADDRESS: https://kafka-connect0:8083
      KAFKA_CLUSTERS_0_KAFKACONNECT_0_KEYSTORELOCATION: /kafka.keystore.jks
      KAFKA_CLUSTERS_0_KAFKACONNECT_0_KEYSTOREPASSWORD: "secret"
      KAFKA_CLUSTERS_0_KAFKACONNECT_0_TRUSTSTORELOCATION: /kafka.truststore.jks
      KAFKA_CLUSTERS_0_KAFKACONNECT_0_TRUSTSTOREPASSWORD: "secret"

      KAFKA_CLUSTERS_0_SSL_TRUSTSTORELOCATION: /kafka.truststore.jks
      KAFKA_CLUSTERS_0_SSL_TRUSTSTOREPASSWORD: "secret"
      DYNAMIC_CONFIG_ENABLED: 'true' # not necessary for ssl, added for tests

    volumes:
      - ./ssl/kafka.truststore.jks:/kafka.truststore.jks
      - ./ssl/kafka.keystore.jks:/kafka.keystore.jks
@@ -11,11 +11,11 @@ services:
     environment:
       KAFKA_CLUSTERS_0_NAME: local
       KAFKA_CLUSTERS_0_PROPERTIES_SECURITY_PROTOCOL: SSL
-      KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka:29092 # SSL LISTENER!
-      KAFKA_CLUSTERS_0_PROPERTIES_SSL_TRUSTSTORE_LOCATION: /kafka.truststore.jks
-      KAFKA_CLUSTERS_0_PROPERTIES_SSL_TRUSTSTORE_PASSWORD: secret
       KAFKA_CLUSTERS_0_PROPERTIES_SSL_KEYSTORE_LOCATION: /kafka.keystore.jks
-      KAFKA_CLUSTERS_0_PROPERTIES_SSL_KEYSTORE_PASSWORD: secret
+      KAFKA_CLUSTERS_0_PROPERTIES_SSL_KEYSTORE_PASSWORD: "secret"
+      KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka:29092 # SSL LISTENER!
+      KAFKA_CLUSTERS_0_SSL_TRUSTSTORELOCATION: /kafka.truststore.jks
+      KAFKA_CLUSTERS_0_SSL_TRUSTSTOREPASSWORD: "secret"
       KAFKA_CLUSTERS_0_PROPERTIES_SSL_ENDPOINT_IDENTIFICATION_ALGORITHM: '' # DISABLE COMMON NAME VERIFICATION
     volumes:
       - ./ssl/kafka.truststore.jks:/kafka.truststore.jks

@@ -60,4 +60,4 @@ services:
       - ./ssl/creds:/etc/kafka/secrets/creds
       - ./ssl/kafka.truststore.jks:/etc/kafka/secrets/kafka.truststore.jks
       - ./ssl/kafka.keystore.jks:/etc/kafka/secrets/kafka.keystore.jks
-    command: "bash -c 'if [ ! -f /tmp/update_run.sh ]; then echo \"ERROR: Did you forget the update_run.sh file that came with this docker-compose.yml file?\" && exit 1 ; else /tmp/update_run.sh && /etc/confluent/docker/run ; fi'"
+    command: "bash -c 'if [ ! -f /tmp/update_run.sh ]; then echo \"ERROR: Did you forget the update_run.sh file that came with this docker-compose.yml file?\" && exit 1 ; else /tmp/update_run.sh && /etc/confluent/docker/run ; fi'"
@@ -19,6 +19,7 @@ services:
       KAFKA_CLUSTERS_0_SCHEMAREGISTRY: http://schema-registry0:8085
       KAFKA_CLUSTERS_0_KAFKACONNECT_0_NAME: first
       KAFKA_CLUSTERS_0_KAFKACONNECT_0_ADDRESS: http://kafka-connect0:8083
+      DYNAMIC_CONFIG_ENABLED: 'true' # not necessary, added for tests

   kafka0:
     image: confluentinc/cp-kafka:7.2.1.arm64
@@ -7,11 +7,8 @@ services:
    image: provectuslabs/kafka-ui:latest
    ports:
      - 8080:8080
      - 5005:5005
    depends_on:
      - kafka0
      - schemaregistry0
      - kafka-connect0
    environment:
      KAFKA_CLUSTERS_0_NAME: local
      KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka0:29092

@@ -19,15 +16,12 @@ services:
       KAFKA_CLUSTERS_0_KAFKACONNECT_0_NAME: first
       KAFKA_CLUSTERS_0_KAFKACONNECT_0_ADDRESS: http://kafka-connect0:8083
       KAFKA_CLUSTERS_0_METRICS_PORT: 9997
-      KAFKA_CLUSTERS_0_METRICS_SSL: 'true'
-      KAFKA_CLUSTERS_0_METRICS_USERNAME: root
-      KAFKA_CLUSTERS_0_METRICS_PASSWORD: password
       JAVA_OPTS: >-
         -agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=*:5005
-        -Djavax.net.ssl.trustStore=/jmx/clienttruststore
-        -Djavax.net.ssl.trustStorePassword=12345678
-        -Djavax.net.ssl.keyStore=/jmx/clientkeystore
-        -Djavax.net.ssl.keyStorePassword=12345678
+      KAFKA_CLUSTERS_0_METRICS_KEYSTORE_LOCATION: /jmx/clientkeystore
+      KAFKA_CLUSTERS_0_METRICS_KEYSTORE_PASSWORD: '12345678'
+      KAFKA_CLUSTERS_0_SSL_TRUSTSTORE_LOCATION: /jmx/clienttruststore
+      KAFKA_CLUSTERS_0_SSL_TRUSTSTORE_PASSWORD: '12345678'
     volumes:
       - ./jmx/clienttruststore:/jmx/clienttruststore
       - ./jmx/clientkeystore:/jmx/clientkeystore

@@ -70,8 +64,6 @@ services:
        -Dcom.sun.management.jmxremote.access.file=/jmx/jmxremote.access
        -Dcom.sun.management.jmxremote.rmi.port=9997
        -Djava.rmi.server.hostname=kafka0
        -Djava.rmi.server.logCalls=true
        # -Djavax.net.debug=ssl:handshake
      volumes:
        - ./jmx/serverkeystore:/jmx/serverkeystore
        - ./jmx/servertruststore:/jmx/servertruststore

@@ -79,56 +71,3 @@ services:
       - ./jmx/jmxremote.access:/jmx/jmxremote.access
       - ./scripts/update_run.sh:/tmp/update_run.sh
     command: "bash -c 'if [ ! -f /tmp/update_run.sh ]; then echo \"ERROR: Did you forget the update_run.sh file that came with this docker-compose.yml file?\" && exit 1 ; else /tmp/update_run.sh && /etc/confluent/docker/run ; fi'"
-
-  schemaregistry0:
-    image: confluentinc/cp-schema-registry:7.2.1
-    ports:
-      - 8085:8085
-    depends_on:
-      - kafka0
-    environment:
-      SCHEMA_REGISTRY_KAFKASTORE_BOOTSTRAP_SERVERS: PLAINTEXT://kafka0:29092
-      SCHEMA_REGISTRY_KAFKASTORE_SECURITY_PROTOCOL: PLAINTEXT
-      SCHEMA_REGISTRY_HOST_NAME: schemaregistry0
-      SCHEMA_REGISTRY_LISTENERS: http://schemaregistry0:8085
-
-      SCHEMA_REGISTRY_SCHEMA_REGISTRY_INTER_INSTANCE_PROTOCOL: "http"
-      SCHEMA_REGISTRY_LOG4J_ROOT_LOGLEVEL: INFO
-      SCHEMA_REGISTRY_KAFKASTORE_TOPIC: _schemas
-
-  kafka-connect0:
-    image: confluentinc/cp-kafka-connect:7.2.1
-    ports:
-      - 8083:8083
-    depends_on:
-      - kafka0
-      - schemaregistry0
-    environment:
-      CONNECT_BOOTSTRAP_SERVERS: kafka0:29092
-      CONNECT_GROUP_ID: compose-connect-group
-      CONNECT_CONFIG_STORAGE_TOPIC: _connect_configs
-      CONNECT_CONFIG_STORAGE_REPLICATION_FACTOR: 1
-      CONNECT_OFFSET_STORAGE_TOPIC: _connect_offset
-      CONNECT_OFFSET_STORAGE_REPLICATION_FACTOR: 1
-      CONNECT_STATUS_STORAGE_TOPIC: _connect_status
-      CONNECT_STATUS_STORAGE_REPLICATION_FACTOR: 1
-      CONNECT_KEY_CONVERTER: org.apache.kafka.connect.storage.StringConverter
-      CONNECT_KEY_CONVERTER_SCHEMA_REGISTRY_URL: http://schemaregistry0:8085
-      CONNECT_VALUE_CONVERTER: org.apache.kafka.connect.storage.StringConverter
-      CONNECT_VALUE_CONVERTER_SCHEMA_REGISTRY_URL: http://schemaregistry0:8085
-      CONNECT_INTERNAL_KEY_CONVERTER: org.apache.kafka.connect.json.JsonConverter
-      CONNECT_INTERNAL_VALUE_CONVERTER: org.apache.kafka.connect.json.JsonConverter
-      CONNECT_REST_ADVERTISED_HOST_NAME: kafka-connect0
-      CONNECT_PLUGIN_PATH: "/usr/share/java,/usr/share/confluent-hub-components"
-
-  kafka-init-topics:
-    image: confluentinc/cp-kafka:7.2.1
-    volumes:
-      - ./message.json:/data/message.json
-    depends_on:
-      - kafka0
-    command: "bash -c 'echo Waiting for Kafka to be ready... && \
-      cub kafka-ready -b kafka0:29092 1 30 && \
-      kafka-topics --create --topic second.users --partitions 3 --replication-factor 1 --if-not-exists --bootstrap-server kafka0:29092 && \
-      kafka-topics --create --topic first.messages --partitions 2 --replication-factor 1 --if-not-exists --bootstrap-server kafka0:29092 && \
-      kafka-console-producer --bootstrap-server kafka0:29092 --topic second.users < /data/message.json'"
@@ -8,43 +8,45 @@ services:
     ports:
       - 8080:8080
     depends_on:
-      - zookeeper
       - kafka
     environment:
       KAFKA_CLUSTERS_0_NAME: local
-      KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka:9093
-      KAFKA_CLUSTERS_0_ZOOKEEPER: zookeeper:2181
+      KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka:29092
       KAFKA_CLUSTERS_0_PROPERTIES_SECURITY_PROTOCOL: SASL_PLAINTEXT
       KAFKA_CLUSTERS_0_PROPERTIES_SASL_MECHANISM: PLAIN
       KAFKA_CLUSTERS_0_PROPERTIES_SASL_JAAS_CONFIG: 'org.apache.kafka.common.security.plain.PlainLoginModule required username="admin" password="admin-secret";'
-
-  zookeeper:
-    image: wurstmeister/zookeeper:3.4.6
-    environment:
-      JVMFLAGS: "-Djava.security.auth.login.config=/etc/zookeeper/zookeeper_jaas.conf"
-    volumes:
-      - ./jaas/zookeeper_jaas.conf:/etc/zookeeper/zookeeper_jaas.conf
-    ports:
-      - 2181:2181
+      DYNAMIC_CONFIG_ENABLED: true # not necessary for sasl auth, added for tests

   kafka:
-    image: wurstmeister/kafka:2.13-2.8.1
-    depends_on:
-      - zookeeper
+    image: confluentinc/cp-kafka:7.2.1
+    hostname: kafka
+    container_name: kafka
     ports:
-      - 9092:9092
+      - "9092:9092"
+      - "9997:9997"
     environment:
       KAFKA_AUTHORIZER_CLASS_NAME: kafka.security.authorizer.AclAuthorizer
-      KAFKA_SUPER_USERS: "User:admin"
-      KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
-      KAFKA_LISTENERS: INTERNAL://:9093,EXTERNAL://:9092
-      KAFKA_ADVERTISED_LISTENERS: INTERNAL://kafka:9093,EXTERNAL://localhost:9092
-      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: INTERNAL:SASL_PLAINTEXT,EXTERNAL:SASL_PLAINTEXT
-      ALLOW_PLAINTEXT_LISTENER: 'yes'
-      KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'true'
-      KAFKA_INTER_BROKER_LISTENER_NAME: INTERNAL
-      KAFKA_SASL_ENABLED_MECHANISMS: PLAIN
-      KAFKA_SASL_MECHANISM_INTER_BROKER_PROTOCOL: PLAIN
-      KAFKA_OPTS: "-Djava.security.auth.login.config=/etc/kafka/kafka_jaas.conf"
+      KAFKA_BROKER_ID: 1
+      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: 'CONTROLLER:PLAINTEXT,SASL_PLAINTEXT:SASL_PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT'
+      KAFKA_ADVERTISED_LISTENERS: 'SASL_PLAINTEXT://kafka:29092,PLAINTEXT_HOST://localhost:9092'
+      KAFKA_OPTS: "-Djava.security.auth.login.config=/etc/kafka/jaas/kafka_server.conf"
+      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
+      KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0
+      KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1
+      KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1
+      KAFKA_JMX_PORT: 9997
+      KAFKA_JMX_HOSTNAME: localhost
+      KAFKA_PROCESS_ROLES: 'broker,controller'
+      KAFKA_NODE_ID: 1
+      KAFKA_CONTROLLER_QUORUM_VOTERS: '1@kafka:29093'
+      KAFKA_LISTENERS: 'SASL_PLAINTEXT://kafka:29092,CONTROLLER://kafka:29093,PLAINTEXT_HOST://0.0.0.0:9092'
+      KAFKA_INTER_BROKER_LISTENER_NAME: 'SASL_PLAINTEXT'
+      KAFKA_SASL_ENABLED_MECHANISMS: 'PLAIN'
+      KAFKA_SASL_MECHANISM_INTER_BROKER_PROTOCOL: 'PLAIN'
+      KAFKA_CONTROLLER_LISTENER_NAMES: 'CONTROLLER'
+      KAFKA_LOG_DIRS: '/tmp/kraft-combined-logs'
+      KAFKA_SECURITY_PROTOCOL: 'SASL_PLAINTEXT'
+      KAFKA_SUPER_USERS: 'User:admin,User:enzo'
     volumes:
-      - ./jaas/kafka_server.conf:/etc/kafka/kafka_jaas.conf
+      - ./scripts/update_run.sh:/tmp/update_run.sh
+      - ./jaas:/etc/kafka/jaas
+    command: "bash -c 'if [ ! -f /tmp/update_run.sh ]; then echo \"ERROR: Did you forget the update_run.sh file that came with this docker-compose.yml file?\" && exit 1 ; else /tmp/update_run.sh && /etc/confluent/docker/run ; fi'"
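For reference, the SASL/PLAIN client settings above can be reused for any other principal defined in the mounted JAAS directory; a sketch with placeholder credentials (only `admin` is actually configured in this file):

```yaml
    environment:
      KAFKA_CLUSTERS_0_NAME: local
      KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka:29092
      KAFKA_CLUSTERS_0_PROPERTIES_SECURITY_PROTOCOL: SASL_PLAINTEXT
      KAFKA_CLUSTERS_0_PROPERTIES_SASL_MECHANISM: PLAIN
      # placeholder user/password for illustration only
      KAFKA_CLUSTERS_0_PROPERTIES_SASL_JAAS_CONFIG: 'org.apache.kafka.common.security.plain.PlainLoginModule required username="enzo" password="enzo-secret";'
```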
@@ -14,13 +14,16 @@ services:
      kafka.clusters.0.name: SerdeExampleCluster
      kafka.clusters.0.bootstrapServers: kafka0:29092
      kafka.clusters.0.schemaRegistry: http://schemaregistry0:8085
      # optional auth and ssl properties for SR

      # optional SSL settings for cluster (will be used by SchemaRegistry serde, if set)
      #kafka.clusters.0.ssl.keystoreLocation: /kafka.keystore.jks
      #kafka.clusters.0.ssl.keystorePassword: "secret"
      #kafka.clusters.0.ssl.truststoreLocation: /kafka.truststore.jks
      #kafka.clusters.0.ssl.truststorePassword: "secret"

      # optional auth properties for SR
      #kafka.clusters.0.schemaRegistryAuth.username: "use"
      #kafka.clusters.0.schemaRegistryAuth.password: "pswrd"
      #kafka.clusters.0.schemaRegistrySSL.keystoreLocation: /kafka.keystore.jks
      #kafka.clusters.0.schemaRegistrySSL.keystorePassword: "secret"
      #kafka.clusters.0.schemaRegistrySSL.truststoreLocation: /kafka.truststore.jks
      #kafka.clusters.0.schemaRegistrySSL.truststorePassword: "secret"

      kafka.clusters.0.defaultKeySerde: Int32 #optional
      kafka.clusters.0.defaultValueSerde: String #optional

@@ -28,8 +31,7 @@ services:
       kafka.clusters.0.serde.0.name: ProtobufFile
       kafka.clusters.0.serde.0.topicKeysPattern: "topic1"
       kafka.clusters.0.serde.0.topicValuesPattern: "topic1"
-      kafka.clusters.0.serde.0.properties.protobufFiles.0: /protofiles/key-types.proto
-      kafka.clusters.0.serde.0.properties.protobufFiles.1: /protofiles/values.proto
+      kafka.clusters.0.serde.0.properties.protobufFilesDir: /protofiles/
       kafka.clusters.0.serde.0.properties.protobufMessageNameForKey: test.MyKey # default type for keys
       kafka.clusters.0.serde.0.properties.protobufMessageName: test.MyValue # default type for values
       kafka.clusters.0.serde.0.properties.protobufMessageNameForKeyByTopic.topic1: test.MySpecificTopicKey # keys type for topic "topic1"

@@ -52,7 +54,7 @@ services:
       kafka.clusters.0.serde.4.properties.keySchemaNameTemplate: "%s-key"
       kafka.clusters.0.serde.4.properties.schemaNameTemplate: "%s-value"
       #kafka.clusters.0.serde.4.topicValuesPattern: "sr2-topic.*"
-      # optional auth and ssl properties for SR:
+      # optional auth and ssl properties for SR (overrides cluster-level):
       #kafka.clusters.0.serde.4.properties.username: "user"
       #kafka.clusters.0.serde.4.properties.password: "passw"
       #kafka.clusters.0.serde.4.properties.keystoreLocation: /kafka.keystore.jks
@@ -24,6 +24,7 @@ services:
       KAFKA_CLUSTERS_1_BOOTSTRAPSERVERS: kafka1:29092
       KAFKA_CLUSTERS_1_METRICS_PORT: 9998
       KAFKA_CLUSTERS_1_SCHEMAREGISTRY: http://schemaregistry1:8085
+      DYNAMIC_CONFIG_ENABLED: 'true'

   kafka0:
     image: confluentinc/cp-kafka:7.2.1
@@ -1,11 +1,15 @@
 syntax = "proto3";
 package test;

+import "google/protobuf/wrappers.proto";
+
 message MyKey {
   string myKeyF1 = 1;
+  google.protobuf.UInt64Value uint_64_wrapper = 2;
 }

 message MySpecificTopicKey {
   string special_field1 = 1;
   string special_field2 = 2;
+  google.protobuf.FloatValue float_wrapper = 3;
 }
@@ -12,22 +12,26 @@ To configure Kafkaui to deserialize protobuf messages using a supplied protobuf schema
 ```yaml
 kafka:
   clusters:
-    - # Cluster configuration omitted.
-      # protobufFile is the path to the protobuf schema. (deprecated: please use "protobufFiles")
+    - # Cluster configuration omitted...
+      # protobufFilesDir specifies root location for proto files (will be scanned recursively)
+      # NOTE: if 'protobufFilesDir' specified, then 'protobufFile' and 'protobufFiles' settings will be ignored
+      protobufFilesDir: "/path/to/my-protobufs"
+      # (DEPRECATED) protobufFile is the path to the protobuf schema. (deprecated: please use "protobufFiles")
       protobufFile: path/to/my.proto
-      # protobufFiles is the path to one or more protobuf schemas.
-      protobufFiles:
-        - /path/to/my.proto
-        - /path/to/another.proto
-      # protobufMessageName is the default protobuf type that is used to deserilize
-      # the message's value if the topic is not found in protobufMessageNameByTopic.
+      # (DEPRECATED) protobufFiles is the location of one or more protobuf schemas
+      protobufFiles:
+        - /path/to/my-protobufs/my.proto
+        - /path/to/my-protobufs/another.proto
+        - /path/to/my-protobufs:test/test.proto
+      # protobufMessageName is the default protobuf type that is used to deserialize
+      # the message's value if the topic is not found in protobufMessageNameByTopic.
       protobufMessageName: my.DefaultValType
       # protobufMessageNameByTopic is a mapping of topic names to protobuf types.
       # This mapping is required and is used to deserialize the Kafka message's value.
       protobufMessageNameByTopic:
         topic1: my.Type1
         topic2: my.Type2
-      # protobufMessageNameForKey is the default protobuf type that is used to deserilize
+      # protobufMessageNameForKey is the default protobuf type that is used to deserialize
       # the message's key if the topic is not found in protobufMessageNameForKeyByTopic.
       protobufMessageNameForKey: my.DefaultKeyType
       # protobufMessageNameForKeyByTopic is a mapping of topic names to protobuf types.
@@ -20,7 +20,7 @@ kafka:
   clusters:
     - name: Cluster1
       # Other Cluster configuration omitted ...
-      serdes:
+      serde:
         # registering String serde with custom config
         - name: AsciiString
           className: com.provectus.kafka.ui.serdes.builtin.StringSerde

@@ -43,13 +43,11 @@ kafka:
   clusters:
     - name: Cluster1
       # Other Cluster configuration omitted ...
-      serdes:
+      serde:
         - name: ProtobufFile
           properties:
-            # path to the protobuf schema files
-            protobufFiles:
-              - path/to/my.proto
-              - path/to/another.proto
+            # path to the protobuf schema files directory
+            protobufFilesDir: "path/to/protofiles"
             # default protobuf type that is used for KEY serialization/deserialization
             # optional
             protobufMessageNameForKey: my.Type1

@@ -84,7 +82,7 @@ kafka:
     - name: Cluster1
       # this url will be used by "SchemaRegistry" by default
       schemaRegistry: http://main-schema-registry:8081
-      serdes:
+      serde:
        - name: AnotherSchemaRegistry
          className: com.provectus.kafka.ui.serdes.builtin.sr.SchemaRegistrySerde
          properties:

@@ -109,7 +107,7 @@ Sample configuration:
 kafka:
   clusters:
     - name: Cluster1
-      serdes:
+      serde:
        - name: String
          topicKeysPattern: click-events|imp-events

@@ -131,7 +129,7 @@ kafka:
     - name: Cluster1
       defaultKeySerde: Int32
       defaultValueSerde: String
-      serdes:
+      serde:
        - name: Int32
          topicKeysPattern: click-events|imp-events
 ```

@@ -156,7 +154,7 @@ Sample configuration:
 kafka:
   clusters:
     - name: Cluster1
-      serdes:
+      serde:
        - name: MyCustomSerde
          className: my.lovely.org.KafkaUiSerde
          filePath: /var/lib/kui-serde/my-kui-serde.jar
@@ -1,8 +1,12 @@
-FROM azul/zulu-openjdk-alpine:17
+FROM azul/zulu-openjdk-alpine:17-jre

 RUN apk add --no-cache gcompat # need to make snappy codec work
 RUN addgroup -S kafkaui && adduser -S kafkaui -G kafkaui
+
+# creating folder for dynamic config usage (certificates uploads, etc)
+RUN mkdir /etc/kafkaui/
+RUN chown kafkaui /etc/kafkaui

 USER kafkaui

 ARG JAR_FILE

@@ -12,4 +16,5 @@ ENV JAVA_OPTS=

 EXPOSE 8080

-CMD java $JAVA_OPTS -jar kafka-ui-api.jar
+# see JmxSslSocketFactory docs to understand why add-opens is needed
+CMD java --add-opens java.rmi/javax.rmi.ssl=ALL-UNNAMED $JAVA_OPTS -jar kafka-ui-api.jar
@@ -199,6 +199,31 @@
         <version>${antlr4-maven-plugin.version}</version>
       </dependency>

+      <dependency>
+        <groupId>org.opendatadiscovery</groupId>
+        <artifactId>oddrn-generator-java</artifactId>
+        <version>${odd-oddrn-generator.version}</version>
+      </dependency>
+      <dependency>
+        <groupId>org.opendatadiscovery</groupId>
+        <artifactId>ingestion-contract-client</artifactId>
+        <exclusions>
+          <exclusion>
+            <groupId>org.springframework.boot</groupId>
+            <artifactId>spring-boot-starter-webflux</artifactId>
+          </exclusion>
+          <exclusion>
+            <groupId>io.projectreactor</groupId>
+            <artifactId>reactor-core</artifactId>
+          </exclusion>
+          <exclusion>
+            <groupId>io.projectreactor.ipc</groupId>
+            <artifactId>reactor-netty</artifactId>
+          </exclusion>
+        </exclusions>
+        <version>${odd-oddrn-client.version}</version>
+      </dependency>
+
       <dependency>
         <groupId>org.springframework.security</groupId>
         <artifactId>spring-security-ldap</artifactId>
@@ -1,8 +1,10 @@
 package com.provectus.kafka.ui;

-import org.springframework.boot.SpringApplication;
+import com.provectus.kafka.ui.util.DynamicConfigOperations;
 import org.springframework.boot.autoconfigure.SpringBootApplication;
 import org.springframework.boot.autoconfigure.ldap.LdapAutoConfiguration;
+import org.springframework.boot.builder.SpringApplicationBuilder;
+import org.springframework.context.ConfigurableApplicationContext;
 import org.springframework.scheduling.annotation.EnableAsync;
 import org.springframework.scheduling.annotation.EnableScheduling;

@@ -12,6 +14,13 @@ import org.springframework.scheduling.annotation.EnableScheduling;
 public class KafkaUiApplication {

   public static void main(String[] args) {
-    SpringApplication.run(KafkaUiApplication.class, args);
+    startApplication(args);
   }
+
+  public static ConfigurableApplicationContext startApplication(String[] args) {
+    return new SpringApplicationBuilder(KafkaUiApplication.class)
+        .initializers(DynamicConfigOperations.dynamicConfigPropertiesInitializer())
+        .build()
+        .run(args);
+  }
 }
@@ -2,6 +2,7 @@ package com.provectus.kafka.ui.client;

 import static com.provectus.kafka.ui.config.ClustersProperties.ConnectCluster;

+import com.provectus.kafka.ui.config.ClustersProperties;
 import com.provectus.kafka.ui.connect.ApiClient;
 import com.provectus.kafka.ui.connect.api.KafkaConnectClientApi;
 import com.provectus.kafka.ui.connect.model.Connector;

@@ -12,6 +13,7 @@ import com.provectus.kafka.ui.util.WebClientConfigurator;
 import java.time.Duration;
 import java.util.List;
 import java.util.Map;
+import javax.annotation.Nullable;
 import lombok.extern.slf4j.Slf4j;
 import org.springframework.core.ParameterizedTypeReference;
 import org.springframework.http.HttpHeaders;

@@ -31,8 +33,10 @@ public class RetryingKafkaConnectClient extends KafkaConnectClientApi {
   private static final int MAX_RETRIES = 5;
   private static final Duration RETRIES_DELAY = Duration.ofMillis(200);

-  public RetryingKafkaConnectClient(ConnectCluster config, DataSize maxBuffSize) {
-    super(new RetryingApiClient(config, maxBuffSize));
+  public RetryingKafkaConnectClient(ConnectCluster config,
+                                    @Nullable ClustersProperties.TruststoreConfig truststoreConfig,
+                                    DataSize maxBuffSize) {
+    super(new RetryingApiClient(config, truststoreConfig, maxBuffSize));
   }

   private static Retry conflictCodeRetry() {

@@ -77,23 +81,28 @@ public class RetryingKafkaConnectClient extends KafkaConnectClientApi {

   private static class RetryingApiClient extends ApiClient {

-    public RetryingApiClient(ConnectCluster config, DataSize maxBuffSize) {
-      super(buildWebClient(maxBuffSize, config), null, null);
+    public RetryingApiClient(ConnectCluster config,
+                             ClustersProperties.TruststoreConfig truststoreConfig,
+                             DataSize maxBuffSize) {
+      super(buildWebClient(maxBuffSize, config, truststoreConfig), null, null);
       setBasePath(config.getAddress());
-      setUsername(config.getUserName());
+      setUsername(config.getUsername());
       setPassword(config.getPassword());
     }

-    public static WebClient buildWebClient(DataSize maxBuffSize, ConnectCluster config) {
+    public static WebClient buildWebClient(DataSize maxBuffSize,
+                                           ConnectCluster config,
+                                           ClustersProperties.TruststoreConfig truststoreConfig) {
       return new WebClientConfigurator()
           .configureSsl(
-              config.getKeystoreLocation(),
-              config.getKeystorePassword(),
-              config.getTruststoreLocation(),
-              config.getTruststorePassword()
+              truststoreConfig,
+              new ClustersProperties.KeystoreConfig(
+                  config.getKeystoreLocation(),
+                  config.getKeystorePassword()
+              )
           )
           .configureBasicAuth(
-              config.getUserName(),
+              config.getUsername(),
               config.getPassword()
           )
           .configureBufferSize(maxBuffSize)
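The new constructor splits per-Connect keystore settings (`KeystoreConfig`) from the cluster-level truststore (`TruststoreConfig`). In YAML terms the two map roughly to the following sketch (paths and credentials are placeholders; field names come from `ClustersProperties` below):

```yaml
kafka:
  clusters:
    - name: local
      ssl:
        truststoreLocation: /kafka.truststore.jks   # cluster-level truststore, shared by Connect/SR/KSQL clients
        truststorePassword: "secret"
      kafkaConnect:
        - name: first
          address: https://kafka-connect0:8083
          username: connect-user                    # note the renamed field (userName -> username)
          password: "connect-pass"
          keystoreLocation: /kafka.keystore.jks     # per-connect keystore
          keystorePassword: "secret"
```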
@@ -1,12 +1,13 @@
 package com.provectus.kafka.ui.config;

 import com.provectus.kafka.ui.model.MetricsConfig;
 import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Properties;
 import java.util.Set;
 import javax.annotation.Nullable;
 import javax.annotation.PostConstruct;
 import lombok.AllArgsConstructor;
 import lombok.Builder;
@@ -30,56 +31,58 @@ public class ClustersProperties {
     String bootstrapServers;
     String schemaRegistry;
     SchemaRegistryAuth schemaRegistryAuth;
-    WebClientSsl schemaRegistrySsl;
+    KeystoreConfig schemaRegistrySsl;
     String ksqldbServer;
     KsqldbServerAuth ksqldbServerAuth;
-    WebClientSsl ksqldbServerSsl;
+    KeystoreConfig ksqldbServerSsl;
     List<ConnectCluster> kafkaConnect;
     MetricsConfigData metrics;
-    Properties properties;
+    Map<String, Object> properties;
     boolean readOnly = false;
-    boolean disableLogDirsCollection = false;
-    List<SerdeConfig> serde = new ArrayList<>();
+    List<SerdeConfig> serde;
     String defaultKeySerde;
     String defaultValueSerde;
-    List<Masking> masking = new ArrayList<>();
-    long pollingThrottleRate = 0;
+    List<Masking> masking;
+    Long pollingThrottleRate;
+    TruststoreConfig ssl;
   }

   @Data
+  @ToString(exclude = "password")
   public static class MetricsConfigData {
     String type;
     Integer port;
-    boolean ssl;
+    Boolean ssl;
     String username;
     String password;
+    String keystoreLocation;
+    String keystorePassword;
   }

   @Data
+  @NoArgsConstructor
+  @AllArgsConstructor
+  @Builder(toBuilder = true)
+  @ToString(exclude = {"password", "keystorePassword"})
   public static class ConnectCluster {
     String name;
     String address;
-    String userName;
+    String username;
     String password;
     String keystoreLocation;
     String keystorePassword;
-    String truststoreLocation;
-    String truststorePassword;
   }

   @Data
+  @ToString(exclude = {"password"})
   public static class SchemaRegistryAuth {
     String username;
     String password;
   }

   @Data
-  public static class WebClientSsl {
-    String keystoreLocation;
-    String keystorePassword;
+  @ToString(exclude = {"truststorePassword"})
+  public static class TruststoreConfig {
     String truststoreLocation;
     String truststorePassword;
   }
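The extended `MetricsConfigData` binds per-cluster metrics settings such as the new keystore fields; a hedged YAML sketch mirroring the values used in the jmx-secured compose above (all values are examples):

```yaml
kafka:
  clusters:
    - name: local
      metrics:
        type: JMX                 # defaulted to JMX when omitted (see setMetricsDefaults below)
        port: 9997
        ssl: true
        username: root            # placeholder credentials
        password: password
        keystoreLocation: /jmx/clientkeystore
        keystorePassword: "12345678"
```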
@@ -89,7 +92,7 @@ public class ClustersProperties {
     String name;
     String className;
     String filePath;
-    Map<String, Object> properties = new HashMap<>();
+    Map<String, Object> properties;
     String topicKeysPattern;
     String topicValuesPattern;
   }

@@ -101,12 +104,21 @@ public class ClustersProperties {
     String password;
   }

+  @Data
+  @NoArgsConstructor
+  @AllArgsConstructor
+  @ToString(exclude = {"keystorePassword"})
+  public static class KeystoreConfig {
+    String keystoreLocation;
+    String keystorePassword;
+  }
+
   @Data
   public static class Masking {
     Type type;
-    List<String> fields = List.of(); //if empty - policy will be applied to all fields
-    List<String> pattern = List.of("X", "x", "n", "-"); //used when type=MASK
-    String replacement = "***DATA_MASKED***"; //used when type=REPLACE
+    List<String> fields; //if null or empty list - policy will be applied to all fields
+    List<String> pattern; //used when type=MASK
+    String replacement; //used when type=REPLACE
     String topicKeysPattern;
     String topicValuesPattern;
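With the masking defaults relaxed to null, a policy now only carries what the user sets explicitly. An illustrative configuration using the fields of the `Masking` class above (topic patterns and field names are examples):

```yaml
kafka:
  clusters:
    - name: local
      masking:
        - type: REPLACE
          fields: [ "password", "ssn" ]       # omit to apply the policy to all fields
          replacement: "***"                  # used when type=REPLACE
          topicValuesPattern: "customers.*"
        - type: MASK
          pattern: [ "X", "x", "n", "-" ]     # used when type=MASK
          topicKeysPattern: "events.*"
```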
@@ -117,7 +129,41 @@ public class ClustersProperties {

@PostConstruct
public void validateAndSetDefaults() {
validateClusterNames();
if (clusters != null) {
validateClusterNames();
flattenClusterProperties();
setMetricsDefaults();
}
}

private void setMetricsDefaults() {
for (Cluster cluster : clusters) {
if (cluster.getMetrics() != null && !StringUtils.hasText(cluster.getMetrics().getType())) {
cluster.getMetrics().setType(MetricsConfig.JMX_METRICS_TYPE);
}
}
}

private void flattenClusterProperties() {
for (Cluster cluster : clusters) {
cluster.setProperties(flattenClusterProperties(null, cluster.getProperties()));
}
}

private Map<String, Object> flattenClusterProperties(@Nullable String prefix,
@Nullable Map<String, Object> propertiesMap) {
Map<String, Object> flattened = new HashMap<>();
if (propertiesMap != null) {
propertiesMap.forEach((k, v) -> {
String key = prefix == null ? k : prefix + "." + k;
if (v instanceof Map<?, ?>) {
flattened.putAll(flattenClusterProperties(key, (Map<String, Object>) v));
} else {
flattened.put(key, v);
}
});
}
return flattened;
}
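To illustrate what flattenClusterProperties does, a nested properties block like the sketch below (the property names are illustrative, not taken from this commit) ends up in the cluster's properties map under dotted keys:

kafka:
  clusters:
    - name: local              # hypothetical cluster name
      properties:
        security:
          protocol: SASL_SSL   # illustrative Kafka client property
        sasl:
          mechanism: PLAIN     # illustrative Kafka client property
# After validateAndSetDefaults() the cluster's properties map holds:
#   security.protocol -> SASL_SSL
#   sasl.mechanism    -> PLAIN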

private void validateClusterNames() {

@@ -1,7 +1,6 @@
package com.provectus.kafka.ui.config.auth;

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import javax.annotation.PostConstruct;

@@ -32,13 +31,13 @@ public class OAuthProperties {
private String clientName;
private String redirectUri;
private String authorizationGrantType;
private Set<String> scope = new HashSet<>();
private Set<String> scope;
private String issuerUri;
private String authorizationUri;
private String tokenUri;
private String userInfoUri;
private String jwkSetUri;
private String userNameAttribute;
private Map<String, String> customParams = new HashMap<>();
private Map<String, String> customParams;
}
}

@@ -4,6 +4,8 @@ import static com.provectus.kafka.ui.config.auth.OAuthProperties.OAuth2Provider;
import static org.springframework.boot.autoconfigure.security.oauth2.client.OAuth2ClientProperties.Provider;
import static org.springframework.boot.autoconfigure.security.oauth2.client.OAuth2ClientProperties.Registration;

import java.util.Optional;
import java.util.Set;
import lombok.AccessLevel;
import lombok.NoArgsConstructor;
import org.apache.commons.lang3.StringUtils;

@@ -24,7 +26,7 @@ public final class OAuthPropertiesConverter {
registration.setClientId(provider.getClientId());
registration.setClientSecret(provider.getClientSecret());
registration.setClientName(provider.getClientName());
registration.setScope(provider.getScope());
registration.setScope(Optional.ofNullable(provider.getScope()).orElse(Set.of()));
registration.setRedirectUri(provider.getRedirectUri());
registration.setAuthorizationGrantType(provider.getAuthorizationGrantType());

@@ -71,7 +73,8 @@ public final class OAuthPropertiesConverter {
}

private static boolean isGoogle(OAuth2Provider provider) {
return GOOGLE.equalsIgnoreCase(provider.getCustomParams().get(TYPE));
return provider.getCustomParams() != null
&& GOOGLE.equalsIgnoreCase(provider.getCustomParams().get(TYPE));
}
}

@@ -12,6 +12,7 @@ import org.springframework.security.core.Authentication;
import org.springframework.security.web.server.WebFilterExchange;
import org.springframework.security.web.util.UrlUtils;
import org.springframework.stereotype.Component;
import org.springframework.util.Assert;
import org.springframework.web.server.WebSession;
import org.springframework.web.util.UriComponents;
import org.springframework.web.util.UriComponentsBuilder;

@@ -45,6 +46,10 @@ public class CognitoLogoutSuccessHandler implements LogoutSuccessHandler {
.fragment(null)
.build();

Assert.isTrue(
provider.getCustomParams() != null && provider.getCustomParams().containsKey("logoutUrl"),
"Custom params should contain 'logoutUrl'"
);
final var uri = UriComponentsBuilder
.fromUri(URI.create(provider.getCustomParams().get("logoutUrl")))
.queryParam("client_id", provider.getClientId())

@@ -66,7 +66,7 @@ public class AccessController implements AuthorizationApi {
UserPermissionDTO dto = new UserPermissionDTO();
dto.setClusters(clusters);
dto.setResource(ResourceTypeDTO.fromValue(permission.getResource().toString().toUpperCase()));
dto.setValue(permission.getValue() != null ? permission.getValue().toString() : null);
dto.setValue(permission.getValue());
dto.setActions(permission.getActions()
.stream()
.map(String::toUpperCase)

@ -0,0 +1,137 @@
|
|||
package com.provectus.kafka.ui.controller;
|
||||
|
||||
import static com.provectus.kafka.ui.model.rbac.permission.ApplicationConfigAction.EDIT;
|
||||
import static com.provectus.kafka.ui.model.rbac.permission.ApplicationConfigAction.VIEW;
|
||||
|
||||
import com.provectus.kafka.ui.api.ApplicationConfigApi;
|
||||
import com.provectus.kafka.ui.config.ClustersProperties;
|
||||
import com.provectus.kafka.ui.model.ApplicationConfigDTO;
|
||||
import com.provectus.kafka.ui.model.ApplicationConfigPropertiesDTO;
|
||||
import com.provectus.kafka.ui.model.ApplicationConfigValidationDTO;
|
||||
import com.provectus.kafka.ui.model.ApplicationInfoDTO;
|
||||
import com.provectus.kafka.ui.model.ClusterConfigValidationDTO;
|
||||
import com.provectus.kafka.ui.model.RestartRequestDTO;
|
||||
import com.provectus.kafka.ui.model.UploadedFileInfoDTO;
|
||||
import com.provectus.kafka.ui.model.rbac.AccessContext;
|
||||
import com.provectus.kafka.ui.service.KafkaClusterFactory;
|
||||
import com.provectus.kafka.ui.service.rbac.AccessControlService;
|
||||
import com.provectus.kafka.ui.util.ApplicationRestarter;
|
||||
import com.provectus.kafka.ui.util.DynamicConfigOperations;
|
||||
import com.provectus.kafka.ui.util.DynamicConfigOperations.PropertiesStructure;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import javax.annotation.Nullable;
|
||||
import lombok.RequiredArgsConstructor;
|
||||
import lombok.extern.slf4j.Slf4j;
|
||||
import org.mapstruct.Mapper;
|
||||
import org.mapstruct.factory.Mappers;
|
||||
import org.springframework.http.ResponseEntity;
|
||||
import org.springframework.http.codec.multipart.FilePart;
|
||||
import org.springframework.web.bind.annotation.RestController;
|
||||
import org.springframework.web.server.ServerWebExchange;
|
||||
import reactor.core.publisher.Flux;
|
||||
import reactor.core.publisher.Mono;
|
||||
import reactor.util.function.Tuple2;
|
||||
import reactor.util.function.Tuples;
|
||||
|
||||
@Slf4j
|
||||
@RestController
|
||||
@RequiredArgsConstructor
|
||||
public class ApplicationConfigController implements ApplicationConfigApi {
|
||||
|
||||
private static final PropertiesMapper MAPPER = Mappers.getMapper(PropertiesMapper.class);
|
||||
|
||||
@Mapper
|
||||
interface PropertiesMapper {
|
||||
|
||||
PropertiesStructure fromDto(ApplicationConfigPropertiesDTO dto);
|
||||
|
||||
ApplicationConfigPropertiesDTO toDto(PropertiesStructure propertiesStructure);
|
||||
}
|
||||
|
||||
private final AccessControlService accessControlService;
|
||||
private final DynamicConfigOperations dynamicConfigOperations;
|
||||
private final ApplicationRestarter restarter;
|
||||
private final KafkaClusterFactory kafkaClusterFactory;
|
||||
|
||||
|
||||
@Override
|
||||
public Mono<ResponseEntity<ApplicationInfoDTO>> getApplicationInfo(ServerWebExchange exchange) {
|
||||
return Mono.just(
|
||||
new ApplicationInfoDTO()
|
||||
.enabledFeatures(
|
||||
dynamicConfigOperations.dynamicConfigEnabled()
|
||||
? List.of(ApplicationInfoDTO.EnabledFeaturesEnum.DYNAMIC_CONFIG)
|
||||
: List.of()
|
||||
)
|
||||
).map(ResponseEntity::ok);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Mono<ResponseEntity<ApplicationConfigDTO>> getCurrentConfig(ServerWebExchange exchange) {
|
||||
return accessControlService
|
||||
.validateAccess(
|
||||
AccessContext.builder()
|
||||
.applicationConfigActions(VIEW)
|
||||
.build()
|
||||
)
|
||||
.then(Mono.fromSupplier(() -> ResponseEntity.ok(
|
||||
new ApplicationConfigDTO()
|
||||
.properties(MAPPER.toDto(dynamicConfigOperations.getCurrentProperties()))
|
||||
)));
|
||||
}
|
||||
|
||||
@Override
|
||||
public Mono<ResponseEntity<Void>> restartWithConfig(Mono<RestartRequestDTO> restartRequestDto,
|
||||
ServerWebExchange exchange) {
|
||||
return accessControlService
|
||||
.validateAccess(
|
||||
AccessContext.builder()
|
||||
.applicationConfigActions(EDIT)
|
||||
.build()
|
||||
)
|
||||
.then(restartRequestDto)
|
||||
.map(dto -> {
|
||||
dynamicConfigOperations.persist(MAPPER.fromDto(dto.getConfig().getProperties()));
|
||||
restarter.requestRestart();
|
||||
return ResponseEntity.ok().build();
|
||||
});
|
||||
}
|
||||
|
||||
@Override
|
||||
public Mono<ResponseEntity<UploadedFileInfoDTO>> uploadConfigRelatedFile(FilePart file, ServerWebExchange exchange) {
|
||||
return accessControlService
|
||||
.validateAccess(
|
||||
AccessContext.builder()
|
||||
.applicationConfigActions(EDIT)
|
||||
.build()
|
||||
)
|
||||
.then(dynamicConfigOperations.uploadConfigRelatedFile(file))
|
||||
.map(path -> new UploadedFileInfoDTO().location(path.toString()))
|
||||
.map(ResponseEntity::ok);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Mono<ResponseEntity<ApplicationConfigValidationDTO>> validateConfig(Mono<ApplicationConfigDTO> configDto,
|
||||
ServerWebExchange exchange) {
|
||||
return configDto
|
||||
.flatMap(config -> {
|
||||
PropertiesStructure propertiesStructure = MAPPER.fromDto(config.getProperties());
|
||||
ClustersProperties clustersProperties = propertiesStructure.getKafka();
|
||||
return validateClustersConfig(clustersProperties)
|
||||
.map(validations -> new ApplicationConfigValidationDTO().clusters(validations));
|
||||
})
|
||||
.map(ResponseEntity::ok);
|
||||
}
|
||||
|
||||
private Mono<Map<String, ClusterConfigValidationDTO>> validateClustersConfig(
|
||||
@Nullable ClustersProperties properties) {
|
||||
if (properties == null || properties.getClusters() == null) {
|
||||
return Mono.just(Map.of());
|
||||
}
|
||||
properties.validateAndSetDefaults();
|
||||
return Flux.fromIterable(properties.getClusters())
|
||||
.flatMap(c -> kafkaClusterFactory.validate(c).map(v -> Tuples.of(c.getName(), v)))
|
||||
.collectMap(Tuple2::getT1, Tuple2::getT2);
|
||||
}
|
||||
}
|
|
@ -37,10 +37,10 @@ public class KafkaConnectController extends AbstractController implements KafkaC
|
|||
public Mono<ResponseEntity<Flux<ConnectDTO>>> getConnects(String clusterName,
|
||||
ServerWebExchange exchange) {
|
||||
|
||||
Flux<ConnectDTO> flux = Flux.fromIterable(kafkaConnectService.getConnects(getCluster(clusterName)))
|
||||
Flux<ConnectDTO> availableConnects = kafkaConnectService.getConnects(getCluster(clusterName))
|
||||
.filterWhen(dto -> accessControlService.isConnectAccessible(dto, clusterName));
|
||||
|
||||
return Mono.just(ResponseEntity.ok(flux));
|
||||
return Mono.just(ResponseEntity.ok(availableConnects));
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -54,7 +54,7 @@ public class KafkaConnectController extends AbstractController implements KafkaC
|
|||
.build());
|
||||
|
||||
return validateAccess.thenReturn(
|
||||
ResponseEntity.ok(kafkaConnectService.getConnectors(getCluster(clusterName), connectName))
|
||||
ResponseEntity.ok(kafkaConnectService.getConnectorNames(getCluster(clusterName), connectName))
|
||||
);
|
||||
}
|
||||
|
||||
|
|
|
@ -175,7 +175,7 @@ public class TopicsController extends AbstractController implements TopicsApi {
|
|||
List<InternalTopic> filtered = existingTopics.stream()
|
||||
.filter(topic -> !topic.isInternal()
|
||||
|| showInternal != null && showInternal)
|
||||
.filter(topic -> search == null || StringUtils.contains(topic.getName(), search))
|
||||
.filter(topic -> search == null || StringUtils.containsIgnoreCase(topic.getName(), search))
|
||||
.sorted(comparator)
|
||||
.toList();
|
||||
var totalPages = (filtered.size() / pageSize)
|
||||
|
|
|
@ -29,7 +29,9 @@ public enum ErrorCode {
|
|||
RECREATE_TOPIC_TIMEOUT(4015, HttpStatus.REQUEST_TIMEOUT),
|
||||
INVALID_ENTITY_STATE(4016, HttpStatus.BAD_REQUEST),
|
||||
SCHEMA_NOT_DELETED(4017, HttpStatus.INTERNAL_SERVER_ERROR),
|
||||
TOPIC_ANALYSIS_ERROR(4018, HttpStatus.BAD_REQUEST);
|
||||
TOPIC_ANALYSIS_ERROR(4018, HttpStatus.BAD_REQUEST),
|
||||
FILE_UPLOAD_EXCEPTION(4019, HttpStatus.INTERNAL_SERVER_ERROR),
|
||||
;
|
||||
|
||||
static {
|
||||
// codes uniqueness check
|
||||
|
|
|
@ -0,0 +1,19 @@
|
|||
package com.provectus.kafka.ui.exception;
|
||||
|
||||
import java.nio.file.Path;
|
||||
|
||||
public class FileUploadException extends CustomBaseException {
|
||||
|
||||
public FileUploadException(String msg, Throwable cause) {
|
||||
super(msg, cause);
|
||||
}
|
||||
|
||||
public FileUploadException(Path path, Throwable cause) {
|
||||
super("Error uploading file %s".formatted(path), cause);
|
||||
}
|
||||
|
||||
@Override
|
||||
public ErrorCode getErrorCode() {
|
||||
return ErrorCode.FILE_UPLOAD_EXCEPTION;
|
||||
}
|
||||
}
|
|
@ -6,6 +6,10 @@ public class ValidationException extends CustomBaseException {
|
|||
super(message);
|
||||
}
|
||||
|
||||
public ValidationException(String message, Throwable cause) {
|
||||
super(message, cause);
|
||||
}
|
||||
|
||||
@Override
|
||||
public ErrorCode getErrorCode() {
|
||||
return ErrorCode.VALIDATION_FAIL;
|
||||
|
|
|
@ -6,12 +6,12 @@ import com.provectus.kafka.ui.model.BrokerDTO;
|
|||
import com.provectus.kafka.ui.model.BrokerDiskUsageDTO;
|
||||
import com.provectus.kafka.ui.model.BrokerMetricsDTO;
|
||||
import com.provectus.kafka.ui.model.ClusterDTO;
|
||||
import com.provectus.kafka.ui.model.ClusterFeature;
|
||||
import com.provectus.kafka.ui.model.ClusterMetricsDTO;
|
||||
import com.provectus.kafka.ui.model.ClusterStatsDTO;
|
||||
import com.provectus.kafka.ui.model.ConfigSourceDTO;
|
||||
import com.provectus.kafka.ui.model.ConfigSynonymDTO;
|
||||
import com.provectus.kafka.ui.model.ConnectDTO;
|
||||
import com.provectus.kafka.ui.model.Feature;
|
||||
import com.provectus.kafka.ui.model.InternalBroker;
|
||||
import com.provectus.kafka.ui.model.InternalBrokerConfig;
|
||||
import com.provectus.kafka.ui.model.InternalBrokerDiskUsage;
|
||||
|
@ -103,7 +103,7 @@ public interface ClusterMapper {
|
|||
|
||||
ConnectDTO toKafkaConnect(ClustersProperties.ConnectCluster connect);
|
||||
|
||||
List<ClusterDTO.FeaturesEnum> toFeaturesEnum(List<Feature> features);
|
||||
List<ClusterDTO.FeaturesEnum> toFeaturesEnum(List<ClusterFeature> features);
|
||||
|
||||
default List<PartitionDTO> map(Map<Integer, InternalPartition> map) {
|
||||
return map.values().stream().map(this::toPartition).collect(Collectors.toList());
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
package com.provectus.kafka.ui.model;
|
||||
|
||||
public enum Feature {
|
||||
public enum ClusterFeature {
|
||||
KAFKA_CONNECT,
|
||||
KSQL_DB,
|
||||
SCHEMA_REGISTRY,
|
|
@ -23,7 +23,7 @@ public class InternalClusterState {
|
|||
private Integer underReplicatedPartitionCount;
|
||||
private List<BrokerDiskUsageDTO> diskUsage;
|
||||
private String version;
|
||||
private List<Feature> features;
|
||||
private List<ClusterFeature> features;
|
||||
private BigDecimal bytesInPerSec;
|
||||
private BigDecimal bytesOutPerSec;
|
||||
private Boolean readOnly;
|
||||
|
|
|
@ -1,26 +0,0 @@
|
|||
package com.provectus.kafka.ui.model;
|
||||
|
||||
import lombok.Builder;
|
||||
import lombok.Data;
|
||||
import lombok.EqualsAndHashCode;
|
||||
import lombok.RequiredArgsConstructor;
|
||||
|
||||
@Data
|
||||
@RequiredArgsConstructor
|
||||
@Builder
|
||||
@EqualsAndHashCode(onlyExplicitlyIncluded = true)
|
||||
public class JmxConnectionInfo {
|
||||
|
||||
@EqualsAndHashCode.Include
|
||||
private final String url;
|
||||
private final boolean ssl;
|
||||
private final String username;
|
||||
private final String password;
|
||||
|
||||
public JmxConnectionInfo(String url) {
|
||||
this.url = url;
|
||||
this.ssl = false;
|
||||
this.username = null;
|
||||
this.password = null;
|
||||
}
|
||||
}
|
|
@ -26,7 +26,6 @@ public class KafkaCluster {
|
|||
private final String bootstrapServers;
|
||||
private final Properties properties;
|
||||
private final boolean readOnly;
|
||||
private final boolean disableLogDirsCollection;
|
||||
private final MetricsConfig metricsConfig;
|
||||
private final DataMasking masking;
|
||||
private final Supplier<PollingThrottler> throttler;
|
||||
|
|
|
@ -17,4 +17,6 @@ public class MetricsConfig {
|
|||
private final boolean ssl;
|
||||
private final String username;
|
||||
private final String password;
|
||||
private final String keystoreLocation;
|
||||
private final String keystorePassword;
|
||||
}
|
||||
|
|
|
@ -15,7 +15,7 @@ public class Statistics {
|
|||
ServerStatusDTO status;
|
||||
Throwable lastKafkaException;
|
||||
String version;
|
||||
List<Feature> features;
|
||||
List<ClusterFeature> features;
|
||||
ReactiveAdminClient.ClusterDescription clusterDescription;
|
||||
Metrics metrics;
|
||||
InternalLogDirStats logDirInfo;
|
||||
|
|
|
@ -1,5 +1,6 @@
|
|||
package com.provectus.kafka.ui.model.rbac;
|
||||
|
||||
import com.provectus.kafka.ui.model.rbac.permission.ApplicationConfigAction;
|
||||
import com.provectus.kafka.ui.model.rbac.permission.AclAction;
|
||||
import com.provectus.kafka.ui.model.rbac.permission.ClusterConfigAction;
|
||||
import com.provectus.kafka.ui.model.rbac.permission.ConnectAction;
|
||||
|
@ -16,6 +17,8 @@ import org.springframework.util.Assert;
|
|||
@Value
|
||||
public class AccessContext {
|
||||
|
||||
Collection<ApplicationConfigAction> applicationConfigActions;
|
||||
|
||||
String cluster;
|
||||
Collection<ClusterConfigAction> clusterConfigActions;
|
||||
|
||||
|
@ -42,6 +45,7 @@ public class AccessContext {
|
|||
}
|
||||
|
||||
public static final class AccessContextBuilder {
|
||||
private Collection<ApplicationConfigAction> applicationConfigActions = Collections.emptySet();
|
||||
private String cluster;
|
||||
private Collection<ClusterConfigAction> clusterConfigActions = Collections.emptySet();
|
||||
private String topic;
|
||||
|
@ -59,6 +63,12 @@ public class AccessContext {
|
|||
private AccessContextBuilder() {
|
||||
}
|
||||
|
||||
public AccessContextBuilder applicationConfigActions(ApplicationConfigAction... actions) {
|
||||
Assert.isTrue(actions.length > 0, "actions not present");
|
||||
this.applicationConfigActions = List.of(actions);
|
||||
return this;
|
||||
}
|
||||
|
||||
public AccessContextBuilder cluster(String cluster) {
|
||||
this.cluster = cluster;
|
||||
return this;
|
||||
|
@ -132,7 +142,9 @@ public class AccessContext {
|
|||
}
|
||||
|
||||
public AccessContext build() {
|
||||
return new AccessContext(cluster, clusterConfigActions,
|
||||
return new AccessContext(
|
||||
applicationConfigActions,
|
||||
cluster, clusterConfigActions,
|
||||
topic, topicActions,
|
||||
consumerGroup, consumerGroupActions,
|
||||
connect, connectActions,
|
||||
|
|
|
@ -3,6 +3,7 @@ package com.provectus.kafka.ui.model.rbac;
|
|||
import static com.provectus.kafka.ui.model.rbac.Resource.CLUSTERCONFIG;
|
||||
import static com.provectus.kafka.ui.model.rbac.Resource.KSQL;
|
||||
|
||||
import com.provectus.kafka.ui.model.rbac.permission.ApplicationConfigAction;
|
||||
import com.provectus.kafka.ui.model.rbac.permission.ClusterConfigAction;
|
||||
import com.provectus.kafka.ui.model.rbac.permission.ConnectAction;
|
||||
import com.provectus.kafka.ui.model.rbac.permission.ConsumerGroupAction;
|
||||
|
@ -12,11 +13,11 @@ import com.provectus.kafka.ui.model.rbac.permission.TopicAction;
|
|||
import java.util.Arrays;
|
||||
import java.util.List;
|
||||
import java.util.regex.Pattern;
|
||||
import javax.annotation.Nullable;
|
||||
import lombok.EqualsAndHashCode;
|
||||
import lombok.Getter;
|
||||
import lombok.ToString;
|
||||
import org.apache.commons.collections.CollectionUtils;
|
||||
import org.jetbrains.annotations.Nullable;
|
||||
import org.springframework.util.Assert;
|
||||
|
||||
@Getter
|
||||
|
@ -25,18 +26,21 @@ import org.springframework.util.Assert;
|
|||
public class Permission {
|
||||
|
||||
Resource resource;
|
||||
List<String> actions;
|
||||
|
||||
@Nullable
|
||||
Pattern value;
|
||||
List<String> actions;
|
||||
String value;
|
||||
@Nullable
|
||||
transient Pattern compiledValuePattern;
|
||||
|
||||
@SuppressWarnings("unused")
|
||||
public void setResource(String resource) {
|
||||
this.resource = Resource.fromString(resource.toUpperCase());
|
||||
}
|
||||
|
||||
public void setValue(String value) {
|
||||
this.value = Pattern.compile(value);
|
||||
@SuppressWarnings("unused")
|
||||
public void setValue(@Nullable String value) {
|
||||
this.value = value;
|
||||
}
|
||||
|
||||
@SuppressWarnings("unused")
|
||||
|
@ -52,14 +56,17 @@ public class Permission {
|
|||
}
|
||||
|
||||
public void transform() {
|
||||
if (CollectionUtils.isEmpty(actions) || this.actions.stream().noneMatch("ALL"::equalsIgnoreCase)) {
|
||||
return;
|
||||
if (value != null) {
|
||||
this.compiledValuePattern = Pattern.compile(value);
|
||||
}
|
||||
if (CollectionUtils.isNotEmpty(actions) && actions.stream().anyMatch("ALL"::equalsIgnoreCase)) {
|
||||
this.actions = getAllActionValues();
|
||||
}
|
||||
this.actions = getActionValues();
|
||||
}
|
||||
|
||||
private List<String> getActionValues() {
|
||||
private List<String> getAllActionValues() {
|
||||
return switch (this.resource) {
|
||||
case APPLICATIONCONFIG -> Arrays.stream(ApplicationConfigAction.values()).map(Enum::toString).toList();
|
||||
case CLUSTERCONFIG -> Arrays.stream(ClusterConfigAction.values()).map(Enum::toString).toList();
|
||||
case TOPIC -> Arrays.stream(TopicAction.values()).map(Enum::toString).toList();
|
||||
case CONSUMER -> Arrays.stream(ConsumerGroupAction.values()).map(Enum::toString).toList();
|
||||
|
|
|
@ -5,6 +5,7 @@ import org.jetbrains.annotations.Nullable;
|
|||
|
||||
public enum Resource {
|
||||
|
||||
APPLICATIONCONFIG,
|
||||
CLUSTERCONFIG,
|
||||
TOPIC,
|
||||
CONSUMER,
|
||||
|
|
|
@ -0,0 +1,18 @@
|
|||
package com.provectus.kafka.ui.model.rbac.permission;
|
||||
|
||||
import org.apache.commons.lang3.EnumUtils;
|
||||
import org.jetbrains.annotations.Nullable;
|
||||
|
||||
public enum ApplicationConfigAction implements PermissibleAction {
|
||||
|
||||
VIEW,
|
||||
EDIT
|
||||
|
||||
;
|
||||
|
||||
@Nullable
|
||||
public static ApplicationConfigAction fromString(String name) {
|
||||
return EnumUtils.getEnum(ApplicationConfigAction.class, name);
|
||||
}
|
||||
|
||||
}
|
|
@ -9,6 +9,7 @@ import com.provectus.kafka.ui.config.ClustersProperties.SerdeConfig;
|
|||
import com.provectus.kafka.ui.exception.ValidationException;
|
||||
import com.provectus.kafka.ui.serde.api.PropertyResolver;
|
||||
import com.provectus.kafka.ui.serde.api.Serde;
|
||||
import com.provectus.kafka.ui.serdes.builtin.AvroEmbeddedSerde;
|
||||
import com.provectus.kafka.ui.serdes.builtin.Base64Serde;
|
||||
import com.provectus.kafka.ui.serdes.builtin.Int32Serde;
|
||||
import com.provectus.kafka.ui.serdes.builtin.Int64Serde;
|
||||
|
@ -43,6 +44,7 @@ public class SerdesInitializer {
|
|||
.put(Int64Serde.name(), Int64Serde.class)
|
||||
.put(UInt32Serde.name(), UInt32Serde.class)
|
||||
.put(UInt64Serde.name(), UInt64Serde.class)
|
||||
.put(AvroEmbeddedSerde.name(), AvroEmbeddedSerde.class)
|
||||
.put(Base64Serde.name(), Base64Serde.class)
|
||||
.put(UuidBinarySerde.name(), UuidBinarySerde.class)
|
||||
.build(),
|
||||
|
@ -87,21 +89,23 @@ public class SerdesInitializer {
|
|||
|
||||
Map<String, SerdeInstance> registeredSerdes = new LinkedHashMap<>();
|
||||
// initializing serdes from config
|
||||
for (int i = 0; i < clusterProperties.getSerde().size(); i++) {
|
||||
SerdeConfig serdeConfig = clusterProperties.getSerde().get(i);
|
||||
if (Strings.isNullOrEmpty(serdeConfig.getName())) {
|
||||
throw new ValidationException("'name' property not set for serde: " + serdeConfig);
|
||||
if (clusterProperties.getSerde() != null) {
|
||||
for (int i = 0; i < clusterProperties.getSerde().size(); i++) {
|
||||
SerdeConfig serdeConfig = clusterProperties.getSerde().get(i);
|
||||
if (Strings.isNullOrEmpty(serdeConfig.getName())) {
|
||||
throw new ValidationException("'name' property not set for serde: " + serdeConfig);
|
||||
}
|
||||
if (registeredSerdes.containsKey(serdeConfig.getName())) {
|
||||
throw new ValidationException("Multiple serdes with same name: " + serdeConfig.getName());
|
||||
}
|
||||
var instance = createSerdeFromConfig(
|
||||
serdeConfig,
|
||||
new PropertyResolverImpl(env, "kafka.clusters." + clusterIndex + ".serde." + i + ".properties"),
|
||||
clusterPropertiesResolver,
|
||||
globalPropertiesResolver
|
||||
);
|
||||
registeredSerdes.put(serdeConfig.getName(), instance);
|
||||
}
|
||||
if (registeredSerdes.containsKey(serdeConfig.getName())) {
|
||||
throw new ValidationException("Multiple serdes with same name: " + serdeConfig.getName());
|
||||
}
|
||||
var instance = createSerdeFromConfig(
|
||||
serdeConfig,
|
||||
new PropertyResolverImpl(env, "kafka.clusters." + clusterIndex + ".serde." + i + ".properties"),
|
||||
clusterPropertiesResolver,
|
||||
globalPropertiesResolver
|
||||
);
|
||||
registeredSerdes.put(serdeConfig.getName(), instance);
|
||||
}
|
||||
|
||||
// initializing remaining built-in serdes with empty selection patterns
|
||||
|
@ -170,7 +174,7 @@ public class SerdesInitializer {
|
|||
}
|
||||
var clazz = builtInSerdeClasses.get(name);
|
||||
BuiltInSerde serde = createSerdeInstance(clazz);
|
||||
if (serdeConfig.getProperties().isEmpty()) {
|
||||
if (serdeConfig.getProperties() == null || serdeConfig.getProperties().isEmpty()) {
|
||||
if (!autoConfigureSerde(serde, clusterProps, globalProps)) {
|
||||
// no properties provided and serde does not support auto-configuration
|
||||
throw new ValidationException(name + " serde is not configured");
|
||||
|
|
|
@ -0,0 +1,72 @@
|
|||
package com.provectus.kafka.ui.serdes.builtin;
|
||||
|
||||
import com.provectus.kafka.ui.serde.api.DeserializeResult;
|
||||
import com.provectus.kafka.ui.serde.api.PropertyResolver;
|
||||
import com.provectus.kafka.ui.serde.api.RecordHeaders;
|
||||
import com.provectus.kafka.ui.serde.api.SchemaDescription;
|
||||
import com.provectus.kafka.ui.serdes.BuiltInSerde;
|
||||
import io.confluent.kafka.schemaregistry.avro.AvroSchemaUtils;
|
||||
import java.util.Map;
|
||||
import java.util.Optional;
|
||||
import lombok.SneakyThrows;
|
||||
import org.apache.avro.file.DataFileReader;
|
||||
import org.apache.avro.file.SeekableByteArrayInput;
|
||||
import org.apache.avro.generic.GenericDatumReader;
|
||||
|
||||
public class AvroEmbeddedSerde implements BuiltInSerde {
|
||||
|
||||
public static String name() {
|
||||
return "Avro (Embedded)";
|
||||
}
|
||||
|
||||
@Override
|
||||
public void configure(PropertyResolver serdeProperties,
|
||||
PropertyResolver kafkaClusterProperties,
|
||||
PropertyResolver globalProperties) {
|
||||
}
|
||||
|
||||
@Override
|
||||
public Optional<String> getDescription() {
|
||||
return Optional.empty();
|
||||
}
|
||||
|
||||
@Override
|
||||
public Optional<SchemaDescription> getSchema(String topic, Target type) {
|
||||
return Optional.empty();
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean canDeserialize(String topic, Target type) {
|
||||
return true;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean canSerialize(String topic, Target type) {
|
||||
return false;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Serializer serializer(String topic, Target type) {
|
||||
throw new IllegalStateException();
|
||||
}
|
||||
|
||||
@Override
|
||||
public Deserializer deserializer(String topic, Target type) {
|
||||
return new Deserializer() {
|
||||
@SneakyThrows
|
||||
@Override
|
||||
public DeserializeResult deserialize(RecordHeaders headers, byte[] data) {
|
||||
try (var reader = new DataFileReader<>(new SeekableByteArrayInput(data), new GenericDatumReader<>())) {
|
||||
if (!reader.hasNext()) {
|
||||
// this is a very strange situation, when only the header is present in the payload
|
||||
// returning null in this case
|
||||
return new DeserializeResult(null, DeserializeResult.Type.JSON, Map.of());
|
||||
}
|
||||
Object avroObj = reader.next();
|
||||
String jsonValue = new String(AvroSchemaUtils.toJson(avroObj));
|
||||
return new DeserializeResult(jsonValue, DeserializeResult.Type.JSON, Map.of());
|
||||
}
|
||||
}
|
||||
};
|
||||
}
|
||||
}
|
|
@ -1,9 +1,36 @@
|
|||
package com.provectus.kafka.ui.serdes.builtin;
|
||||
|
||||
import com.google.common.annotations.VisibleForTesting;
|
||||
import com.google.common.base.Preconditions;
|
||||
import com.google.protobuf.AnyProto;
|
||||
import com.google.protobuf.ApiProto;
|
||||
import com.google.protobuf.DescriptorProtos;
|
||||
import com.google.protobuf.Descriptors;
|
||||
import com.google.protobuf.Descriptors.Descriptor;
|
||||
import com.google.protobuf.DurationProto;
|
||||
import com.google.protobuf.DynamicMessage;
|
||||
import com.google.protobuf.EmptyProto;
|
||||
import com.google.protobuf.FieldMaskProto;
|
||||
import com.google.protobuf.SourceContextProto;
|
||||
import com.google.protobuf.StructProto;
|
||||
import com.google.protobuf.TimestampProto;
|
||||
import com.google.protobuf.TypeProto;
|
||||
import com.google.protobuf.WrappersProto;
|
||||
import com.google.protobuf.util.JsonFormat;
|
||||
import com.google.type.ColorProto;
|
||||
import com.google.type.DateProto;
|
||||
import com.google.type.DateTimeProto;
|
||||
import com.google.type.DayOfWeekProto;
|
||||
import com.google.type.ExprProto;
|
||||
import com.google.type.FractionProto;
|
||||
import com.google.type.IntervalProto;
|
||||
import com.google.type.LatLngProto;
|
||||
import com.google.type.MoneyProto;
|
||||
import com.google.type.MonthProto;
|
||||
import com.google.type.PhoneNumberProto;
|
||||
import com.google.type.PostalAddressProto;
|
||||
import com.google.type.QuaternionProto;
|
||||
import com.google.type.TimeOfDayProto;
|
||||
import com.provectus.kafka.ui.exception.ValidationException;
|
||||
import com.provectus.kafka.ui.serde.api.DeserializeResult;
|
||||
import com.provectus.kafka.ui.serde.api.PropertyResolver;
|
||||
|
@ -11,13 +38,19 @@ import com.provectus.kafka.ui.serde.api.RecordHeaders;
|
|||
import com.provectus.kafka.ui.serde.api.SchemaDescription;
|
||||
import com.provectus.kafka.ui.serdes.BuiltInSerde;
|
||||
import com.provectus.kafka.ui.util.jsonschema.ProtobufSchemaConverter;
|
||||
import com.squareup.wire.schema.ErrorCollector;
|
||||
import com.squareup.wire.schema.Linker;
|
||||
import com.squareup.wire.schema.Loader;
|
||||
import com.squareup.wire.schema.Location;
|
||||
import com.squareup.wire.schema.ProtoFile;
|
||||
import com.squareup.wire.schema.internal.parser.ProtoFileElement;
|
||||
import com.squareup.wire.schema.internal.parser.ProtoParser;
|
||||
import io.confluent.kafka.schemaregistry.protobuf.ProtobufSchema;
|
||||
import io.confluent.kafka.schemaregistry.protobuf.ProtobufSchemaUtils;
|
||||
import java.io.ByteArrayInputStream;
|
||||
import java.io.IOException;
|
||||
import java.io.UncheckedIOException;
|
||||
import java.nio.file.Files;
|
||||
import java.nio.file.Path;
|
||||
import java.nio.file.Paths;
|
||||
import java.util.Collection;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
|
@ -28,7 +61,10 @@ import java.util.stream.Collectors;
|
|||
import java.util.stream.Stream;
|
||||
import javax.annotation.Nullable;
|
||||
import lombok.SneakyThrows;
|
||||
import lombok.extern.slf4j.Slf4j;
|
||||
import org.jetbrains.annotations.NotNull;
|
||||
|
||||
@Slf4j
|
||||
public class ProtobufFileSerde implements BuiltInSerde {
|
||||
|
||||
public static String name() {
|
||||
|
@ -51,132 +87,35 @@ public class ProtobufFileSerde implements BuiltInSerde {
|
|||
@Override
|
||||
public boolean canBeAutoConfigured(PropertyResolver kafkaClusterProperties,
|
||||
PropertyResolver globalProperties) {
|
||||
Optional<String> protobufFile = kafkaClusterProperties.getProperty("protobufFile", String.class);
|
||||
Optional<List<String>> protobufFiles = kafkaClusterProperties.getListProperty("protobufFiles", String.class);
|
||||
return protobufFile.isPresent() || protobufFiles.filter(files -> !files.isEmpty()).isPresent();
|
||||
return Configuration.canBeAutoConfigured(kafkaClusterProperties);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void autoConfigure(PropertyResolver kafkaClusterProperties,
|
||||
PropertyResolver globalProperties) {
|
||||
configure(kafkaClusterProperties);
|
||||
configure(Configuration.create(kafkaClusterProperties));
|
||||
}
|
||||
|
||||
@Override
|
||||
public void configure(PropertyResolver serdeProperties,
|
||||
PropertyResolver kafkaClusterProperties,
|
||||
PropertyResolver globalProperties) {
|
||||
configure(serdeProperties);
|
||||
}
|
||||
|
||||
private void configure(PropertyResolver properties) {
|
||||
Map<Path, ProtobufSchema> protobufSchemas = joinPathProperties(properties).stream()
|
||||
.map(path -> Map.entry(path, new ProtobufSchema(readFileAsString(path))))
|
||||
.collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
|
||||
|
||||
// Load all referenced message schemas and store their source proto file with the descriptors
|
||||
Map<Descriptor, Path> descriptorPaths = new HashMap<>();
|
||||
Optional<String> protobufMessageName = properties.getProperty("protobufMessageName", String.class);
|
||||
protobufMessageName.ifPresent(messageName -> addProtobufSchema(descriptorPaths, protobufSchemas, messageName));
|
||||
|
||||
Optional<String> protobufMessageNameForKey =
|
||||
properties.getProperty("protobufMessageNameForKey", String.class);
|
||||
protobufMessageNameForKey
|
||||
.ifPresent(messageName -> addProtobufSchema(descriptorPaths, protobufSchemas, messageName));
|
||||
|
||||
Optional<Map<String, String>> protobufMessageNameByTopic =
|
||||
properties.getMapProperty("protobufMessageNameByTopic", String.class, String.class);
|
||||
protobufMessageNameByTopic
|
||||
.ifPresent(messageNamesByTopic -> addProtobufSchemas(descriptorPaths, protobufSchemas, messageNamesByTopic));
|
||||
|
||||
Optional<Map<String, String>> protobufMessageNameForKeyByTopic =
|
||||
properties.getMapProperty("protobufMessageNameForKeyByTopic", String.class, String.class);
|
||||
protobufMessageNameForKeyByTopic
|
||||
.ifPresent(messageNamesByTopic -> addProtobufSchemas(descriptorPaths, protobufSchemas, messageNamesByTopic));
|
||||
|
||||
// Fill dictionary for descriptor lookup by full message name
|
||||
Map<String, Descriptor> descriptorMap = descriptorPaths.keySet().stream()
|
||||
.collect(Collectors.toMap(Descriptor::getFullName, Function.identity()));
|
||||
|
||||
configure(
|
||||
protobufMessageName.map(descriptorMap::get).orElse(null),
|
||||
protobufMessageNameForKey.map(descriptorMap::get).orElse(null),
|
||||
descriptorPaths,
|
||||
protobufMessageNameByTopic.map(map -> populateDescriptors(descriptorMap, map)).orElse(Map.of()),
|
||||
protobufMessageNameForKeyByTopic.map(map -> populateDescriptors(descriptorMap, map)).orElse(Map.of())
|
||||
);
|
||||
configure(Configuration.create(serdeProperties));
|
||||
}
|
||||
|
||||
@VisibleForTesting
|
||||
void configure(
|
||||
@Nullable Descriptor defaultMessageDescriptor,
|
||||
@Nullable Descriptor defaultKeyMessageDescriptor,
|
||||
Map<Descriptor, Path> descriptorPaths,
|
||||
Map<String, Descriptor> messageDescriptorMap,
|
||||
Map<String, Descriptor> keyMessageDescriptorMap) {
|
||||
if (defaultMessageDescriptor == null
|
||||
&& defaultKeyMessageDescriptor == null
|
||||
&& messageDescriptorMap.isEmpty()
|
||||
&& keyMessageDescriptorMap.isEmpty()) {
|
||||
void configure(Configuration configuration) {
|
||||
if (configuration.defaultMessageDescriptor() == null
|
||||
&& configuration.defaultKeyMessageDescriptor() == null
|
||||
&& configuration.messageDescriptorMap().isEmpty()
|
||||
&& configuration.keyMessageDescriptorMap().isEmpty()) {
|
||||
throw new ValidationException("Neither default, not per-topic descriptors defined for " + name() + " serde");
|
||||
}
|
||||
this.defaultMessageDescriptor = defaultMessageDescriptor;
|
||||
this.defaultKeyMessageDescriptor = defaultKeyMessageDescriptor;
|
||||
this.descriptorPaths = descriptorPaths;
|
||||
this.messageDescriptorMap = messageDescriptorMap;
|
||||
this.keyMessageDescriptorMap = keyMessageDescriptorMap;
|
||||
}
|
||||
|
||||
private static void addProtobufSchema(Map<Descriptor, Path> descriptorPaths,
|
||||
Map<Path, ProtobufSchema> protobufSchemas,
|
||||
String messageName) {
|
||||
var descriptorAndPath = getDescriptorAndPath(protobufSchemas, messageName);
|
||||
descriptorPaths.put(descriptorAndPath.getKey(), descriptorAndPath.getValue());
|
||||
}
|
||||
|
||||
private static void addProtobufSchemas(Map<Descriptor, Path> descriptorPaths,
|
||||
Map<Path, ProtobufSchema> protobufSchemas,
|
||||
Map<String, String> messageNamesByTopic) {
|
||||
messageNamesByTopic.values().stream()
|
||||
.map(msgName -> getDescriptorAndPath(protobufSchemas, msgName))
|
||||
.forEach(entry -> descriptorPaths.put(entry.getKey(), entry.getValue()));
|
||||
}
|
||||
|
||||
private static List<Path> joinPathProperties(PropertyResolver propertyResolver) {
|
||||
return Stream.concat(
|
||||
propertyResolver.getProperty("protobufFile", String.class).map(List::of).stream(),
|
||||
propertyResolver.getListProperty("protobufFiles", String.class).stream())
|
||||
.flatMap(Collection::stream)
|
||||
.distinct()
|
||||
.map(Path::of)
|
||||
.collect(Collectors.toList());
|
||||
}
|
||||
|
||||
private static Map.Entry<Descriptor, Path> getDescriptorAndPath(Map<Path, ProtobufSchema> protobufSchemas,
|
||||
String msgName) {
|
||||
return protobufSchemas.entrySet().stream()
|
||||
.filter(schema -> schema.getValue().toDescriptor(msgName) != null)
|
||||
.map(schema -> Map.entry(schema.getValue().toDescriptor(msgName), schema.getKey()))
|
||||
.findFirst()
|
||||
.orElseThrow(() -> new NullPointerException(
|
||||
"The given message type not found in protobuf definition: " + msgName));
|
||||
}
|
||||
|
||||
private static String readFileAsString(Path path) {
|
||||
try {
|
||||
return Files.readString(path);
|
||||
} catch (IOException e) {
|
||||
throw new UncheckedIOException(e);
|
||||
}
|
||||
}
|
||||
|
||||
private Map<String, Descriptor> populateDescriptors(Map<String, Descriptor> descriptorMap,
|
||||
Map<String, String> messageNameMap) {
|
||||
Map<String, Descriptor> descriptors = new HashMap<>();
|
||||
for (Map.Entry<String, String> entry : messageNameMap.entrySet()) {
|
||||
descriptors.put(entry.getKey(), descriptorMap.get(entry.getValue()));
|
||||
}
|
||||
return descriptors;
|
||||
this.defaultMessageDescriptor = configuration.defaultMessageDescriptor();
|
||||
this.defaultKeyMessageDescriptor = configuration.defaultKeyMessageDescriptor();
|
||||
this.descriptorPaths = configuration.descriptorPaths();
|
||||
this.messageDescriptorMap = configuration.messageDescriptorMap();
|
||||
this.keyMessageDescriptorMap = configuration.keyMessageDescriptorMap();
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -249,4 +188,238 @@ public class ProtobufFileSerde implements BuiltInSerde {
|
|||
Map.of("messageName", descriptor.getFullName())
|
||||
);
|
||||
}
|
||||
|
||||
@SneakyThrows
|
||||
private static String readFileAsString(Path path) {
|
||||
return Files.readString(path);
|
||||
}
|
||||
|
||||
//----------------------------------------------------------------------------------------------------------------
|
||||
|
||||
@VisibleForTesting
|
||||
record Configuration(@Nullable Descriptor defaultMessageDescriptor,
|
||||
@Nullable Descriptor defaultKeyMessageDescriptor,
|
||||
Map<Descriptor, Path> descriptorPaths,
|
||||
Map<String, Descriptor> messageDescriptorMap,
|
||||
Map<String, Descriptor> keyMessageDescriptorMap) {
|
||||
|
||||
static boolean canBeAutoConfigured(PropertyResolver kafkaClusterProperties) {
|
||||
Optional<String> protobufFile = kafkaClusterProperties.getProperty("protobufFile", String.class);
|
||||
Optional<List<String>> protobufFiles = kafkaClusterProperties.getListProperty("protobufFiles", String.class);
|
||||
Optional<String> protobufFilesDir = kafkaClusterProperties.getProperty("protobufFilesDir", String.class);
|
||||
return protobufFilesDir.isPresent()
|
||||
|| protobufFile.isPresent()
|
||||
|| protobufFiles.filter(files -> !files.isEmpty()).isPresent();
|
||||
}
|
||||
|
||||
static Configuration create(PropertyResolver properties) {
|
||||
var protobufSchemas = loadSchemas(
|
||||
properties.getProperty("protobufFile", String.class),
|
||||
properties.getListProperty("protobufFiles", String.class),
|
||||
properties.getProperty("protobufFilesDir", String.class)
|
||||
);
|
||||
|
||||
// Load all referenced message schemas and store their source proto file with the descriptors
|
||||
Map<Descriptor, Path> descriptorPaths = new HashMap<>();
|
||||
Optional<String> protobufMessageName = properties.getProperty("protobufMessageName", String.class);
|
||||
protobufMessageName.ifPresent(messageName -> addProtobufSchema(descriptorPaths, protobufSchemas, messageName));
|
||||
|
||||
Optional<String> protobufMessageNameForKey =
|
||||
properties.getProperty("protobufMessageNameForKey", String.class);
|
||||
protobufMessageNameForKey
|
||||
.ifPresent(messageName -> addProtobufSchema(descriptorPaths, protobufSchemas, messageName));
|
||||
|
||||
Optional<Map<String, String>> protobufMessageNameByTopic =
|
||||
properties.getMapProperty("protobufMessageNameByTopic", String.class, String.class);
|
||||
protobufMessageNameByTopic
|
||||
.ifPresent(messageNamesByTopic -> addProtobufSchemas(descriptorPaths, protobufSchemas, messageNamesByTopic));
|
||||
|
||||
Optional<Map<String, String>> protobufMessageNameForKeyByTopic =
|
||||
properties.getMapProperty("protobufMessageNameForKeyByTopic", String.class, String.class);
|
||||
protobufMessageNameForKeyByTopic
|
||||
.ifPresent(messageNamesByTopic -> addProtobufSchemas(descriptorPaths, protobufSchemas, messageNamesByTopic));
|
||||
|
||||
// Fill dictionary for descriptor lookup by full message name
|
||||
Map<String, Descriptor> descriptorMap = descriptorPaths.keySet().stream()
|
||||
.collect(Collectors.toMap(Descriptor::getFullName, Function.identity()));
|
||||
|
||||
return new Configuration(
|
||||
protobufMessageName.map(descriptorMap::get).orElse(null),
|
||||
protobufMessageNameForKey.map(descriptorMap::get).orElse(null),
|
||||
descriptorPaths,
|
||||
protobufMessageNameByTopic.map(map -> populateDescriptors(descriptorMap, map)).orElse(Map.of()),
|
||||
protobufMessageNameForKeyByTopic.map(map -> populateDescriptors(descriptorMap, map)).orElse(Map.of())
|
||||
);
|
||||
}
|
||||
|
||||
private static Map.Entry<Descriptor, Path> getDescriptorAndPath(Map<Path, ProtobufSchema> protobufSchemas,
|
||||
String msgName) {
|
||||
return protobufSchemas.entrySet().stream()
|
||||
.filter(schema -> schema.getValue().toDescriptor(msgName) != null)
|
||||
.map(schema -> Map.entry(schema.getValue().toDescriptor(msgName), schema.getKey()))
|
||||
.findFirst()
|
||||
.orElseThrow(() -> new NullPointerException(
|
||||
"The given message type not found in protobuf definition: " + msgName));
|
||||
}
|
||||
|
||||
private static Map<String, Descriptor> populateDescriptors(Map<String, Descriptor> descriptorMap,
|
||||
Map<String, String> messageNameMap) {
|
||||
Map<String, Descriptor> descriptors = new HashMap<>();
|
||||
for (Map.Entry<String, String> entry : messageNameMap.entrySet()) {
|
||||
descriptors.put(entry.getKey(), descriptorMap.get(entry.getValue()));
|
||||
}
|
||||
return descriptors;
|
||||
}
|
||||
|
||||
@VisibleForTesting
|
||||
static Map<Path, ProtobufSchema> loadSchemas(Optional<String> protobufFile,
|
||||
Optional<List<String>> protobufFiles,
|
||||
Optional<String> protobufFilesDir) {
|
||||
if (protobufFilesDir.isPresent()) {
|
||||
if (protobufFile.isPresent() || protobufFiles.isPresent()) {
|
||||
log.warn("protobufFile and protobufFiles properties will be ignored, since protobufFilesDir provided");
|
||||
}
|
||||
List<ProtoFile> loadedFiles = new ProtoSchemaLoader(protobufFilesDir.get()).load();
|
||||
Map<String, ProtoFileElement> allPaths = loadedFiles.stream()
|
||||
.collect(Collectors.toMap(f -> f.getLocation().getPath(), ProtoFile::toElement));
|
||||
return loadedFiles.stream()
|
||||
.collect(Collectors.toMap(
|
||||
f -> Path.of(f.getLocation().getBase(), f.getLocation().getPath()),
|
||||
f -> new ProtobufSchema(f.toElement(), List.of(), allPaths)));
|
||||
}
|
||||
//Supporting for backward-compatibility. Normally, protobufFilesDir setting should be used
|
||||
return Stream.concat(
|
||||
protobufFile.stream(),
|
||||
protobufFiles.stream().flatMap(Collection::stream)
|
||||
)
|
||||
.distinct()
|
||||
.map(Path::of)
|
||||
.collect(Collectors.toMap(path -> path, path -> new ProtobufSchema(readFileAsString(path))));
|
||||
}
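A sketch of the cluster-level YAML this loader expects, assuming these keys are resolved through the kafka.clusters[i] property resolver used above; the directory, topic names and message types are hypothetical placeholders. When protobufFilesDir is set, protobufFile/protobufFiles are ignored, as the warning above states:

kafka:
  clusters:
    - name: local                                # hypothetical cluster name
      protobufFilesDir: /etc/kafkaui/protos      # hypothetical directory, walked recursively for *.proto files
      protobufMessageName: com.example.Envelope  # hypothetical default message type for values
      protobufMessageNameByTopic:
        orders: com.example.Order                # hypothetical topic -> message type mapping
      protobufMessageNameForKeyByTopic:
        orders: com.example.OrderKey             # hypothetical key message type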
|
||||
|
||||
private static void addProtobufSchema(Map<Descriptor, Path> descriptorPaths,
|
||||
Map<Path, ProtobufSchema> protobufSchemas,
|
||||
String messageName) {
|
||||
var descriptorAndPath = getDescriptorAndPath(protobufSchemas, messageName);
|
||||
descriptorPaths.put(descriptorAndPath.getKey(), descriptorAndPath.getValue());
|
||||
}
|
||||
|
||||
private static void addProtobufSchemas(Map<Descriptor, Path> descriptorPaths,
|
||||
Map<Path, ProtobufSchema> protobufSchemas,
|
||||
Map<String, String> messageNamesByTopic) {
|
||||
messageNamesByTopic.values().stream()
|
||||
.map(msgName -> getDescriptorAndPath(protobufSchemas, msgName))
|
||||
.forEach(entry -> descriptorPaths.put(entry.getKey(), entry.getValue()));
|
||||
}
|
||||
}
|
||||
|
||||
static class ProtoSchemaLoader {
|
||||
|
||||
private final Path baseLocation;
|
||||
|
||||
ProtoSchemaLoader(String baseLocationStr) {
|
||||
this.baseLocation = Path.of(baseLocationStr);
|
||||
if (!Files.isReadable(baseLocation)) {
|
||||
throw new ValidationException("proto files directory not readable");
|
||||
}
|
||||
}
|
||||
|
||||
List<ProtoFile> load() {
|
||||
Map<String, ProtoFile> knownTypes = knownProtoFiles();
|
||||
|
||||
Map<String, ProtoFile> filesByLocations = new HashMap<>();
|
||||
filesByLocations.putAll(knownTypes);
|
||||
filesByLocations.putAll(loadFilesWithLocations());
|
||||
|
||||
Linker linker = new Linker(
|
||||
createFilesLoader(filesByLocations),
|
||||
new ErrorCollector(),
|
||||
true,
|
||||
true
|
||||
);
|
||||
var schema = linker.link(filesByLocations.values());
|
||||
linker.getErrors().throwIfNonEmpty();
|
||||
return schema.getProtoFiles()
|
||||
.stream()
|
||||
.filter(p -> !knownTypes.containsKey(p.getLocation().getPath())) //filtering known types
|
||||
.toList();
|
||||
}
|
||||
|
||||
private Map<String, ProtoFile> knownProtoFiles() {
|
||||
return Stream.of(
|
||||
loadKnownProtoFile("google/type/color.proto", ColorProto.getDescriptor()),
|
||||
loadKnownProtoFile("google/type/date.proto", DateProto.getDescriptor()),
|
||||
loadKnownProtoFile("google/type/datetime.proto", DateTimeProto.getDescriptor()),
|
||||
loadKnownProtoFile("google/type/dayofweek.proto", DayOfWeekProto.getDescriptor()),
|
||||
loadKnownProtoFile("google/type/decimal.proto", com.google.type.DecimalProto.getDescriptor()),
|
||||
loadKnownProtoFile("google/type/expr.proto", ExprProto.getDescriptor()),
|
||||
loadKnownProtoFile("google/type/fraction.proto", FractionProto.getDescriptor()),
|
||||
loadKnownProtoFile("google/type/interval.proto", IntervalProto.getDescriptor()),
|
||||
loadKnownProtoFile("google/type/latlng.proto", LatLngProto.getDescriptor()),
|
||||
loadKnownProtoFile("google/type/money.proto", MoneyProto.getDescriptor()),
|
||||
loadKnownProtoFile("google/type/month.proto", MonthProto.getDescriptor()),
|
||||
loadKnownProtoFile("google/type/phone_number.proto", PhoneNumberProto.getDescriptor()),
|
||||
loadKnownProtoFile("google/type/postal_address.proto", PostalAddressProto.getDescriptor()),
|
||||
loadKnownProtoFile("google/type/quaternion.prot", QuaternionProto.getDescriptor()),
|
||||
loadKnownProtoFile("google/type/timeofday.proto", TimeOfDayProto.getDescriptor()),
|
||||
loadKnownProtoFile("google/protobuf/any.proto", AnyProto.getDescriptor()),
|
||||
loadKnownProtoFile("google/protobuf/api.proto", ApiProto.getDescriptor()),
|
||||
loadKnownProtoFile("google/protobuf/descriptor.proto", DescriptorProtos.getDescriptor()),
|
||||
loadKnownProtoFile("google/protobuf/duration.proto", DurationProto.getDescriptor()),
|
||||
loadKnownProtoFile("google/protobuf/empty.proto", EmptyProto.getDescriptor()),
|
||||
loadKnownProtoFile("google/protobuf/field_mask.proto", FieldMaskProto.getDescriptor()),
|
||||
loadKnownProtoFile("google/protobuf/source_context.proto", SourceContextProto.getDescriptor()),
|
||||
loadKnownProtoFile("google/protobuf/struct.proto", StructProto.getDescriptor()),
|
||||
loadKnownProtoFile("google/protobuf/timestamp.proto", TimestampProto.getDescriptor()),
|
||||
loadKnownProtoFile("google/protobuf/type.proto", TypeProto.getDescriptor()),
|
||||
loadKnownProtoFile("google/protobuf/wrappers.proto", WrappersProto.getDescriptor())
|
||||
).collect(Collectors.toMap(p -> p.getLocation().getPath(), p -> p));
|
||||
}
|
||||
|
||||
private ProtoFile loadKnownProtoFile(String path, Descriptors.FileDescriptor fileDescriptor) {
|
||||
String protoFileString = null;
|
||||
// a known-type file contains either a message or an enum
|
||||
if (!fileDescriptor.getMessageTypes().isEmpty()) {
|
||||
protoFileString = new ProtobufSchema(fileDescriptor.getMessageTypes().get(0)).canonicalString();
|
||||
} else if (!fileDescriptor.getEnumTypes().isEmpty()) {
|
||||
protoFileString = new ProtobufSchema(fileDescriptor.getEnumTypes().get(0)).canonicalString();
|
||||
} else {
|
||||
throw new IllegalStateException();
|
||||
}
|
||||
return ProtoFile.Companion.get(ProtoParser.Companion.parse(Location.get(path), protoFileString));
|
||||
}
|
||||
|
||||
private Loader createFilesLoader(Map<String, ProtoFile> files) {
|
||||
return new Loader() {
|
||||
@Override
|
||||
public @NotNull ProtoFile load(@NotNull String path) {
|
||||
return Preconditions.checkNotNull(files.get(path), "ProtoFile not found for import '%s'", path);
|
||||
}
|
||||
|
||||
@Override
|
||||
public @NotNull Loader withErrors(@NotNull ErrorCollector errorCollector) {
|
||||
return this;
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
@SneakyThrows
|
||||
private Map<String, ProtoFile> loadFilesWithLocations() {
|
||||
Map<String, ProtoFile> filesByLocations = new HashMap<>();
|
||||
try (var files = Files.walk(baseLocation)) {
|
||||
files.filter(p -> !Files.isDirectory(p) && p.toString().endsWith(".proto"))
|
||||
.forEach(path -> {
|
||||
// relative path will be used as "import" statement
|
||||
String relativePath = baseLocation.relativize(path).toString();
|
||||
var protoFileElement = ProtoParser.Companion.parse(
|
||||
Location.get(baseLocation.toString(), relativePath),
|
||||
readFileAsString(path)
|
||||
);
|
||||
filesByLocations.put(relativePath, ProtoFile.Companion.get(protoFileElement));
|
||||
});
|
||||
}
|
||||
return filesByLocations;
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@ -70,10 +70,10 @@ public class SchemaRegistrySerde implements BuiltInSerde {
|
|||
urls,
|
||||
kafkaClusterProperties.getProperty("schemaRegistryAuth.username", String.class).orElse(null),
|
||||
kafkaClusterProperties.getProperty("schemaRegistryAuth.password", String.class).orElse(null),
|
||||
kafkaClusterProperties.getProperty("schemaRegistrySSL.keystoreLocation", String.class).orElse(null),
|
||||
kafkaClusterProperties.getProperty("schemaRegistrySSL.keystorePassword", String.class).orElse(null),
|
||||
kafkaClusterProperties.getProperty("schemaRegistrySSL.truststoreLocation", String.class).orElse(null),
|
||||
kafkaClusterProperties.getProperty("schemaRegistrySSL.truststorePassword", String.class).orElse(null)
|
||||
kafkaClusterProperties.getProperty("schemaRegistrySsl.keystoreLocation", String.class).orElse(null),
|
||||
kafkaClusterProperties.getProperty("schemaRegistrySsl.keystorePassword", String.class).orElse(null),
|
||||
kafkaClusterProperties.getProperty("ssl.truststoreLocation", String.class).orElse(null),
|
||||
kafkaClusterProperties.getProperty("ssl.truststorePassword", String.class).orElse(null)
|
||||
),
|
||||
kafkaClusterProperties.getProperty("schemaRegistryKeySchemaNameTemplate", String.class).orElse("%s-key"),
|
||||
kafkaClusterProperties.getProperty("schemaRegistrySchemaNameTemplate", String.class).orElse("%s-value"),
|
||||
|
@ -98,12 +98,12 @@ public class SchemaRegistrySerde implements BuiltInSerde {
|
|||
serdeProperties.getProperty("password", String.class).orElse(null),
|
||||
serdeProperties.getProperty("keystoreLocation", String.class).orElse(null),
|
||||
serdeProperties.getProperty("keystorePassword", String.class).orElse(null),
|
||||
serdeProperties.getProperty("truststoreLocation", String.class).orElse(null),
|
||||
serdeProperties.getProperty("truststorePassword", String.class).orElse(null)
|
||||
kafkaClusterProperties.getProperty("ssl.truststoreLocation", String.class).orElse(null),
|
||||
kafkaClusterProperties.getProperty("ssl.truststorePassword", String.class).orElse(null)
|
||||
),
|
||||
serdeProperties.getProperty("keySchemaNameTemplate", String.class).orElse("%s-key"),
|
||||
serdeProperties.getProperty("schemaNameTemplate", String.class).orElse("%s-value"),
|
||||
kafkaClusterProperties.getProperty("checkSchemaExistenceForDeserialize", Boolean.class)
|
||||
serdeProperties.getProperty("checkSchemaExistenceForDeserialize", Boolean.class)
|
||||
.orElse(false)
|
||||
);
|
||||
}
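For orientation, a sketch of the cluster-level keys this change now reads (values are placeholders, not from the commit); note that the truststore comes from the shared cluster-level ssl block rather than serde-specific properties:

kafka:
  clusters:
    - name: local                                  # hypothetical cluster name
      schemaRegistryAuth:
        username: sr-user                          # placeholder
        password: sr-pass                          # placeholder
      schemaRegistrySsl:
        keystoreLocation: /certs/sr-keystore.jks   # placeholder keystore path
        keystorePassword: changeit                 # placeholder
      ssl:
        truststoreLocation: /certs/truststore.jks  # placeholder, cluster-wide truststore
        truststorePassword: changeit               # placeholder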
|
||||
|
@ -148,15 +148,15 @@ public class SchemaRegistrySerde implements BuiltInSerde {
|
|||
trustStoreLocation);
|
||||
configs.put(SchemaRegistryClientConfig.CLIENT_NAMESPACE + SslConfigs.SSL_TRUSTSTORE_PASSWORD_CONFIG,
|
||||
trustStorePassword);
|
||||
}
|
||||
|
||||
if (keyStoreLocation != null) {
|
||||
configs.put(SchemaRegistryClientConfig.CLIENT_NAMESPACE + SslConfigs.SSL_KEYSTORE_LOCATION_CONFIG,
|
||||
keyStoreLocation);
|
||||
configs.put(SchemaRegistryClientConfig.CLIENT_NAMESPACE + SslConfigs.SSL_KEYSTORE_PASSWORD_CONFIG,
|
||||
keyStorePassword);
|
||||
configs.put(SchemaRegistryClientConfig.CLIENT_NAMESPACE + SslConfigs.SSL_KEY_PASSWORD_CONFIG,
|
||||
keyStorePassword);
|
||||
}
|
||||
if (keyStoreLocation != null && keyStorePassword != null) {
|
||||
configs.put(SchemaRegistryClientConfig.CLIENT_NAMESPACE + SslConfigs.SSL_KEYSTORE_LOCATION_CONFIG,
|
||||
keyStoreLocation);
|
||||
configs.put(SchemaRegistryClientConfig.CLIENT_NAMESPACE + SslConfigs.SSL_KEYSTORE_PASSWORD_CONFIG,
|
||||
keyStorePassword);
|
||||
configs.put(SchemaRegistryClientConfig.CLIENT_NAMESPACE + SslConfigs.SSL_KEY_PASSWORD_CONFIG,
|
||||
keyStorePassword);
|
||||
}
|
||||
|
||||
return new CachedSchemaRegistryClient(
|
||||
|
|
|
@@ -1,10 +1,13 @@
package com.provectus.kafka.ui.service;

import com.provectus.kafka.ui.model.KafkaCluster;
import com.provectus.kafka.ui.util.SslPropertiesUtil;
import java.io.Closeable;
import java.time.Instant;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicLong;
import lombok.RequiredArgsConstructor;
import lombok.Setter;
import lombok.extern.slf4j.Slf4j;
@@ -18,6 +21,9 @@ import reactor.core.publisher.Mono;
@RequiredArgsConstructor
@Slf4j
public class AdminClientServiceImpl implements AdminClientService, Closeable {

  private static final AtomicLong CLIENT_ID_SEQ = new AtomicLong();

  private final Map<String, ReactiveAdminClient> adminClientCache = new ConcurrentHashMap<>();
  @Setter // used in tests
  @Value("${kafka.admin-client-timeout:30000}")
@@ -33,14 +39,16 @@ public class AdminClientServiceImpl implements AdminClientService, Closeable {
  private Mono<ReactiveAdminClient> createAdminClient(KafkaCluster cluster) {
    return Mono.fromSupplier(() -> {
      Properties properties = new Properties();
      SslPropertiesUtil.addKafkaSslProperties(cluster.getOriginalProperties().getSsl(), properties);
      properties.putAll(cluster.getProperties());
      properties
          .put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, cluster.getBootstrapServers());
      properties.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, cluster.getBootstrapServers());
      properties.put(AdminClientConfig.REQUEST_TIMEOUT_MS_CONFIG, clientTimeout);
      properties.putIfAbsent(AdminClientConfig.CLIENT_ID_CONFIG, "kafka-ui-admin-client-" + System.currentTimeMillis());
      properties.putIfAbsent(
          AdminClientConfig.CLIENT_ID_CONFIG,
          "kafka-ui-admin-" + Instant.now().getEpochSecond() + "-" + CLIENT_ID_SEQ.incrementAndGet()
      );
      return AdminClient.create(properties);
    })
        .flatMap(ReactiveAdminClient::create)
    }).flatMap(ac -> ReactiveAdminClient.create(ac).doOnError(th -> ac.close()))
        .onErrorMap(th -> new IllegalStateException(
            "Error while creating AdminClient for Cluster " + cluster.getName(), th));
  }
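As a side note, a self-contained hedged sketch of the client-id scheme the new lines switch to: epoch seconds plus a process-wide counter, so admin clients created within the same millisecond no longer collide on client.id. The class name is illustrative, not project code.

import java.time.Instant;
import java.util.concurrent.atomic.AtomicLong;

class AdminClientIdSketch {
  private static final AtomicLong CLIENT_ID_SEQ = new AtomicLong();

  // Produces ids like "kafka-ui-admin-1700000000-1", "kafka-ui-admin-1700000000-2", ...
  static String nextClientId() {
    return "kafka-ui-admin-" + Instant.now().getEpochSecond() + "-" + CLIENT_ID_SEQ.incrementAndGet();
  }
}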
@@ -7,6 +7,7 @@ import com.provectus.kafka.ui.model.InternalTopicConsumerGroup;
import com.provectus.kafka.ui.model.KafkaCluster;
import com.provectus.kafka.ui.model.SortOrderDTO;
import com.provectus.kafka.ui.service.rbac.AccessControlService;
import com.provectus.kafka.ui.util.SslPropertiesUtil;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Comparator;
@@ -214,6 +215,7 @@ public class ConsumerGroupService {
  public KafkaConsumer<Bytes, Bytes> createConsumer(KafkaCluster cluster,
                                                    Map<String, Object> properties) {
    Properties props = new Properties();
    SslPropertiesUtil.addKafkaSslProperties(cluster.getOriginalProperties().getSsl(), props);
    props.putAll(cluster.getProperties());
    props.put(ConsumerConfig.CLIENT_ID_CONFIG, "kafka-ui-consumer-" + System.currentTimeMillis());
    props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, cluster.getBootstrapServers());
@@ -1,8 +1,7 @@
package com.provectus.kafka.ui.service;

import com.provectus.kafka.ui.model.Feature;
import com.provectus.kafka.ui.model.ClusterFeature;
import com.provectus.kafka.ui.model.KafkaCluster;
import com.provectus.kafka.ui.service.ReactiveAdminClient.SupportedFeature;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
@@ -26,25 +25,28 @@ public class FeatureService {

  private final AdminClientService adminClientService;

  public Mono<List<Feature>> getAvailableFeatures(KafkaCluster cluster, @Nullable Node controller) {
    List<Mono<Feature>> features = new ArrayList<>();
  public Mono<List<ClusterFeature>> getAvailableFeatures(KafkaCluster cluster, @Nullable Node controller) {
    List<Mono<ClusterFeature>> features = new ArrayList<>();

    if (Optional.ofNullable(cluster.getConnectsClients())
        .filter(Predicate.not(Map::isEmpty))
        .isPresent()) {
      features.add(Mono.just(Feature.KAFKA_CONNECT));
      features.add(Mono.just(ClusterFeature.KAFKA_CONNECT));
    }

    if (cluster.getKsqlClient() != null) {
      features.add(Mono.just(Feature.KSQL_DB));
      features.add(Mono.just(ClusterFeature.KSQL_DB));
    }

    if (cluster.getSchemaRegistryClient() != null) {
      features.add(Mono.just(Feature.SCHEMA_REGISTRY));
      features.add(Mono.just(ClusterFeature.SCHEMA_REGISTRY));
    }

    if (controller != null) {
      features.add(topicDeletion(cluster, controller));
      features.add(
          isTopicDeletionEnabled(cluster, controller)
              .flatMap(r -> Boolean.TRUE.equals(r) ? Mono.just(ClusterFeature.TOPIC_DELETION) : Mono.empty())
      );
    }

    features.add(acl(cluster));
@@ -52,7 +54,7 @@ public class FeatureService {
    return Flux.fromIterable(features).flatMap(m -> m).collectList();
  }

  private Mono<Feature> topicDeletion(KafkaCluster cluster, Node controller) {
  private Mono<Boolean> isTopicDeletionEnabled(KafkaCluster cluster, Node controller) {
    return adminClientService.get(cluster)
        .flatMap(ac -> ac.loadBrokersConfig(List.of(controller.id())))
        .map(config ->
@@ -61,14 +63,13 @@
            .filter(e -> e.name().equals(DELETE_TOPIC_ENABLED_SERVER_PROPERTY))
            .map(e -> Boolean.parseBoolean(e.value()))
            .findFirst()
            .orElse(true))
        .flatMap(enabled -> enabled ? Mono.just(Feature.TOPIC_DELETION) : Mono.empty());
            .orElse(true));
  }

  private Mono<Feature> acl(KafkaCluster cluster) {
  private Mono<ClusterFeature> acl(KafkaCluster cluster) {
    return adminClientService.get(cluster).flatMap(
        ac -> ac.getClusterFeatures().contains(SupportedFeature.AUTHORIZED_SECURITY_ENABLED)
            ? Mono.just(Feature.KAFKA_ACL)
            ? Mono.just(ClusterFeature.KAFKA_ACL)
            : Mono.empty()
    );
  }
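A hedged illustration of the broker-config check behind the new isTopicDeletionEnabled above: the controller's delete.topic.enable entry decides whether TOPIC_DELETION is reported, defaulting to enabled when the property is absent. The record below is a simplified stand-in for Kafka's ConfigEntry, not the project's types.

import java.util.List;

class TopicDeletionCheckSketch {
  // Simplified stand-in for a broker config entry (name/value pair).
  record ConfigEntry(String name, String value) {}

  static boolean isTopicDeletionEnabled(List<ConfigEntry> controllerConfig) {
    return controllerConfig.stream()
        .filter(e -> e.name().equals("delete.topic.enable"))
        .map(e -> Boolean.parseBoolean(e.value()))
        .findFirst()
        .orElse(true); // Kafka's default: topic deletion enabled
  }
}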
@@ -3,12 +3,15 @@ package com.provectus.kafka.ui.service;
|
|||
import com.provectus.kafka.ui.client.RetryingKafkaConnectClient;
|
||||
import com.provectus.kafka.ui.config.ClustersProperties;
|
||||
import com.provectus.kafka.ui.connect.api.KafkaConnectClientApi;
|
||||
import com.provectus.kafka.ui.model.ApplicationPropertyValidationDTO;
|
||||
import com.provectus.kafka.ui.model.ClusterConfigValidationDTO;
|
||||
import com.provectus.kafka.ui.model.KafkaCluster;
|
||||
import com.provectus.kafka.ui.model.MetricsConfig;
|
||||
import com.provectus.kafka.ui.service.ksql.KsqlApiClient;
|
||||
import com.provectus.kafka.ui.service.masking.DataMasking;
|
||||
import com.provectus.kafka.ui.sr.ApiClient;
|
||||
import com.provectus.kafka.ui.sr.api.KafkaSrClientApi;
|
||||
import com.provectus.kafka.ui.util.KafkaServicesValidation;
|
||||
import com.provectus.kafka.ui.util.PollingThrottler;
|
||||
import com.provectus.kafka.ui.util.ReactiveFailover;
|
||||
import com.provectus.kafka.ui.util.WebClientConfigurator;
|
||||
|
@ -20,13 +23,19 @@ import java.util.Properties;
|
|||
import java.util.stream.Stream;
|
||||
import javax.annotation.Nullable;
|
||||
import lombok.RequiredArgsConstructor;
|
||||
import lombok.extern.slf4j.Slf4j;
|
||||
import org.springframework.beans.factory.annotation.Value;
|
||||
import org.springframework.stereotype.Service;
|
||||
import org.springframework.util.unit.DataSize;
|
||||
import org.springframework.web.reactive.function.client.WebClient;
|
||||
import reactor.core.publisher.Flux;
|
||||
import reactor.core.publisher.Mono;
|
||||
import reactor.util.function.Tuple2;
|
||||
import reactor.util.function.Tuples;
|
||||
|
||||
@Service
|
||||
@RequiredArgsConstructor
|
||||
@Slf4j
|
||||
public class KafkaClusterFactory {
|
||||
|
||||
@Value("${webclient.max-in-memory-buffer-size:20MB}")
|
||||
|
@ -37,52 +46,118 @@ public class KafkaClusterFactory {
|
|||
|
||||
builder.name(clusterProperties.getName());
|
||||
builder.bootstrapServers(clusterProperties.getBootstrapServers());
|
||||
builder.properties(Optional.ofNullable(clusterProperties.getProperties()).orElse(new Properties()));
|
||||
builder.properties(convertProperties(clusterProperties.getProperties()));
|
||||
builder.readOnly(clusterProperties.isReadOnly());
|
||||
builder.disableLogDirsCollection(clusterProperties.isDisableLogDirsCollection());
|
||||
builder.masking(DataMasking.create(clusterProperties.getMasking()));
|
||||
builder.metricsConfig(metricsConfigDataToMetricsConfig(clusterProperties.getMetrics()));
|
||||
builder.throttler(PollingThrottler.throttlerSupplier(clusterProperties));
|
||||
|
||||
builder.schemaRegistryClient(schemaRegistryClient(clusterProperties));
|
||||
builder.connectsClients(connectClients(clusterProperties));
|
||||
builder.ksqlClient(ksqlClient(clusterProperties));
|
||||
|
||||
if (schemaRegistryConfigured(clusterProperties)) {
|
||||
builder.schemaRegistryClient(schemaRegistryClient(clusterProperties));
|
||||
}
|
||||
if (connectClientsConfigured(clusterProperties)) {
|
||||
builder.connectsClients(connectClients(clusterProperties));
|
||||
}
|
||||
if (ksqlConfigured(clusterProperties)) {
|
||||
builder.ksqlClient(ksqlClient(clusterProperties));
|
||||
}
|
||||
if (metricsConfigured(clusterProperties)) {
|
||||
builder.metricsConfig(metricsConfigDataToMetricsConfig(clusterProperties.getMetrics()));
|
||||
}
|
||||
builder.originalProperties(clusterProperties);
|
||||
|
||||
return builder.build();
|
||||
}
|
||||
|
||||
@Nullable
|
||||
public Mono<ClusterConfigValidationDTO> validate(ClustersProperties.Cluster clusterProperties) {
|
||||
if (clusterProperties.getSsl() != null) {
|
||||
Optional<String> errMsg = KafkaServicesValidation.validateTruststore(clusterProperties.getSsl());
|
||||
if (errMsg.isPresent()) {
|
||||
return Mono.just(new ClusterConfigValidationDTO()
|
||||
.kafka(new ApplicationPropertyValidationDTO()
|
||||
.error(true)
|
||||
.errorMessage("Truststore not valid: " + errMsg.get())));
|
||||
}
|
||||
}
|
||||
|
||||
return Mono.zip(
|
||||
KafkaServicesValidation.validateClusterConnection(
|
||||
clusterProperties.getBootstrapServers(),
|
||||
convertProperties(clusterProperties.getProperties()),
|
||||
clusterProperties.getSsl()
|
||||
),
|
||||
schemaRegistryConfigured(clusterProperties)
|
||||
? KafkaServicesValidation.validateSchemaRegistry(
|
||||
() -> schemaRegistryClient(clusterProperties)).map(Optional::of)
|
||||
: Mono.<Optional<ApplicationPropertyValidationDTO>>just(Optional.empty()),
|
||||
|
||||
ksqlConfigured(clusterProperties)
|
||||
? KafkaServicesValidation.validateKsql(() -> ksqlClient(clusterProperties)).map(Optional::of)
|
||||
: Mono.<Optional<ApplicationPropertyValidationDTO>>just(Optional.empty()),
|
||||
|
||||
connectClientsConfigured(clusterProperties)
|
||||
?
|
||||
Flux.fromIterable(clusterProperties.getKafkaConnect())
|
||||
.flatMap(c ->
|
||||
KafkaServicesValidation.validateConnect(() -> connectClient(clusterProperties, c))
|
||||
.map(r -> Tuples.of(c.getName(), r)))
|
||||
.collectMap(Tuple2::getT1, Tuple2::getT2)
|
||||
.map(Optional::of)
|
||||
:
|
||||
Mono.<Optional<Map<String, ApplicationPropertyValidationDTO>>>just(Optional.empty())
|
||||
).map(tuple -> {
|
||||
var validation = new ClusterConfigValidationDTO();
|
||||
validation.kafka(tuple.getT1());
|
||||
tuple.getT2().ifPresent(validation::schemaRegistry);
|
||||
tuple.getT3().ifPresent(validation::ksqldb);
|
||||
tuple.getT4().ifPresent(validation::kafkaConnects);
|
||||
return validation;
|
||||
});
|
||||
}
|
||||
|
||||
private Properties convertProperties(Map<String, Object> propertiesMap) {
|
||||
Properties properties = new Properties();
|
||||
if (propertiesMap != null) {
|
||||
properties.putAll(propertiesMap);
|
||||
}
|
||||
return properties;
|
||||
}
|
||||
|
||||
private boolean connectClientsConfigured(ClustersProperties.Cluster clusterProperties) {
|
||||
return clusterProperties.getKafkaConnect() != null;
|
||||
}
|
||||
|
||||
private Map<String, ReactiveFailover<KafkaConnectClientApi>> connectClients(
|
||||
ClustersProperties.Cluster clusterProperties) {
|
||||
if (clusterProperties.getKafkaConnect() == null) {
|
||||
return null;
|
||||
}
|
||||
Map<String, ReactiveFailover<KafkaConnectClientApi>> connects = new HashMap<>();
|
||||
clusterProperties.getKafkaConnect().forEach(c -> {
|
||||
ReactiveFailover<KafkaConnectClientApi> failover = ReactiveFailover.create(
|
||||
parseUrlList(c.getAddress()),
|
||||
url -> new RetryingKafkaConnectClient(c.toBuilder().address(url).build(), maxBuffSize),
|
||||
ReactiveFailover.CONNECTION_REFUSED_EXCEPTION_FILTER,
|
||||
"No alive connect instances available",
|
||||
ReactiveFailover.DEFAULT_RETRY_GRACE_PERIOD_MS
|
||||
);
|
||||
connects.put(c.getName(), failover);
|
||||
});
|
||||
clusterProperties.getKafkaConnect().forEach(c -> connects.put(c.getName(), connectClient(clusterProperties, c)));
|
||||
return connects;
|
||||
}
|
||||
|
||||
@Nullable
|
||||
private ReactiveFailover<KafkaConnectClientApi> connectClient(ClustersProperties.Cluster cluster,
|
||||
ClustersProperties.ConnectCluster connectCluster) {
|
||||
return ReactiveFailover.create(
|
||||
parseUrlList(connectCluster.getAddress()),
|
||||
url -> new RetryingKafkaConnectClient(
|
||||
connectCluster.toBuilder().address(url).build(),
|
||||
cluster.getSsl(),
|
||||
maxBuffSize
|
||||
),
|
||||
ReactiveFailover.CONNECTION_REFUSED_EXCEPTION_FILTER,
|
||||
"No alive connect instances available",
|
||||
ReactiveFailover.DEFAULT_RETRY_GRACE_PERIOD_MS
|
||||
);
|
||||
}
|
||||
|
||||
private boolean schemaRegistryConfigured(ClustersProperties.Cluster clusterProperties) {
|
||||
return clusterProperties.getSchemaRegistry() != null;
|
||||
}
|
||||
|
||||
private ReactiveFailover<KafkaSrClientApi> schemaRegistryClient(ClustersProperties.Cluster clusterProperties) {
|
||||
if (clusterProperties.getSchemaRegistry() == null) {
|
||||
return null;
|
||||
}
|
||||
var auth = Optional.ofNullable(clusterProperties.getSchemaRegistryAuth())
|
||||
.orElse(new ClustersProperties.SchemaRegistryAuth());
|
||||
WebClient webClient = new WebClientConfigurator()
|
||||
.configureSsl(clusterProperties.getSchemaRegistrySsl())
|
||||
.configureSsl(clusterProperties.getSsl(), clusterProperties.getSchemaRegistrySsl())
|
||||
.configureBasicAuth(auth.getUsername(), auth.getPassword())
|
||||
.configureBufferSize(maxBuffSize)
|
||||
.build();
|
||||
return ReactiveFailover.create(
|
||||
parseUrlList(clusterProperties.getSchemaRegistry()),
|
||||
|
@ -93,16 +168,17 @@ public class KafkaClusterFactory {
|
|||
);
|
||||
}
|
||||
|
||||
@Nullable
|
||||
private boolean ksqlConfigured(ClustersProperties.Cluster clusterProperties) {
|
||||
return clusterProperties.getKsqldbServer() != null;
|
||||
}
|
||||
|
||||
private ReactiveFailover<KsqlApiClient> ksqlClient(ClustersProperties.Cluster clusterProperties) {
|
||||
if (clusterProperties.getKsqldbServer() == null) {
|
||||
return null;
|
||||
}
|
||||
return ReactiveFailover.create(
|
||||
parseUrlList(clusterProperties.getKsqldbServer()),
|
||||
url -> new KsqlApiClient(
|
||||
url,
|
||||
clusterProperties.getKsqldbServerAuth(),
|
||||
clusterProperties.getSsl(),
|
||||
clusterProperties.getKsqldbServerSsl(),
|
||||
maxBuffSize
|
||||
),
|
||||
|
@ -116,6 +192,10 @@ public class KafkaClusterFactory {
|
|||
return Stream.of(url.split(",")).map(String::trim).filter(s -> !s.isBlank()).toList();
|
||||
}
|
||||
|
||||
private boolean metricsConfigured(ClustersProperties.Cluster clusterProperties) {
|
||||
return clusterProperties.getMetrics() != null;
|
||||
}
|
||||
|
||||
@Nullable
|
||||
private MetricsConfig metricsConfigDataToMetricsConfig(ClustersProperties.MetricsConfigData metricsConfigData) {
|
||||
if (metricsConfigData == null) {
|
||||
|
@ -124,9 +204,11 @@ public class KafkaClusterFactory {
|
|||
MetricsConfig.MetricsConfigBuilder builder = MetricsConfig.builder();
|
||||
builder.type(metricsConfigData.getType());
|
||||
builder.port(metricsConfigData.getPort());
|
||||
builder.ssl(metricsConfigData.isSsl());
|
||||
builder.ssl(Optional.ofNullable(metricsConfigData.getSsl()).orElse(false));
|
||||
builder.username(metricsConfigData.getUsername());
|
||||
builder.password(metricsConfigData.getPassword());
|
||||
builder.keystoreLocation(metricsConfigData.getKeystoreLocation());
|
||||
builder.keystorePassword(metricsConfigData.getKeystorePassword());
|
||||
return builder.build();
|
||||
}
|
||||
|
||||
|
|
|
@@ -17,7 +17,8 @@ import org.springframework.stereotype.Component;
class KafkaConfigSanitizer extends Sanitizer {
  private static final List<String> DEFAULT_PATTERNS_TO_SANITIZE = Arrays.asList(
      "basic.auth.user.info",  /* For Schema Registry credentials */
      "password", "secret", "token", "key", ".*credentials.*" /* General credential patterns */
      "password", "secret", "token", "key", ".*credentials.*", /* General credential patterns */
      "aws.access.*", "aws.secret.*", "aws.session.*" /* AWS-related credential patterns */
  );

  KafkaConfigSanitizer(
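A hedged sketch of how key-pattern sanitizing of this kind generally behaves; maskIfSensitive and the pattern wrapping below are assumptions made for the example, not the Spring Sanitizer API this class actually extends.

import java.util.List;
import java.util.regex.Pattern;

class SanitizerSketch {
  private static final List<Pattern> PATTERNS = List.of(
      Pattern.compile(".*password.*", Pattern.CASE_INSENSITIVE),
      Pattern.compile(".*secret.*", Pattern.CASE_INSENSITIVE),
      Pattern.compile("aws\\.access.*", Pattern.CASE_INSENSITIVE)
  );

  // Returns "******" for keys matching any sensitive pattern, the raw value otherwise.
  static Object maskIfSensitive(String key, Object value) {
    return PATTERNS.stream().anyMatch(p -> p.matcher(key).matches()) ? "******" : value;
  }
}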
@@ -28,10 +28,10 @@ import java.util.HashMap;
|
|||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Optional;
|
||||
import java.util.function.Function;
|
||||
import java.util.function.Predicate;
|
||||
import java.util.stream.Collectors;
|
||||
import java.util.stream.Stream;
|
||||
import javax.annotation.Nullable;
|
||||
import lombok.RequiredArgsConstructor;
|
||||
import lombok.SneakyThrows;
|
||||
import lombok.extern.slf4j.Slf4j;
|
||||
|
@ -40,7 +40,6 @@ import org.springframework.stereotype.Service;
|
|||
import org.springframework.web.reactive.function.client.WebClientResponseException;
|
||||
import reactor.core.publisher.Flux;
|
||||
import reactor.core.publisher.Mono;
|
||||
import reactor.util.function.Tuple2;
|
||||
import reactor.util.function.Tuples;
|
||||
|
||||
@Service
|
||||
|
@ -52,18 +51,18 @@ public class KafkaConnectService {
|
|||
private final ObjectMapper objectMapper;
|
||||
private final KafkaConfigSanitizer kafkaConfigSanitizer;
|
||||
|
||||
public List<ConnectDTO> getConnects(KafkaCluster cluster) {
|
||||
return Optional.ofNullable(cluster.getOriginalProperties().getKafkaConnect())
|
||||
.map(lst -> lst.stream().map(clusterMapper::toKafkaConnect).toList())
|
||||
.orElse(List.of());
|
||||
public Flux<ConnectDTO> getConnects(KafkaCluster cluster) {
|
||||
return Flux.fromIterable(
|
||||
Optional.ofNullable(cluster.getOriginalProperties().getKafkaConnect())
|
||||
.map(lst -> lst.stream().map(clusterMapper::toKafkaConnect).toList())
|
||||
.orElse(List.of())
|
||||
);
|
||||
}
|
||||
|
||||
public Flux<FullConnectorInfoDTO> getAllConnectors(final KafkaCluster cluster,
|
||||
final String search) {
|
||||
Mono<Flux<ConnectDTO>> clusters = Mono.just(Flux.fromIterable(getConnects(cluster))); // TODO get rid
|
||||
return clusters
|
||||
.flatMapMany(Function.identity())
|
||||
.flatMap(connect -> getConnectorNames(cluster, connect.getName()))
|
||||
@Nullable final String search) {
|
||||
return getConnects(cluster)
|
||||
.flatMap(connect -> getConnectorNames(cluster, connect.getName()).map(cn -> Tuples.of(connect.getName(), cn)))
|
||||
.flatMap(pair -> getConnector(cluster, pair.getT1(), pair.getT2()))
|
||||
.flatMap(connector ->
|
||||
getConnectorConfig(cluster, connector.getConnect(), connector.getName())
|
||||
|
@ -99,56 +98,46 @@ public class KafkaConnectService {
|
|||
.filter(matchesSearchTerm(search));
|
||||
}
|
||||
|
||||
private Predicate<FullConnectorInfoDTO> matchesSearchTerm(final String search) {
|
||||
return connector -> getSearchValues(connector)
|
||||
.anyMatch(value -> value.contains(
|
||||
StringUtils.defaultString(
|
||||
search,
|
||||
StringUtils.EMPTY)
|
||||
.toUpperCase()));
|
||||
private Predicate<FullConnectorInfoDTO> matchesSearchTerm(@Nullable final String search) {
|
||||
if (search == null) {
|
||||
return c -> true;
|
||||
}
|
||||
return connector -> getStringsForSearch(connector)
|
||||
.anyMatch(string -> StringUtils.containsIgnoreCase(string, search));
|
||||
}
|
||||
|
||||
private Stream<String> getSearchValues(FullConnectorInfoDTO fullConnectorInfo) {
|
||||
private Stream<String> getStringsForSearch(FullConnectorInfoDTO fullConnectorInfo) {
|
||||
return Stream.of(
|
||||
fullConnectorInfo.getName(),
|
||||
fullConnectorInfo.getStatus().getState().getValue(),
|
||||
fullConnectorInfo.getType().getValue())
|
||||
.map(String::toUpperCase);
|
||||
fullConnectorInfo.getName(),
|
||||
fullConnectorInfo.getStatus().getState().getValue(),
|
||||
fullConnectorInfo.getType().getValue());
|
||||
}
|
||||
|
||||
private Mono<ConnectorTopics> getConnectorTopics(KafkaCluster cluster, String connectClusterName,
|
||||
String connectorName) {
|
||||
public Mono<ConnectorTopics> getConnectorTopics(KafkaCluster cluster, String connectClusterName,
|
||||
String connectorName) {
|
||||
return api(cluster, connectClusterName)
|
||||
.mono(c -> c.getConnectorTopics(connectorName))
|
||||
.map(result -> result.get(connectorName))
|
||||
// old connectors don't have this api, setting empty list for
|
||||
// old Connect API versions don't have this endpoint, setting empty list for
|
||||
// backward-compatibility
|
||||
.onErrorResume(Exception.class, e -> Mono.just(new ConnectorTopics().topics(List.of())));
|
||||
}
|
||||
|
||||
private Flux<Tuple2<String, String>> getConnectorNames(KafkaCluster cluster, String connectName) {
|
||||
return getConnectors(cluster, connectName)
|
||||
.collectList().map(e -> e.get(0))
|
||||
public Flux<String> getConnectorNames(KafkaCluster cluster, String connectName) {
|
||||
return api(cluster, connectName)
|
||||
.flux(client -> client.getConnectors(null))
|
||||
// for some reason `getConnectors` method returns the response as a single string
|
||||
.map(this::parseToList)
|
||||
.flatMapMany(Flux::fromIterable)
|
||||
.map(connector -> Tuples.of(connectName, connector));
|
||||
.collectList().map(e -> e.get(0))
|
||||
.map(this::parseConnectorsNamesStringToList)
|
||||
.flatMapMany(Flux::fromIterable);
|
||||
}
|
||||
|
||||
@SneakyThrows
|
||||
private List<String> parseToList(String json) {
|
||||
private List<String> parseConnectorsNamesStringToList(String json) {
|
||||
return objectMapper.readValue(json, new TypeReference<>() {
|
||||
});
|
||||
}
|
||||
|
||||
public Flux<String> getConnectors(KafkaCluster cluster, String connectName) {
|
||||
return api(cluster, connectName)
|
||||
.flux(client ->
|
||||
client.getConnectors(null)
|
||||
.doOnError(e -> log.error("Unexpected error upon getting connectors", e))
|
||||
);
|
||||
}
|
||||
|
||||
public Mono<ConnectorDTO> createConnector(KafkaCluster cluster, String connectName,
|
||||
Mono<NewConnectorDTO> connector) {
|
||||
return api(cluster, connectName)
|
||||
|
@ -171,9 +160,7 @@ public class KafkaConnectService {
|
|||
private Mono<Boolean> connectorExists(KafkaCluster cluster, String connectName,
|
||||
String connectorName) {
|
||||
return getConnectorNames(cluster, connectName)
|
||||
.map(Tuple2::getT2)
|
||||
.collectList()
|
||||
.map(connectorNames -> connectorNames.contains(connectorName));
|
||||
.any(name -> name.equals(connectorName));
|
||||
}
|
||||
|
||||
public Mono<ConnectorDTO> getConnector(KafkaCluster cluster, String connectName,
|
||||
|
|
|
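A hedged sketch of the null-tolerant, case-insensitive search predicate introduced in KafkaConnectService above; ConnectorView is a stand-in for FullConnectorInfoDTO and is not part of the diff.

import java.util.function.Predicate;
import java.util.stream.Stream;
import org.apache.commons.lang3.StringUtils;

class ConnectorSearchSketch {
  record ConnectorView(String name, String state, String type) {}

  static Predicate<ConnectorView> matchesSearchTerm(String search) {
    if (search == null) {
      return c -> true; // no filter requested
    }
    return c -> Stream.of(c.name(), c.state(), c.type())
        .anyMatch(s -> StringUtils.containsIgnoreCase(s, search));
  }
}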
@@ -18,6 +18,7 @@ import com.provectus.kafka.ui.serde.api.Serde;
import com.provectus.kafka.ui.serdes.ConsumerRecordDeserializer;
import com.provectus.kafka.ui.serdes.ProducerRecordCreator;
import com.provectus.kafka.ui.util.ResultSizeLimiter;
import com.provectus.kafka.ui.util.SslPropertiesUtil;
import java.util.List;
import java.util.Map;
import java.util.Properties;
@@ -108,6 +109,7 @@ public class MessagesService {
    );

    Properties properties = new Properties();
    SslPropertiesUtil.addKafkaSslProperties(cluster.getOriginalProperties().getSsl(), properties);
    properties.putAll(cluster.getProperties());
    properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, cluster.getBootstrapServers());
    properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class);
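A minimal hedged sketch of the property layering used above when building producer configs: SSL-related defaults first, then per-cluster overrides, then the values the service always pins. buildProducerProps is an illustrative helper, not project code.

import java.util.Map;
import java.util.Properties;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.serialization.ByteArraySerializer;

class ProducerPropsSketch {
  static Properties buildProducerProps(Properties sslDefaults, Map<String, Object> clusterProps, String bootstrap) {
    Properties properties = new Properties();
    properties.putAll(sslDefaults);   // e.g. truststore settings derived from cluster SSL config
    properties.putAll(clusterProps);  // user-provided cluster properties may override the defaults
    properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrap);
    properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class);
    properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class);
    return properties;
  }
}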
@@ -11,7 +11,7 @@ import com.google.common.collect.Table;
|
|||
import com.provectus.kafka.ui.exception.IllegalEntityStateException;
|
||||
import com.provectus.kafka.ui.exception.NotFoundException;
|
||||
import com.provectus.kafka.ui.exception.ValidationException;
|
||||
import com.provectus.kafka.ui.util.NumberUtil;
|
||||
import com.provectus.kafka.ui.util.KafkaVersion;
|
||||
import com.provectus.kafka.ui.util.annotation.KafkaClientInternalsDependant;
|
||||
import java.io.Closeable;
|
||||
import java.util.ArrayList;
|
||||
|
@ -64,10 +64,12 @@ import org.apache.kafka.common.acl.AclBinding;
|
|||
import org.apache.kafka.common.acl.AclBindingFilter;
|
||||
import org.apache.kafka.common.acl.AclOperation;
|
||||
import org.apache.kafka.common.config.ConfigResource;
|
||||
import org.apache.kafka.common.errors.ClusterAuthorizationException;
|
||||
import org.apache.kafka.common.errors.GroupIdNotFoundException;
|
||||
import org.apache.kafka.common.errors.GroupNotEmptyException;
|
||||
import org.apache.kafka.common.errors.InvalidRequestException;
|
||||
import org.apache.kafka.common.errors.SecurityDisabledException;
|
||||
import org.apache.kafka.common.errors.TopicAuthorizationException;
|
||||
import org.apache.kafka.common.errors.UnknownTopicOrPartitionException;
|
||||
import org.apache.kafka.common.errors.UnsupportedVersionException;
|
||||
import org.apache.kafka.common.requests.DescribeLogDirsResponse;
|
||||
|
@ -105,6 +107,10 @@ public class ReactiveAdminClient implements Closeable {
|
|||
.map(Tuple2::getT1)
|
||||
.collect(Collectors.toSet());
|
||||
}
|
||||
|
||||
static Set<SupportedFeature> defaultFeatures() {
|
||||
return Set.of();
|
||||
}
|
||||
}
|
||||
|
||||
@Value
|
||||
|
@ -127,9 +133,10 @@ public class ReactiveAdminClient implements Closeable {
|
|||
private static Mono<Set<SupportedFeature>> getSupportedUpdateFeaturesForVersion(AdminClient ac, String versionStr) {
|
||||
Float kafkaVersion = null;
|
||||
try {
|
||||
kafkaVersion = NumberUtil.parserClusterVersion(versionStr);
|
||||
float version = KafkaVersion.parse(versionStr);
|
||||
return SupportedFeature.forVersion(version);
|
||||
} catch (NumberFormatException e) {
|
||||
//Nothing to do here
|
||||
return SupportedFeature.defaultFeatures();
|
||||
}
|
||||
return SupportedFeature.forVersion(ac, kafkaVersion);
|
||||
}
|
||||
|
@ -146,7 +153,7 @@ public class ReactiveAdminClient implements Closeable {
|
|||
|
||||
// NOTE: if KafkaFuture returns null, that Mono will be empty(!), since Reactor does not support nullable results
|
||||
// (see MonoSink.success(..) javadoc for details)
|
||||
private static <T> Mono<T> toMono(KafkaFuture<T> future) {
|
||||
public static <T> Mono<T> toMono(KafkaFuture<T> future) {
|
||||
return Mono.<T>create(sink -> future.whenComplete((res, ex) -> {
|
||||
if (ex != null) {
|
||||
// KafkaFuture doc is unclear about what exception wrapper will be used
|
||||
|
@ -196,6 +203,7 @@ public class ReactiveAdminClient implements Closeable {
|
|||
}
|
||||
|
||||
//NOTE: skips not-found topics (for which UnknownTopicOrPartitionException was thrown by AdminClient)
|
||||
//and topics for which DESCRIBE_CONFIGS permission is not set (TopicAuthorizationException was thrown)
|
||||
public Mono<Map<String, List<ConfigEntry>>> getTopicsConfig(Collection<String> topicNames, boolean includeDoc) {
|
||||
var includeDocFixed = features.contains(SupportedFeature.CONFIG_DOCUMENTATION_RETRIEVAL) && includeDoc;
|
||||
// we need to partition calls, because it can lead to AdminClient timeouts in case of large topics count
|
||||
|
@ -216,7 +224,8 @@ public class ReactiveAdminClient implements Closeable {
|
|||
client.describeConfigs(
|
||||
resources,
|
||||
new DescribeConfigsOptions().includeSynonyms(true).includeDocumentation(includeDoc)).values(),
|
||||
UnknownTopicOrPartitionException.class
|
||||
UnknownTopicOrPartitionException.class,
|
||||
TopicAuthorizationException.class
|
||||
).map(config -> config.entrySet().stream()
|
||||
.collect(toMap(
|
||||
c -> c.getKey().name(),
|
||||
|
@ -228,11 +237,17 @@ public class ReactiveAdminClient implements Closeable {
|
|||
.map(brokerId -> new ConfigResource(ConfigResource.Type.BROKER, Integer.toString(brokerId)))
|
||||
.collect(toList());
|
||||
return toMono(client.describeConfigs(resources).all())
|
||||
.doOnError(InvalidRequestException.class,
|
||||
th -> log.trace("Error while getting broker {} configs", brokerIds, th))
|
||||
// some kafka backends (like MSK serverless) do not support broker's configs retrieval,
|
||||
// in that case InvalidRequestException will be thrown
|
||||
.onErrorResume(InvalidRequestException.class, th -> Mono.just(Map.of()))
|
||||
.onErrorResume(InvalidRequestException.class, th -> {
|
||||
log.trace("Error while getting broker {} configs", brokerIds, th);
|
||||
return Mono.just(Map.of());
|
||||
})
|
||||
// there are situations when kafka-ui user has no DESCRIBE_CONFIGS permission on cluster
|
||||
.onErrorResume(ClusterAuthorizationException.class, th -> {
|
||||
log.trace("AuthorizationException while getting configs for brokers {}", brokerIds, th);
|
||||
return Mono.just(Map.of());
|
||||
})
|
||||
.map(config -> config.entrySet().stream()
|
||||
.collect(toMap(
|
||||
c -> Integer.valueOf(c.getKey().name()),
|
||||
|
@ -262,13 +277,16 @@ public class ReactiveAdminClient implements Closeable {
|
|||
|
||||
private Mono<Map<String, TopicDescription>> describeTopicsImpl(Collection<String> topics) {
|
||||
return toMonoWithExceptionFilter(
|
||||
client.describeTopics(topics).values(),
|
||||
UnknownTopicOrPartitionException.class
|
||||
client.describeTopics(topics).topicNameValues(),
|
||||
UnknownTopicOrPartitionException.class,
|
||||
// we only describe topics that we see from listTopics() API, so we should have permission to do it,
|
||||
// but also adding this exception here for rare case when access restricted after we called listTopics()
|
||||
TopicAuthorizationException.class
|
||||
);
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns TopicDescription mono, or Empty Mono if topic not found.
|
||||
* Returns TopicDescription mono, or Empty Mono if topic not visible.
|
||||
*/
|
||||
public Mono<TopicDescription> describeTopic(String topic) {
|
||||
return describeTopics(List.of(topic)).flatMap(m -> Mono.justOrEmpty(m.get(topic)));
|
||||
|
@ -282,10 +300,11 @@ public class ReactiveAdminClient implements Closeable {
|
|||
* such topics in resulting map.
|
||||
* <p/>
|
||||
* This method converts input map into Mono[Map] ignoring keys for which KafkaFutures
|
||||
* finished with <code>clazz</code> exception and empty Monos.
|
||||
* finished with <code>classes</code> exceptions and empty Monos.
|
||||
*/
|
||||
@SafeVarargs
|
||||
static <K, V> Mono<Map<K, V>> toMonoWithExceptionFilter(Map<K, KafkaFuture<V>> values,
|
||||
Class<? extends KafkaException> clazz) {
|
||||
Class<? extends KafkaException>... classes) {
|
||||
if (values.isEmpty()) {
|
||||
return Mono.just(Map.of());
|
||||
}
|
||||
|
@ -297,7 +316,7 @@ public class ReactiveAdminClient implements Closeable {
|
|||
.defaultIfEmpty(Tuples.of(e.getKey(), Optional.empty())) //tracking empty Monos
|
||||
.onErrorResume(
|
||||
// tracking Monos with suppressible error
|
||||
th -> th.getClass().isAssignableFrom(clazz),
|
||||
th -> Stream.of(classes).anyMatch(clazz -> th.getClass().isAssignableFrom(clazz)),
|
||||
th -> Mono.just(Tuples.of(e.getKey(), Optional.empty()))))
|
||||
.toList();
|
||||
|
||||
|
@ -318,7 +337,13 @@ public class ReactiveAdminClient implements Closeable {
|
|||
|
||||
public Mono<Map<Integer, Map<String, DescribeLogDirsResponse.LogDirInfo>>> describeLogDirs(
|
||||
Collection<Integer> brokerIds) {
|
||||
return toMono(client.describeLogDirs(brokerIds).all());
|
||||
return toMono(client.describeLogDirs(brokerIds).all())
|
||||
.onErrorResume(UnsupportedVersionException.class, th -> Mono.just(Map.of()))
|
||||
.onErrorResume(ClusterAuthorizationException.class, th -> Mono.just(Map.of()))
|
||||
.onErrorResume(th -> true, th -> {
|
||||
log.warn("Error while calling describeLogDirs", th);
|
||||
return Mono.just(Map.of());
|
||||
});
|
||||
}
|
||||
|
||||
public Mono<ClusterDescription> describeCluster() {
|
||||
|
|
|
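For context, a hedged standalone sketch of bridging a KafkaFuture into a Mono in the spirit of the now-public toMono above; this simplified variant is written for the example and is not the project's exact implementation.

import org.apache.kafka.common.KafkaFuture;
import reactor.core.publisher.Mono;

class KafkaFutureBridgeSketch {
  // Completes the Mono with the future's result or error; a null result yields an empty Mono.
  static <T> Mono<T> toMono(KafkaFuture<T> future) {
    return Mono.<T>create(sink -> future.whenComplete((res, ex) -> {
      if (ex != null) {
        sink.error(ex);
      } else if (res != null) {
        sink.success(res);
      } else {
        sink.success();
      }
    }));
  }
}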
@@ -1,6 +1,8 @@
|
|||
package com.provectus.kafka.ui.service;
|
||||
|
||||
import com.provectus.kafka.ui.model.Feature;
|
||||
import static com.provectus.kafka.ui.service.ReactiveAdminClient.ClusterDescription;
|
||||
|
||||
import com.provectus.kafka.ui.model.ClusterFeature;
|
||||
import com.provectus.kafka.ui.model.InternalLogDirStats;
|
||||
import com.provectus.kafka.ui.model.KafkaCluster;
|
||||
import com.provectus.kafka.ui.model.Metrics;
|
||||
|
@ -9,10 +11,12 @@ import com.provectus.kafka.ui.model.Statistics;
|
|||
import com.provectus.kafka.ui.service.metrics.MetricsCollector;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.stream.Collectors;
|
||||
import lombok.RequiredArgsConstructor;
|
||||
import lombok.extern.slf4j.Slf4j;
|
||||
import org.apache.kafka.clients.admin.ConfigEntry;
|
||||
import org.apache.kafka.clients.admin.TopicDescription;
|
||||
import org.apache.kafka.common.Node;
|
||||
import org.springframework.stereotype.Service;
|
||||
import reactor.core.publisher.Mono;
|
||||
|
||||
|
@ -21,7 +25,7 @@ import reactor.core.publisher.Mono;
|
|||
@Slf4j
|
||||
public class StatisticsService {
|
||||
|
||||
private final MetricsCollector metricsClusterUtil;
|
||||
private final MetricsCollector metricsCollector;
|
||||
private final AdminClientService adminClientService;
|
||||
private final FeatureService featureService;
|
||||
private final StatisticsCache cache;
|
||||
|
@ -35,8 +39,8 @@ public class StatisticsService {
|
|||
ac.describeCluster().flatMap(description ->
|
||||
Mono.zip(
|
||||
List.of(
|
||||
metricsClusterUtil.getBrokerMetrics(cluster, description.getNodes()),
|
||||
getLogDirInfo(cluster, ac),
|
||||
metricsCollector.getBrokerMetrics(cluster, description.getNodes()),
|
||||
getLogDirInfo(description, ac),
|
||||
featureService.getAvailableFeatures(cluster, description.getController()),
|
||||
loadTopicConfigs(cluster),
|
||||
describeTopics(cluster)),
|
||||
|
@ -47,7 +51,7 @@ public class StatisticsService {
|
|||
.version(ac.getVersion())
|
||||
.metrics((Metrics) results[0])
|
||||
.logDirInfo((InternalLogDirStats) results[1])
|
||||
.features((List<Feature>) results[2])
|
||||
.features((List<ClusterFeature>) results[2])
|
||||
.topicConfigs((Map<String, List<ConfigEntry>>) results[3])
|
||||
.topicDescriptions((Map<String, TopicDescription>) results[4])
|
||||
.build()
|
||||
|
@ -58,11 +62,9 @@ public class StatisticsService {
|
|||
e -> Mono.just(Statistics.empty().toBuilder().lastKafkaException(e).build()));
|
||||
}
|
||||
|
||||
private Mono<InternalLogDirStats> getLogDirInfo(KafkaCluster cluster, ReactiveAdminClient c) {
|
||||
if (!cluster.isDisableLogDirsCollection()) {
|
||||
return c.describeLogDirs().map(InternalLogDirStats::new);
|
||||
}
|
||||
return Mono.just(InternalLogDirStats.empty());
|
||||
private Mono<InternalLogDirStats> getLogDirInfo(ClusterDescription desc, ReactiveAdminClient ac) {
|
||||
var brokerIds = desc.getNodes().stream().map(Node::id).collect(Collectors.toSet());
|
||||
return ac.describeLogDirs(brokerIds).map(InternalLogDirStats::new);
|
||||
}
|
||||
|
||||
private Mono<Map<String, TopicDescription>> describeTopics(KafkaCluster c) {
|
||||
|
|
|
@@ -7,7 +7,7 @@ import com.provectus.kafka.ui.exception.TopicMetadataException;
|
|||
import com.provectus.kafka.ui.exception.TopicNotFoundException;
|
||||
import com.provectus.kafka.ui.exception.TopicRecreationException;
|
||||
import com.provectus.kafka.ui.exception.ValidationException;
|
||||
import com.provectus.kafka.ui.model.Feature;
|
||||
import com.provectus.kafka.ui.model.ClusterFeature;
|
||||
import com.provectus.kafka.ui.model.InternalLogDirStats;
|
||||
import com.provectus.kafka.ui.model.InternalPartition;
|
||||
import com.provectus.kafka.ui.model.InternalPartitionsOffsets;
|
||||
|
@ -162,9 +162,14 @@ public class TopicsService {
|
|||
}
|
||||
|
||||
public Mono<List<ConfigEntry>> getTopicConfigs(KafkaCluster cluster, String topicName) {
|
||||
// there 2 case that we cover here:
|
||||
// 1. topic not found/visible - describeTopic() will be empty and we will throw TopicNotFoundException
|
||||
// 2. topic is visible, but we don't have DESCRIBE_CONFIG permission - we should return empty list
|
||||
return adminClientService.get(cluster)
|
||||
.flatMap(ac -> ac.getTopicsConfig(List.of(topicName), true))
|
||||
.map(m -> m.values().stream().findFirst().orElseThrow(TopicNotFoundException::new));
|
||||
.flatMap(ac -> ac.describeTopic(topicName)
|
||||
.switchIfEmpty(Mono.error(new TopicNotFoundException()))
|
||||
.then(ac.getTopicsConfig(List.of(topicName), true))
|
||||
.map(m -> m.values().stream().findFirst().orElse(List.of())));
|
||||
}
|
||||
|
||||
private Mono<InternalTopic> createTopic(KafkaCluster c, ReactiveAdminClient adminClient,
|
||||
|
@ -417,7 +422,7 @@ public class TopicsService {
|
|||
}
|
||||
|
||||
public Mono<Void> deleteTopic(KafkaCluster cluster, String topicName) {
|
||||
if (statisticsCache.get(cluster).getFeatures().contains(Feature.TOPIC_DELETION)) {
|
||||
if (statisticsCache.get(cluster).getFeatures().contains(ClusterFeature.TOPIC_DELETION)) {
|
||||
return adminClientService.get(cluster).flatMap(c -> c.deleteTopic(topicName))
|
||||
.doOnSuccess(t -> statisticsCache.onTopicDelete(cluster, topicName));
|
||||
} else {
|
||||
|
|
|
@@ -0,0 +1,167 @@
|
|||
package com.provectus.kafka.ui.service.integration.odd;
|
||||
|
||||
import com.provectus.kafka.ui.model.ConnectorTypeDTO;
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Objects;
|
||||
import java.util.function.Function;
|
||||
import java.util.stream.Stream;
|
||||
import javax.annotation.Nullable;
|
||||
import org.apache.commons.collections.CollectionUtils;
|
||||
import org.opendatadiscovery.oddrn.JdbcUrlParser;
|
||||
import org.opendatadiscovery.oddrn.model.HivePath;
|
||||
import org.opendatadiscovery.oddrn.model.MysqlPath;
|
||||
import org.opendatadiscovery.oddrn.model.PostgreSqlPath;
|
||||
import org.opendatadiscovery.oddrn.model.SnowflakePath;
|
||||
|
||||
record ConnectorInfo(List<String> inputs,
|
||||
List<String> outputs) {
|
||||
|
||||
static ConnectorInfo extract(String className,
|
||||
ConnectorTypeDTO type,
|
||||
Map<String, Object> config,
|
||||
List<String> topicsFromApi, // can be empty for old Connect API versions
|
||||
Function<String, String> topicOddrnBuilder) {
|
||||
return switch (className) {
|
||||
case "org.apache.kafka.connect.file.FileStreamSinkConnector",
|
||||
"org.apache.kafka.connect.file.FileStreamSourceConnector",
|
||||
"FileStreamSource",
|
||||
"FileStreamSink" -> extractFileIoConnector(type, topicsFromApi, config, topicOddrnBuilder);
|
||||
case "io.confluent.connect.s3.S3SinkConnector" -> extractS3Sink(type, topicsFromApi, config, topicOddrnBuilder);
|
||||
case "io.confluent.connect.jdbc.JdbcSinkConnector" ->
|
||||
extractJdbcSink(type, topicsFromApi, config, topicOddrnBuilder);
|
||||
case "io.debezium.connector.postgresql.PostgresConnector" -> extractDebeziumPg(config);
|
||||
case "io.debezium.connector.mysql.MySqlConnector" -> extractDebeziumMysql(config);
|
||||
default -> new ConnectorInfo(
|
||||
extractInputs(type, topicsFromApi, config, topicOddrnBuilder),
|
||||
extractOutputs(type, topicsFromApi, config, topicOddrnBuilder)
|
||||
);
|
||||
};
|
||||
}
|
||||
|
||||
private static ConnectorInfo extractFileIoConnector(ConnectorTypeDTO type,
|
||||
List<String> topics,
|
||||
Map<String, Object> config,
|
||||
Function<String, String> topicOddrnBuilder) {
|
||||
return new ConnectorInfo(
|
||||
extractInputs(type, topics, config, topicOddrnBuilder),
|
||||
extractOutputs(type, topics, config, topicOddrnBuilder)
|
||||
);
|
||||
}
|
||||
|
||||
private static ConnectorInfo extractJdbcSink(ConnectorTypeDTO type,
|
||||
List<String> topics,
|
||||
Map<String, Object> config,
|
||||
Function<String, String> topicOddrnBuilder) {
|
||||
String tableNameFormat = (String) config.getOrDefault("table.name.format", "${topic}");
|
||||
List<String> targetTables = extractTopicNamesBestEffort(topics, config)
|
||||
.map(topic -> tableNameFormat.replace("${topic}", topic))
|
||||
.toList();
|
||||
|
||||
String connectionUrl = (String) config.get("connection.url");
|
||||
List<String> outputs = new ArrayList<>();
|
||||
@Nullable var knownJdbcPath = new JdbcUrlParser().parse(connectionUrl);
|
||||
if (knownJdbcPath instanceof PostgreSqlPath p) {
|
||||
targetTables.forEach(t -> outputs.add(p.toBuilder().table(t).build().oddrn()));
|
||||
}
|
||||
if (knownJdbcPath instanceof MysqlPath p) {
|
||||
targetTables.forEach(t -> outputs.add(p.toBuilder().table(t).build().oddrn()));
|
||||
}
|
||||
if (knownJdbcPath instanceof HivePath p) {
|
||||
targetTables.forEach(t -> outputs.add(p.toBuilder().table(t).build().oddrn()));
|
||||
}
|
||||
if (knownJdbcPath instanceof SnowflakePath p) {
|
||||
targetTables.forEach(t -> outputs.add(p.toBuilder().table(t).build().oddrn()));
|
||||
}
|
||||
return new ConnectorInfo(
|
||||
extractInputs(type, topics, config, topicOddrnBuilder),
|
||||
outputs
|
||||
);
|
||||
}
|
||||
|
||||
private static ConnectorInfo extractDebeziumPg(Map<String, Object> config) {
|
||||
String host = (String) config.get("database.hostname");
|
||||
String dbName = (String) config.get("database.dbname");
|
||||
var inputs = List.of(
|
||||
PostgreSqlPath.builder()
|
||||
.host(host)
|
||||
.database(dbName)
|
||||
.build().oddrn()
|
||||
);
|
||||
return new ConnectorInfo(inputs, List.of());
|
||||
}
|
||||
|
||||
private static ConnectorInfo extractDebeziumMysql(Map<String, Object> config) {
|
||||
String host = (String) config.get("database.hostname");
|
||||
var inputs = List.of(
|
||||
MysqlPath.builder()
|
||||
.host(host)
|
||||
.build()
|
||||
.oddrn()
|
||||
);
|
||||
return new ConnectorInfo(inputs, List.of());
|
||||
}
|
||||
|
||||
private static ConnectorInfo extractS3Sink(ConnectorTypeDTO type,
|
||||
List<String> topics,
|
||||
Map<String, Object> config,
|
||||
Function<String, String> topicOrrdnBuilder) {
|
||||
String bucketName = (String) config.get("s3.bucket.name");
|
||||
String topicsDir = (String) config.getOrDefault("topics.dir", "topics");
|
||||
String directoryDelim = (String) config.getOrDefault("directory.delim", "/");
|
||||
List<String> outputs = extractTopicNamesBestEffort(topics, config)
|
||||
.map(topic -> Oddrn.awsS3Oddrn(bucketName, topicsDir + directoryDelim + topic))
|
||||
.toList();
|
||||
return new ConnectorInfo(
|
||||
extractInputs(type, topics, config, topicOrrdnBuilder),
|
||||
outputs
|
||||
);
|
||||
}
|
||||
|
||||
private static List<String> extractInputs(ConnectorTypeDTO type,
|
||||
List<String> topicsFromApi,
|
||||
Map<String, Object> config,
|
||||
Function<String, String> topicOrrdnBuilder) {
|
||||
return type == ConnectorTypeDTO.SINK
|
||||
? extractTopicsOddrns(config, topicsFromApi, topicOrrdnBuilder)
|
||||
: List.of();
|
||||
}
|
||||
|
||||
private static List<String> extractOutputs(ConnectorTypeDTO type,
|
||||
List<String> topicsFromApi,
|
||||
Map<String, Object> config,
|
||||
Function<String, String> topicOrrdnBuilder) {
|
||||
return type == ConnectorTypeDTO.SOURCE
|
||||
? extractTopicsOddrns(config, topicsFromApi, topicOrrdnBuilder)
|
||||
: List.of();
|
||||
}
|
||||
|
||||
private static Stream<String> extractTopicNamesBestEffort(
|
||||
// topic list can be empty for old Connect API versions
|
||||
List<String> topicsFromApi,
|
||||
Map<String, Object> config
|
||||
) {
|
||||
if (CollectionUtils.isNotEmpty(topicsFromApi)) {
|
||||
return topicsFromApi.stream();
|
||||
}
|
||||
|
||||
// trying to extract topic names from config
|
||||
String topicsString = (String) config.get("topics");
|
||||
String topicString = (String) config.get("topic");
|
||||
return Stream.of(topicsString, topicString)
|
||||
.filter(Objects::nonNull)
|
||||
.flatMap(str -> Stream.of(str.split(",")))
|
||||
.map(String::trim)
|
||||
.filter(s -> !s.isBlank());
|
||||
}
|
||||
|
||||
private static List<String> extractTopicsOddrns(Map<String, Object> config,
|
||||
List<String> topicsFromApi,
|
||||
Function<String, String> topicOrrdnBuilder) {
|
||||
return extractTopicNamesBestEffort(topicsFromApi, config)
|
||||
.map(topicOrrdnBuilder)
|
||||
.toList();
|
||||
}
|
||||
|
||||
}
|
|
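A hedged, self-contained illustration of the best-effort topic extraction above: prefer the topic list reported by the Connect API, otherwise fall back to splitting the connector's topics/topic config values. Names below are illustrative, not project code.

import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.stream.Stream;

class TopicExtractionSketch {
  static List<String> topicsBestEffort(List<String> topicsFromApi, Map<String, Object> config) {
    if (topicsFromApi != null && !topicsFromApi.isEmpty()) {
      return topicsFromApi;
    }
    String topics = (String) config.get("topics");
    String topic = (String) config.get("topic");
    return Stream.of(topics, topic)
        .filter(Objects::nonNull)
        .flatMap(s -> Stream.of(s.split(",")))
        .map(String::trim)
        .filter(s -> !s.isBlank())
        .toList();
  }

  public static void main(String[] args) {
    Map<String, Object> config = Map.of("topics", "orders, payments");
    // Prints [orders, payments]
    System.out.println(topicsBestEffort(List.of(), config));
  }
}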
@@ -0,0 +1,96 @@
|
|||
package com.provectus.kafka.ui.service.integration.odd;
|
||||
|
||||
import com.provectus.kafka.ui.connect.model.ConnectorTopics;
|
||||
import com.provectus.kafka.ui.model.ConnectDTO;
|
||||
import com.provectus.kafka.ui.model.ConnectorDTO;
|
||||
import com.provectus.kafka.ui.model.KafkaCluster;
|
||||
import com.provectus.kafka.ui.service.KafkaConnectService;
|
||||
import java.net.URI;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import lombok.RequiredArgsConstructor;
|
||||
import org.opendatadiscovery.client.model.DataEntity;
|
||||
import org.opendatadiscovery.client.model.DataEntityList;
|
||||
import org.opendatadiscovery.client.model.DataEntityType;
|
||||
import org.opendatadiscovery.client.model.DataSource;
|
||||
import org.opendatadiscovery.client.model.DataTransformer;
|
||||
import org.opendatadiscovery.client.model.MetadataExtension;
|
||||
import reactor.core.publisher.Flux;
|
||||
|
||||
@RequiredArgsConstructor
|
||||
class ConnectorsExporter {
|
||||
|
||||
private final KafkaConnectService kafkaConnectService;
|
||||
|
||||
Flux<DataEntityList> export(KafkaCluster cluster) {
|
||||
return kafkaConnectService.getConnects(cluster)
|
||||
.flatMap(connect -> kafkaConnectService.getConnectorNames(cluster, connect.getName())
|
||||
.flatMap(connectorName -> kafkaConnectService.getConnector(cluster, connect.getName(), connectorName))
|
||||
.flatMap(connectorDTO ->
|
||||
kafkaConnectService.getConnectorTopics(cluster, connect.getName(), connectorDTO.getName())
|
||||
.map(topics -> createConnectorDataEntity(cluster, connect, connectorDTO, topics)))
|
||||
.buffer(100)
|
||||
.map(connectDataEntities -> {
|
||||
String dsOddrn = Oddrn.connectDataSourceOddrn(connect.getAddress());
|
||||
return new DataEntityList()
|
||||
.dataSourceOddrn(dsOddrn)
|
||||
.items(connectDataEntities);
|
||||
})
|
||||
);
|
||||
}
|
||||
|
||||
Flux<DataSource> getConnectDataSources(KafkaCluster cluster) {
|
||||
return kafkaConnectService.getConnects(cluster)
|
||||
.map(ConnectorsExporter::toDataSource);
|
||||
}
|
||||
|
||||
private static DataSource toDataSource(ConnectDTO connect) {
|
||||
return new DataSource()
|
||||
.oddrn(Oddrn.connectDataSourceOddrn(connect.getAddress()))
|
||||
.name(connect.getName())
|
||||
.description("Kafka Connect");
|
||||
}
|
||||
|
||||
private static DataEntity createConnectorDataEntity(KafkaCluster cluster,
|
||||
ConnectDTO connect,
|
||||
ConnectorDTO connector,
|
||||
ConnectorTopics connectorTopics) {
|
||||
var metadata = new HashMap<>(extractMetadata(connector));
|
||||
metadata.put("type", connector.getType().name());
|
||||
|
||||
var info = extractConnectorInfo(cluster, connector, connectorTopics);
|
||||
DataTransformer transformer = new DataTransformer();
|
||||
transformer.setInputs(info.inputs());
|
||||
transformer.setOutputs(info.outputs());
|
||||
|
||||
return new DataEntity()
|
||||
.oddrn(Oddrn.connectorOddrn(connect.getAddress(), connector.getName()))
|
||||
.name(connector.getName())
|
||||
.description("Kafka Connector \"%s\" (%s)".formatted(connector.getName(), connector.getType()))
|
||||
.type(DataEntityType.JOB)
|
||||
.dataTransformer(transformer)
|
||||
.metadata(List.of(
|
||||
new MetadataExtension()
|
||||
.schemaUrl(URI.create("wontbeused.oops"))
|
||||
.metadata(metadata)));
|
||||
}
|
||||
|
||||
private static Map<String, Object> extractMetadata(ConnectorDTO connector) {
|
||||
// will be sanitized by KafkaConfigSanitizer (if it's enabled)
|
||||
return connector.getConfig();
|
||||
}
|
||||
|
||||
private static ConnectorInfo extractConnectorInfo(KafkaCluster cluster,
|
||||
ConnectorDTO connector,
|
||||
ConnectorTopics topics) {
|
||||
return ConnectorInfo.extract(
|
||||
(String) connector.getConfig().get("connector.class"),
|
||||
connector.getType(),
|
||||
connector.getConfig(),
|
||||
topics.getTopics(),
|
||||
topic -> Oddrn.topicOddrn(cluster, topic)
|
||||
);
|
||||
}
|
||||
|
||||
}
|
|
@@ -0,0 +1,106 @@
|
|||
package com.provectus.kafka.ui.service.integration.odd;
|
||||
|
||||
import com.google.common.annotations.VisibleForTesting;
|
||||
import com.google.common.base.Preconditions;
|
||||
import com.provectus.kafka.ui.model.KafkaCluster;
|
||||
import com.provectus.kafka.ui.service.KafkaConnectService;
|
||||
import com.provectus.kafka.ui.service.StatisticsCache;
|
||||
import java.util.List;
|
||||
import java.util.function.Predicate;
|
||||
import java.util.regex.Pattern;
|
||||
import lombok.SneakyThrows;
|
||||
import org.opendatadiscovery.client.ApiClient;
|
||||
import org.opendatadiscovery.client.api.OpenDataDiscoveryIngestionApi;
|
||||
import org.opendatadiscovery.client.model.DataEntity;
|
||||
import org.opendatadiscovery.client.model.DataEntityList;
|
||||
import org.opendatadiscovery.client.model.DataSource;
|
||||
import org.opendatadiscovery.client.model.DataSourceList;
|
||||
import org.springframework.http.HttpHeaders;
|
||||
import reactor.core.publisher.Mono;
|
||||
|
||||
class OddExporter {
|
||||
|
||||
private final OpenDataDiscoveryIngestionApi oddApi;
|
||||
private final TopicsExporter topicsExporter;
|
||||
private final ConnectorsExporter connectorsExporter;
|
||||
|
||||
public OddExporter(StatisticsCache statisticsCache,
|
||||
KafkaConnectService connectService,
|
||||
OddIntegrationProperties oddIntegrationProperties) {
|
||||
this(
|
||||
createApiClient(oddIntegrationProperties),
|
||||
new TopicsExporter(createTopicsFilter(oddIntegrationProperties), statisticsCache),
|
||||
new ConnectorsExporter(connectService)
|
||||
);
|
||||
}
|
||||
|
||||
@VisibleForTesting
|
||||
OddExporter(OpenDataDiscoveryIngestionApi oddApi,
|
||||
TopicsExporter topicsExporter,
|
||||
ConnectorsExporter connectorsExporter) {
|
||||
this.oddApi = oddApi;
|
||||
this.topicsExporter = topicsExporter;
|
||||
this.connectorsExporter = connectorsExporter;
|
||||
}
|
||||
|
||||
private static Predicate<String> createTopicsFilter(OddIntegrationProperties properties) {
|
||||
if (properties.getTopicsRegex() == null) {
|
||||
return topic -> !topic.startsWith("_");
|
||||
}
|
||||
Pattern pattern = Pattern.compile(properties.getTopicsRegex());
|
||||
return topic -> pattern.matcher(topic).matches();
|
||||
}
|
||||
|
||||
private static OpenDataDiscoveryIngestionApi createApiClient(OddIntegrationProperties properties) {
|
||||
Preconditions.checkNotNull(properties.getUrl(), "ODD url not set");
|
||||
Preconditions.checkNotNull(properties.getToken(), "ODD token not set");
|
||||
var apiClient = new ApiClient()
|
||||
.setBasePath(properties.getUrl())
|
||||
.addDefaultHeader(HttpHeaders.AUTHORIZATION, "Bearer " + properties.getToken());
|
||||
return new OpenDataDiscoveryIngestionApi(apiClient);
|
||||
}
|
||||
|
||||
public Mono<Void> export(KafkaCluster cluster) {
|
||||
return exportTopics(cluster)
|
||||
.then(exportKafkaConnects(cluster));
|
||||
}
|
||||
|
||||
private Mono<Void> exportTopics(KafkaCluster c) {
|
||||
return createKafkaDataSource(c)
|
||||
.thenMany(topicsExporter.export(c))
|
||||
.concatMap(this::sentDataEntities)
|
||||
.then();
|
||||
}
|
||||
|
||||
private Mono<Void> exportKafkaConnects(KafkaCluster cluster) {
|
||||
return createConnectDataSources(cluster)
|
||||
.thenMany(connectorsExporter.export(cluster))
|
||||
.concatMap(this::sentDataEntities)
|
||||
.then();
|
||||
}
|
||||
|
||||
private Mono<Void> createConnectDataSources(KafkaCluster cluster) {
|
||||
return connectorsExporter.getConnectDataSources(cluster)
|
||||
.buffer(100)
|
||||
.concatMap(dataSources -> oddApi.createDataSource(new DataSourceList().items(dataSources)))
|
||||
.then();
|
||||
}
|
||||
|
||||
private Mono<Void> createKafkaDataSource(KafkaCluster cluster) {
|
||||
String clusterOddrn = Oddrn.clusterOddrn(cluster);
|
||||
return oddApi.createDataSource(
|
||||
new DataSourceList()
|
||||
.addItemsItem(
|
||||
new DataSource()
|
||||
.oddrn(clusterOddrn)
|
||||
.name(cluster.getName())
|
||||
.description("Kafka cluster")
|
||||
)
|
||||
);
|
||||
}
|
||||
|
||||
private Mono<Void> sentDataEntities(DataEntityList dataEntityList) {
|
||||
return oddApi.postDataEntityList(dataEntityList);
|
||||
}
|
||||
|
||||
}
|
|
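A quick hedged illustration of the default topic filter built in OddExporter above: with no regex configured, internal topics (names starting with an underscore) are skipped; with one, only matching topics are exported. The class name is invented for the example.

import java.util.function.Predicate;
import java.util.regex.Pattern;

class TopicsFilterSketch {
  static Predicate<String> topicsFilter(String topicsRegex) {
    if (topicsRegex == null) {
      return topic -> !topic.startsWith("_"); // skip internal topics such as __consumer_offsets
    }
    Pattern pattern = Pattern.compile(topicsRegex);
    return topic -> pattern.matcher(topic).matches();
  }

  public static void main(String[] args) {
    Predicate<String> defaultFilter = topicsFilter(null);
    System.out.println(defaultFilter.test("__consumer_offsets")); // false
    System.out.println(defaultFilter.test("orders"));             // true
  }
}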
@@ -0,0 +1,27 @@
package com.provectus.kafka.ui.service.integration.odd;

import com.provectus.kafka.ui.service.ClustersStorage;
import lombok.RequiredArgsConstructor;
import org.springframework.scheduling.annotation.Scheduled;
import reactor.core.publisher.Flux;
import reactor.core.scheduler.Schedulers;

@RequiredArgsConstructor
class OddExporterScheduler {

  private final ClustersStorage clustersStorage;
  private final OddExporter oddExporter;

  @Scheduled(fixedRateString = "${kafka.send-stats-to-odd-millis:30000}")
  public void sendMetricsToOdd() {
    Flux.fromIterable(clustersStorage.getKafkaClusters())
        .parallel()
        .runOn(Schedulers.parallel())
        .flatMap(oddExporter::export)
        .then()
        .block();
  }


}
@@ -0,0 +1,31 @@
package com.provectus.kafka.ui.service.integration.odd;

import com.provectus.kafka.ui.service.ClustersStorage;
import com.provectus.kafka.ui.service.KafkaConnectService;
import com.provectus.kafka.ui.service.StatisticsCache;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

@Configuration
@ConditionalOnProperty(value = "integration.odd.url")
class OddIntegrationConfig {

  @Bean
  OddIntegrationProperties oddIntegrationProperties() {
    return new OddIntegrationProperties();
  }

  @Bean
  OddExporter oddExporter(StatisticsCache statisticsCache,
                          KafkaConnectService connectService,
                          OddIntegrationProperties oddIntegrationProperties) {
    return new OddExporter(statisticsCache, connectService, oddIntegrationProperties);
  }

  @Bean
  OddExporterScheduler oddExporterScheduler(ClustersStorage storage, OddExporter exporter) {
    return new OddExporterScheduler(storage, exporter);
  }

}
@@ -0,0 +1,15 @@
package com.provectus.kafka.ui.service.integration.odd;

import lombok.Data;
import org.springframework.boot.context.properties.ConfigurationProperties;


@Data
@ConfigurationProperties("integration.odd")
public class OddIntegrationProperties {

  String url;
  String token;
  String topicsRegex;

}
@@ -0,0 +1,79 @@
|
|||
package com.provectus.kafka.ui.service.integration.odd;
|
||||
|
||||
import com.provectus.kafka.ui.model.KafkaCluster;
|
||||
import java.net.URI;
|
||||
import java.util.stream.Collectors;
|
||||
import java.util.stream.Stream;
|
||||
import lombok.experimental.UtilityClass;
|
||||
import org.opendatadiscovery.oddrn.Generator;
|
||||
import org.opendatadiscovery.oddrn.model.AwsS3Path;
|
||||
import org.opendatadiscovery.oddrn.model.KafkaConnectorPath;
|
||||
import org.opendatadiscovery.oddrn.model.KafkaPath;
|
||||
|
||||
@UtilityClass
|
||||
public class Oddrn {
|
||||
|
||||
private static final Generator GENERATOR = new Generator();
|
||||
|
||||
String clusterOddrn(KafkaCluster cluster) {
|
||||
return KafkaPath.builder()
|
||||
.cluster(bootstrapServersForOddrn(cluster.getBootstrapServers()))
|
||||
.build()
|
||||
.oddrn();
|
||||
}
|
||||
|
||||
KafkaPath topicOddrnPath(KafkaCluster cluster, String topic) {
|
||||
return KafkaPath.builder()
|
||||
.cluster(bootstrapServersForOddrn(cluster.getBootstrapServers()))
|
||||
.topic(topic)
|
||||
.build();
|
||||
}
|
||||
|
||||
String topicOddrn(KafkaCluster cluster, String topic) {
|
||||
return topicOddrnPath(cluster, topic).oddrn();
|
||||
}
|
||||
|
||||
String awsS3Oddrn(String bucket, String key) {
|
||||
return AwsS3Path.builder()
|
||||
.bucket(bucket)
|
||||
.key(key)
|
||||
.build()
|
||||
.oddrn();
|
||||
}
|
||||
|
||||
String connectDataSourceOddrn(String connectUrl) {
|
||||
return KafkaConnectorPath.builder()
|
||||
.host(normalizedConnectHosts(connectUrl))
|
||||
.build()
|
||||
.oddrn();
|
||||
}
|
||||
|
||||
private String normalizedConnectHosts(String connectUrlStr) {
|
||||
return Stream.of(connectUrlStr.split(","))
|
||||
.map(String::trim)
|
||||
.sorted()
|
||||
.map(url -> {
|
||||
var uri = URI.create(url);
|
||||
String host = uri.getHost();
|
||||
String portSuffix = (uri.getPort() > 0 ? (":" + uri.getPort()) : "");
|
||||
return host + portSuffix;
|
||||
})
|
||||
.collect(Collectors.joining(","));
|
||||
}
|
||||
|
||||
String connectorOddrn(String connectUrl, String connectorName) {
|
||||
return KafkaConnectorPath.builder()
|
||||
.host(normalizedConnectHosts(connectUrl))
|
||||
.connector(connectorName)
|
||||
.build()
|
||||
.oddrn();
|
||||
}
|
||||
|
||||
private String bootstrapServersForOddrn(String bootstrapServers) {
|
||||
return Stream.of(bootstrapServers.split(","))
|
||||
.map(String::trim)
|
||||
.sorted()
|
||||
.collect(Collectors.joining(","));
|
||||
}
|
||||
|
||||
}
|
|
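Illustration only, not part of the diff: the helpers above embed a normalized host list into each oddrn, so differently ordered or padded bootstrap-server strings resolve to the same cluster oddrn. A minimal sketch of that normalization step (class and sample values below are made up):

import java.util.stream.Collectors;
import java.util.stream.Stream;

class BootstrapServersNormalizationSketch {
  public static void main(String[] args) {
    // mirrors bootstrapServersForOddrn: split on commas, trim, sort, re-join
    String bootstrapServers = "kafka-2:9092 , kafka-1:9092";
    String normalized = Stream.of(bootstrapServers.split(","))
        .map(String::trim)
        .sorted()
        .collect(Collectors.joining(","));
    System.out.println(normalized); // prints: kafka-1:9092,kafka-2:9092
  }
}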
@ -0,0 +1,111 @@
package com.provectus.kafka.ui.service.integration.odd;

import com.google.common.collect.ImmutableMap;
import com.provectus.kafka.ui.model.KafkaCluster;
import com.provectus.kafka.ui.model.Statistics;
import com.provectus.kafka.ui.service.StatisticsCache;
import com.provectus.kafka.ui.service.integration.odd.schema.DataSetFieldsExtractors;
import java.net.URI;
import java.util.List;
import java.util.Map;
import java.util.function.Predicate;
import java.util.stream.Collectors;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.admin.ConfigEntry;
import org.apache.kafka.clients.admin.TopicDescription;
import org.opendatadiscovery.client.model.DataEntity;
import org.opendatadiscovery.client.model.DataEntityList;
import org.opendatadiscovery.client.model.DataEntityType;
import org.opendatadiscovery.client.model.DataSet;
import org.opendatadiscovery.client.model.DataSetField;
import org.opendatadiscovery.client.model.MetadataExtension;
import org.opendatadiscovery.oddrn.model.KafkaPath;
import org.springframework.web.reactive.function.client.WebClientResponseException;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;

@Slf4j
@RequiredArgsConstructor
class TopicsExporter {

  private final Predicate<String> topicFilter;
  private final StatisticsCache statisticsCache;

  Flux<DataEntityList> export(KafkaCluster cluster) {
    String clusterOddrn = Oddrn.clusterOddrn(cluster);
    Statistics stats = statisticsCache.get(cluster);
    return Flux.fromIterable(stats.getTopicDescriptions().keySet())
        .filter(topicFilter)
        .flatMap(topic -> createTopicDataEntity(cluster, topic, stats))
        .buffer(100)
        .map(topicsEntities ->
            new DataEntityList()
                .dataSourceOddrn(clusterOddrn)
                .items(topicsEntities));
  }

  private Mono<DataEntity> createTopicDataEntity(KafkaCluster cluster, String topic, Statistics stats) {
    KafkaPath topicOddrnPath = Oddrn.topicOddrnPath(cluster, topic);
    return
        Mono.zip(
                getTopicSchema(cluster, topic, topicOddrnPath, true),
                getTopicSchema(cluster, topic, topicOddrnPath, false)
            )
            .map(keyValueFields -> {
                  var dataset = new DataSet();
                  keyValueFields.getT1().forEach(dataset::addFieldListItem);
                  keyValueFields.getT2().forEach(dataset::addFieldListItem);
                  return new DataEntity()
                      .name(topic)
                      .description("Kafka topic \"%s\"".formatted(topic))
                      .oddrn(Oddrn.topicOddrn(cluster, topic))
                      .type(DataEntityType.KAFKA_TOPIC)
                      .dataset(dataset)
                      .addMetadataItem(
                          new MetadataExtension()
                              .schemaUrl(URI.create("wontbeused.oops"))
                              .metadata(getTopicMetadata(topic, stats)));
                }
            );
  }

  private Map<String, Object> getNonDefaultConfigs(String topic, Statistics stats) {
    List<ConfigEntry> config = stats.getTopicConfigs().get(topic);
    if (config == null) {
      return Map.of();
    }
    return config.stream()
        .filter(c -> c.source() == ConfigEntry.ConfigSource.DYNAMIC_TOPIC_CONFIG)
        .collect(Collectors.toMap(ConfigEntry::name, ConfigEntry::value));
  }

  private Map<String, Object> getTopicMetadata(String topic, Statistics stats) {
    TopicDescription topicDescription = stats.getTopicDescriptions().get(topic);
    return ImmutableMap.<String, Object>builder()
        .put("partitions", topicDescription.partitions().size())
        .put("replication_factor", topicDescription.partitions().get(0).replicas().size())
        .putAll(getNonDefaultConfigs(topic, stats))
        .build();
  }

  private Mono<List<DataSetField>> getTopicSchema(KafkaCluster cluster,
                                                  String topic,
                                                  KafkaPath topicOddrn,
                                                  //currently we only retrieve value schema
                                                  boolean isKey) {
    if (cluster.getSchemaRegistryClient() == null) {
      return Mono.just(List.of());
    }
    String subject = topic + (isKey ? "-key" : "-value");
    return cluster.getSchemaRegistryClient()
        .mono(client -> client.getSubjectVersion(subject, "latest"))
        .map(subj -> DataSetFieldsExtractors.extract(subj, topicOddrn, isKey))
        .onErrorResume(WebClientResponseException.NotFound.class, th -> Mono.just(List.of()))
        .onErrorResume(th -> true, th -> {
          log.warn("Error retrieving subject {} for cluster {}", subject, cluster.getName(), th);
          return Mono.just(List.of());
        });
  }

}
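Illustration only, not part of the diff: TopicsExporter is driven by an injected Predicate<String> topic filter. One plausible way to build that predicate from the integration.odd.topicsRegex property is sketched below; the wiring class that actually does this is outside this excerpt, and the names here are assumptions.

import java.util.function.Predicate;
import java.util.regex.Pattern;

class TopicFilterSketch {
  // hypothetical helper: turn an optional regex into the predicate TopicsExporter expects
  static Predicate<String> topicFilter(String topicsRegex) {
    if (topicsRegex == null || topicsRegex.isBlank()) {
      return topic -> true; // no regex configured -> export every topic
    }
    Pattern pattern = Pattern.compile(topicsRegex);
    return topic -> pattern.matcher(topic).matches();
  }

  public static void main(String[] args) {
    Predicate<String> filter = topicFilter("orders\\..*");
    System.out.println(filter.test("orders.created"));   // true
    System.out.println(filter.test("payments.created")); // false
  }
}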
@ -0,0 +1,262 @@
package com.provectus.kafka.ui.service.integration.odd.schema;

import com.google.common.collect.ImmutableSet;
import com.provectus.kafka.ui.service.integration.odd.Oddrn;
import com.provectus.kafka.ui.sr.model.SchemaSubject;
import java.util.ArrayList;
import java.util.List;
import lombok.experimental.UtilityClass;
import org.apache.avro.Schema;
import org.opendatadiscovery.client.model.DataSetField;
import org.opendatadiscovery.client.model.DataSetFieldType;
import org.opendatadiscovery.oddrn.model.KafkaPath;

@UtilityClass
class AvroExtractor {

  static List<DataSetField> extract(SchemaSubject subject, KafkaPath topicOddrn, boolean isKey) {
    var schema = new Schema.Parser().parse(subject.getSchema());
    List<DataSetField> result = new ArrayList<>();
    result.add(DataSetFieldsExtractors.rootField(topicOddrn, isKey));
    extract(
        schema,
        topicOddrn.oddrn() + "/columns/" + (isKey ? "key" : "value"),
        null,
        null,
        null,
        false,
        ImmutableSet.of(),
        result
    );
    return result;
  }

  private void extract(Schema schema,
                       String parentOddr,
                       String oddrn, //null for root
                       String name,
                       String doc,
                       Boolean nullable,
                       ImmutableSet<String> registeredRecords,
                       List<DataSetField> sink
  ) {
    switch (schema.getType()) {
      case RECORD -> extractRecord(schema, parentOddr, oddrn, name, doc, nullable, registeredRecords, sink);
      case UNION -> extractUnion(schema, parentOddr, oddrn, name, doc, registeredRecords, sink);
      case ARRAY -> extractArray(schema, parentOddr, oddrn, name, doc, nullable, registeredRecords, sink);
      case MAP -> extractMap(schema, parentOddr, oddrn, name, doc, nullable, registeredRecords, sink);
      default -> extractPrimitive(schema, parentOddr, oddrn, name, doc, nullable, sink);
    }
  }

  private DataSetField createDataSetField(String name,
                                          String doc,
                                          String parentOddrn,
                                          String oddrn,
                                          Schema schema,
                                          Boolean nullable) {
    return new DataSetField()
        .name(name)
        .description(doc)
        .parentFieldOddrn(parentOddrn)
        .oddrn(oddrn)
        .type(mapSchema(schema, nullable));
  }

  private void extractRecord(Schema schema,
                             String parentOddr,
                             String oddrn, //null for root
                             String name,
                             String doc,
                             Boolean nullable,
                             ImmutableSet<String> registeredRecords,
                             List<DataSetField> sink) {
    boolean isRoot = oddrn == null;
    if (!isRoot) {
      sink.add(createDataSetField(name, doc, parentOddr, oddrn, schema, nullable));
      if (registeredRecords.contains(schema.getFullName())) {
        // avoiding recursion by checking if record already registered in parsing chain
        return;
      }
    }
    var newRegisteredRecords = ImmutableSet.<String>builder()
        .addAll(registeredRecords)
        .add(schema.getFullName())
        .build();

    schema.getFields().forEach(f ->
        extract(
            f.schema(),
            isRoot ? parentOddr : oddrn,
            isRoot
                ? parentOddr + "/" + f.name()
                : oddrn + "/fields/" + f.name(),
            f.name(),
            f.doc(),
            false,
            newRegisteredRecords,
            sink
        ));
  }

  private void extractUnion(Schema schema,
                            String parentOddr,
                            String oddrn, //null for root
                            String name,
                            String doc,
                            ImmutableSet<String> registeredRecords,
                            List<DataSetField> sink) {
    boolean isRoot = oddrn == null;
    boolean containsNull = schema.getTypes().stream().map(Schema::getType).anyMatch(t -> t == Schema.Type.NULL);
    // if it is not root and there are only 2 values in the union (null and something else)
    // we register this field as optional without mentioning the union
    if (!isRoot && containsNull && schema.getTypes().size() == 2) {
      var nonNullSchema = schema.getTypes().stream()
          .filter(s -> s.getType() != Schema.Type.NULL)
          .findFirst()
          .orElseThrow(IllegalStateException::new);
      extract(
          nonNullSchema,
          parentOddr,
          oddrn,
          name,
          doc,
          true,
          registeredRecords,
          sink
      );
      return;
    }
    oddrn = isRoot ? parentOddr + "/union" : oddrn;
    if (isRoot) {
      sink.add(createDataSetField("Avro root union", doc, parentOddr, oddrn, schema, containsNull));
    } else {
      sink.add(createDataSetField(name, doc, parentOddr, oddrn, schema, containsNull));
    }
    for (Schema t : schema.getTypes()) {
      if (t.getType() != Schema.Type.NULL) {
        extract(
            t,
            oddrn,
            oddrn + "/values/" + t.getName(),
            t.getName(),
            t.getDoc(),
            containsNull,
            registeredRecords,
            sink
        );
      }
    }
  }

  private void extractArray(Schema schema,
                            String parentOddr,
                            String oddrn, //null for root
                            String name,
                            String doc,
                            Boolean nullable,
                            ImmutableSet<String> registeredRecords,
                            List<DataSetField> sink) {
    boolean isRoot = oddrn == null;
    oddrn = isRoot ? parentOddr + "/array" : oddrn;
    if (isRoot) {
      sink.add(createDataSetField("Avro root Array", doc, parentOddr, oddrn, schema, nullable));
    } else {
      sink.add(createDataSetField(name, doc, parentOddr, oddrn, schema, nullable));
    }
    extract(
        schema.getElementType(),
        oddrn,
        oddrn + "/items/" + schema.getElementType().getName(),
        schema.getElementType().getName(),
        schema.getElementType().getDoc(),
        false,
        registeredRecords,
        sink
    );
  }

  private void extractMap(Schema schema,
                          String parentOddr,
                          String oddrn, //null for root
                          String name,
                          String doc,
                          Boolean nullable,
                          ImmutableSet<String> registeredRecords,
                          List<DataSetField> sink) {
    boolean isRoot = oddrn == null;
    oddrn = isRoot ? parentOddr + "/map" : oddrn;
    if (isRoot) {
      sink.add(createDataSetField("Avro root map", doc, parentOddr, oddrn, schema, nullable));
    } else {
      sink.add(createDataSetField(name, doc, parentOddr, oddrn, schema, nullable));
    }
    extract(
        new Schema.Parser().parse("\"string\""),
        oddrn,
        oddrn + "/key",
        "key",
        null,
        nullable,
        registeredRecords,
        sink
    );
    extract(
        schema.getValueType(),
        oddrn,
        oddrn + "/value",
        "value",
        null,
        nullable,
        registeredRecords,
        sink
    );
  }


  private void extractPrimitive(Schema schema,
                                String parentOddr,
                                String oddrn, //null for root
                                String name,
                                String doc,
                                Boolean nullable,
                                List<DataSetField> sink) {
    boolean isRoot = oddrn == null;
    String primOddrn = isRoot ? (parentOddr + "/" + schema.getType()) : oddrn;
    if (isRoot) {
      sink.add(createDataSetField("Root avro " + schema.getType(),
          doc, parentOddr, primOddrn, schema, nullable));
    } else {
      sink.add(createDataSetField(name, doc, parentOddr, primOddrn, schema, nullable));
    }
  }

  private DataSetFieldType.TypeEnum mapType(Schema.Type type) {
    return switch (type) {
      case INT, LONG -> DataSetFieldType.TypeEnum.INTEGER;
      case FLOAT, DOUBLE, FIXED -> DataSetFieldType.TypeEnum.NUMBER;
      case STRING, ENUM -> DataSetFieldType.TypeEnum.STRING;
      case BOOLEAN -> DataSetFieldType.TypeEnum.BOOLEAN;
      case BYTES -> DataSetFieldType.TypeEnum.BINARY;
      case ARRAY -> DataSetFieldType.TypeEnum.LIST;
      case RECORD -> DataSetFieldType.TypeEnum.STRUCT;
      case MAP -> DataSetFieldType.TypeEnum.MAP;
      case UNION -> DataSetFieldType.TypeEnum.UNION;
      case NULL -> DataSetFieldType.TypeEnum.UNKNOWN;
    };
  }

  private DataSetFieldType mapSchema(Schema schema, Boolean nullable) {
    return new DataSetFieldType()
        .logicalType(logicalType(schema))
        .isNullable(nullable)
        .type(mapType(schema.getType()));
  }

  private String logicalType(Schema schema) {
    return schema.getType() == Schema.Type.RECORD
        ? schema.getFullName()
        : schema.getType().toString().toLowerCase();
  }

}
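Illustration only, not part of the diff: a made-up Avro schema run through the same Avro parser the extractor uses, to show the raw field shapes it walks over. Per the switch in extract(), a (null, string) union such as email below would be registered as a single optional STRING field rather than a union.

import org.apache.avro.Schema;

class AvroSchemaWalkSketch {
  public static void main(String[] args) {
    String avro = "{\"type\":\"record\",\"name\":\"User\",\"fields\":["
        + "{\"name\":\"id\",\"type\":\"long\"},"
        + "{\"name\":\"email\",\"type\":[\"null\",\"string\"]}]}";
    Schema schema = new Schema.Parser().parse(avro);
    for (Schema.Field f : schema.getFields()) {
      // AvroExtractor would map id to INTEGER and email to an optional STRING
      System.out.println(f.name() + " -> " + f.schema().getType());
    }
  }
}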
@ -0,0 +1,38 @@
package com.provectus.kafka.ui.service.integration.odd.schema;

import com.provectus.kafka.ui.service.integration.odd.Oddrn;
import com.provectus.kafka.ui.sr.model.SchemaSubject;
import com.provectus.kafka.ui.sr.model.SchemaType;
import java.util.List;
import java.util.Optional;
import lombok.experimental.UtilityClass;
import org.opendatadiscovery.client.model.DataSetField;
import org.opendatadiscovery.client.model.DataSetFieldType;
import org.opendatadiscovery.oddrn.model.KafkaPath;

@UtilityClass
public class DataSetFieldsExtractors {

  public List<DataSetField> extract(SchemaSubject subject, KafkaPath topicOddrn, boolean isKey) {
    SchemaType schemaType = Optional.ofNullable(subject.getSchemaType()).orElse(SchemaType.AVRO);
    return switch (schemaType) {
      case AVRO -> AvroExtractor.extract(subject, topicOddrn, isKey);
      case JSON -> JsonSchemaExtractor.extract(subject, topicOddrn, isKey);
      case PROTOBUF -> ProtoExtractor.extract(subject, topicOddrn, isKey);
    };
  }


  DataSetField rootField(KafkaPath topicOddrn, boolean isKey) {
    var rootOddrn = topicOddrn.oddrn() + "/columns/" + (isKey ? "key" : "value");
    return new DataSetField()
        .name(isKey ? "key" : "value")
        .description("Topic's " + (isKey ? "key" : "value") + " schema")
        .parentFieldOddrn(topicOddrn.oddrn())
        .oddrn(rootOddrn)
        .type(new DataSetFieldType()
            .type(DataSetFieldType.TypeEnum.STRUCT)
            .isNullable(true));
  }

}
@ -0,0 +1,311 @@
package com.provectus.kafka.ui.service.integration.odd.schema;

import com.google.common.collect.ImmutableSet;
import com.provectus.kafka.ui.service.integration.odd.Oddrn;
import com.provectus.kafka.ui.sr.model.SchemaSubject;
import io.confluent.kafka.schemaregistry.json.JsonSchema;
import java.net.URI;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import javax.annotation.Nullable;
import lombok.experimental.UtilityClass;
import org.everit.json.schema.ArraySchema;
import org.everit.json.schema.BooleanSchema;
import org.everit.json.schema.CombinedSchema;
import org.everit.json.schema.FalseSchema;
import org.everit.json.schema.NullSchema;
import org.everit.json.schema.NumberSchema;
import org.everit.json.schema.ObjectSchema;
import org.everit.json.schema.ReferenceSchema;
import org.everit.json.schema.Schema;
import org.everit.json.schema.StringSchema;
import org.everit.json.schema.TrueSchema;
import org.opendatadiscovery.client.model.DataSetField;
import org.opendatadiscovery.client.model.DataSetFieldType;
import org.opendatadiscovery.client.model.MetadataExtension;
import org.opendatadiscovery.oddrn.model.KafkaPath;

@UtilityClass
class JsonSchemaExtractor {

  static List<DataSetField> extract(SchemaSubject subject, KafkaPath topicOddrn, boolean isKey) {
    Schema schema = new JsonSchema(subject.getSchema()).rawSchema();
    List<DataSetField> result = new ArrayList<>();
    result.add(DataSetFieldsExtractors.rootField(topicOddrn, isKey));
    extract(
        schema,
        topicOddrn.oddrn() + "/columns/" + (isKey ? "key" : "value"),
        null,
        null,
        null,
        ImmutableSet.of(),
        result
    );
    return result;
  }

  private void extract(Schema schema,
                       String parentOddr,
                       String oddrn, //null for root
                       String name,
                       Boolean nullable,
                       ImmutableSet<String> registeredRecords,
                       List<DataSetField> sink) {
    if (schema instanceof ReferenceSchema s) {
      Optional.ofNullable(s.getReferredSchema())
          .ifPresent(refSchema -> extract(refSchema, parentOddr, oddrn, name, nullable, registeredRecords, sink));
    } else if (schema instanceof ObjectSchema s) {
      extractObject(s, parentOddr, oddrn, name, nullable, registeredRecords, sink);
    } else if (schema instanceof ArraySchema s) {
      extractArray(s, parentOddr, oddrn, name, nullable, registeredRecords, sink);
    } else if (schema instanceof CombinedSchema cs) {
      extractCombined(cs, parentOddr, oddrn, name, nullable, registeredRecords, sink);
    } else if (schema instanceof BooleanSchema
        || schema instanceof NumberSchema
        || schema instanceof StringSchema
        || schema instanceof NullSchema
    ) {
      extractPrimitive(schema, parentOddr, oddrn, name, nullable, sink);
    } else {
      extractUnknown(schema, parentOddr, oddrn, name, nullable, sink);
    }
  }

  private void extractPrimitive(Schema schema,
                                String parentOddr,
                                String oddrn, //null for root
                                String name,
                                Boolean nullable,
                                List<DataSetField> sink) {
    boolean isRoot = oddrn == null;
    sink.add(
        createDataSetField(
            schema,
            isRoot ? "Root JSON primitive" : name,
            parentOddr,
            isRoot ? (parentOddr + "/" + logicalTypeName(schema)) : oddrn,
            mapType(schema),
            logicalTypeName(schema),
            nullable
        )
    );
  }

  private void extractUnknown(Schema schema,
                              String parentOddr,
                              String oddrn, //null for root
                              String name,
                              Boolean nullable,
                              List<DataSetField> sink) {
    boolean isRoot = oddrn == null;
    sink.add(
        createDataSetField(
            schema,
            isRoot ? "Root type " + logicalTypeName(schema) : name,
            parentOddr,
            isRoot ? (parentOddr + "/" + logicalTypeName(schema)) : oddrn,
            DataSetFieldType.TypeEnum.UNKNOWN,
            logicalTypeName(schema),
            nullable
        )
    );
  }

  private void extractObject(ObjectSchema schema,
                             String parentOddr,
                             String oddrn, //null for root
                             String name,
                             Boolean nullable,
                             ImmutableSet<String> registeredRecords,
                             List<DataSetField> sink) {
    boolean isRoot = oddrn == null;
    // schemaLocation can be null for empty object schemas (e.g. when the schema is used inside anyOf)
    @Nullable var schemaLocation = schema.getSchemaLocation();
    if (!isRoot) {
      sink.add(createDataSetField(
          schema,
          name,
          parentOddr,
          oddrn,
          DataSetFieldType.TypeEnum.STRUCT,
          logicalTypeName(schema),
          nullable
      ));
      if (schemaLocation != null && registeredRecords.contains(schemaLocation)) {
        // avoiding recursion by checking if record already registered in parsing chain
        return;
      }
    }

    var newRegisteredRecords = schemaLocation == null
        ? registeredRecords
        : ImmutableSet.<String>builder()
            .addAll(registeredRecords)
            .add(schemaLocation)
            .build();

    schema.getPropertySchemas().forEach((propertyName, propertySchema) -> {
      boolean required = schema.getRequiredProperties().contains(propertyName);
      extract(
          propertySchema,
          isRoot ? parentOddr : oddrn,
          isRoot
              ? parentOddr + "/" + propertyName
              : oddrn + "/fields/" + propertyName,
          propertyName,
          !required,
          newRegisteredRecords,
          sink
      );
    });
  }

  private void extractArray(ArraySchema schema,
                            String parentOddr,
                            String oddrn, //null for root
                            String name,
                            Boolean nullable,
                            ImmutableSet<String> registeredRecords,
                            List<DataSetField> sink) {
    boolean isRoot = oddrn == null;
    oddrn = isRoot ? parentOddr + "/array" : oddrn;
    if (isRoot) {
      sink.add(
          createDataSetField(
              schema,
              "Json array root",
              parentOddr,
              oddrn,
              DataSetFieldType.TypeEnum.LIST,
              "array",
              nullable
          ));
    } else {
      sink.add(
          createDataSetField(
              schema,
              name,
              parentOddr,
              oddrn,
              DataSetFieldType.TypeEnum.LIST,
              "array",
              nullable
          ));
    }
    @Nullable var itemsSchema = schema.getAllItemSchema();
    if (itemsSchema != null) {
      extract(
          itemsSchema,
          oddrn,
          oddrn + "/items/" + logicalTypeName(itemsSchema),
          logicalTypeName(itemsSchema),
          false,
          registeredRecords,
          sink
      );
    }
  }

  private void extractCombined(CombinedSchema schema,
                               String parentOddr,
                               String oddrn, //null for root
                               String name,
                               Boolean nullable,
                               ImmutableSet<String> registeredRecords,
                               List<DataSetField> sink) {
    String combineType = "unknown";
    if (schema.getCriterion() == CombinedSchema.ALL_CRITERION) {
      combineType = "allOf";
    }
    if (schema.getCriterion() == CombinedSchema.ANY_CRITERION) {
      combineType = "anyOf";
    }
    if (schema.getCriterion() == CombinedSchema.ONE_CRITERION) {
      combineType = "oneOf";
    }

    boolean isRoot = oddrn == null;
    oddrn = isRoot ? (parentOddr + "/" + combineType) : (oddrn + "/" + combineType);
    sink.add(
        createDataSetField(
            schema,
            isRoot ? "Root %s".formatted(combineType) : name,
            parentOddr,
            oddrn,
            DataSetFieldType.TypeEnum.UNION,
            combineType,
            nullable
        ).addMetadataItem(new MetadataExtension()
            .schemaUrl(URI.create("wontbeused.oops"))
            .metadata(Map.of("criterion", combineType)))
    );

    for (Schema subschema : schema.getSubschemas()) {
      extract(
          subschema,
          oddrn,
          oddrn + "/values/" + logicalTypeName(subschema),
          logicalTypeName(subschema),
          nullable,
          registeredRecords,
          sink
      );
    }
  }

  private String getDescription(Schema schema) {
    return Optional.ofNullable(schema.getTitle())
        .orElse(schema.getDescription());
  }

  private String logicalTypeName(Schema schema) {
    return schema.getClass()
        .getSimpleName()
        .replace("Schema", "");
  }

  private DataSetField createDataSetField(Schema schema,
                                          String name,
                                          String parentOddrn,
                                          String oddrn,
                                          DataSetFieldType.TypeEnum type,
                                          String logicalType,
                                          Boolean nullable) {
    return new DataSetField()
        .name(name)
        .parentFieldOddrn(parentOddrn)
        .oddrn(oddrn)
        .description(getDescription(schema))
        .type(
            new DataSetFieldType()
                .isNullable(nullable)
                .logicalType(logicalType)
                .type(type)
        );
  }

  private DataSetFieldType.TypeEnum mapType(Schema type) {
    if (type instanceof NumberSchema) {
      return DataSetFieldType.TypeEnum.NUMBER;
    }
    if (type instanceof StringSchema) {
      return DataSetFieldType.TypeEnum.STRING;
    }
    if (type instanceof BooleanSchema || type instanceof TrueSchema || type instanceof FalseSchema) {
      return DataSetFieldType.TypeEnum.BOOLEAN;
    }
    if (type instanceof ObjectSchema) {
      return DataSetFieldType.TypeEnum.STRUCT;
    }
    if (type instanceof ReferenceSchema s) {
      return mapType(s.getReferredSchema());
    }
    if (type instanceof CombinedSchema) {
      return DataSetFieldType.TypeEnum.UNION;
    }
    return DataSetFieldType.TypeEnum.UNKNOWN;
  }

}
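Illustration only, not part of the diff: a tiny, made-up JSON Schema loaded through the same everit classes the extractor relies on. Per extractObject(), properties that are absent from "required" are exported as nullable fields.

import io.confluent.kafka.schemaregistry.json.JsonSchema;
import org.everit.json.schema.ObjectSchema;

class JsonSchemaRequiredSketch {
  public static void main(String[] args) {
    String json = "{\"type\":\"object\",\"properties\":{"
        + "\"name\":{\"type\":\"string\"},"
        + "\"age\":{\"type\":\"number\"}},"
        + "\"required\":[\"name\"]}";
    ObjectSchema schema = (ObjectSchema) new JsonSchema(json).rawSchema();
    schema.getPropertySchemas().forEach((property, propertySchema) ->
        // JsonSchemaExtractor would mark the non-required "age" property as nullable
        System.out.println(property + " required=" + schema.getRequiredProperties().contains(property)));
  }
}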
@ -0,0 +1,230 @@
package com.provectus.kafka.ui.service.integration.odd.schema;

import com.google.common.collect.ImmutableSet;
import com.google.protobuf.BoolValue;
import com.google.protobuf.BytesValue;
import com.google.protobuf.Descriptors;
import com.google.protobuf.Descriptors.Descriptor;
import com.google.protobuf.DoubleValue;
import com.google.protobuf.Duration;
import com.google.protobuf.FloatValue;
import com.google.protobuf.Int32Value;
import com.google.protobuf.Int64Value;
import com.google.protobuf.StringValue;
import com.google.protobuf.Timestamp;
import com.google.protobuf.UInt32Value;
import com.google.protobuf.UInt64Value;
import com.google.protobuf.Value;
import com.provectus.kafka.ui.service.integration.odd.Oddrn;
import com.provectus.kafka.ui.sr.model.SchemaSubject;
import io.confluent.kafka.schemaregistry.protobuf.ProtobufSchema;
import java.util.ArrayList;
import java.util.List;
import java.util.Set;
import lombok.experimental.UtilityClass;
import org.opendatadiscovery.client.model.DataSetField;
import org.opendatadiscovery.client.model.DataSetFieldType;
import org.opendatadiscovery.client.model.DataSetFieldType.TypeEnum;
import org.opendatadiscovery.oddrn.model.KafkaPath;

@UtilityClass
class ProtoExtractor {

  private static final Set<String> PRIMITIVES_WRAPPER_TYPE_NAMES = Set.of(
      BoolValue.getDescriptor().getFullName(),
      Int32Value.getDescriptor().getFullName(),
      UInt32Value.getDescriptor().getFullName(),
      Int64Value.getDescriptor().getFullName(),
      UInt64Value.getDescriptor().getFullName(),
      StringValue.getDescriptor().getFullName(),
      BytesValue.getDescriptor().getFullName(),
      FloatValue.getDescriptor().getFullName(),
      DoubleValue.getDescriptor().getFullName()
  );

  List<DataSetField> extract(SchemaSubject subject, KafkaPath topicOddrn, boolean isKey) {
    Descriptor schema = new ProtobufSchema(subject.getSchema()).toDescriptor();
    List<DataSetField> result = new ArrayList<>();
    result.add(DataSetFieldsExtractors.rootField(topicOddrn, isKey));
    var rootOddrn = topicOddrn.oddrn() + "/columns/" + (isKey ? "key" : "value");
    schema.getFields().forEach(f ->
        extract(f,
            rootOddrn,
            rootOddrn + "/" + f.getName(),
            f.getName(),
            !f.isRequired(),
            f.isRepeated(),
            ImmutableSet.of(schema.getFullName()),
            result
        ));
    return result;
  }

  private void extract(Descriptors.FieldDescriptor field,
                       String parentOddr,
                       String oddrn, //null for root
                       String name,
                       boolean nullable,
                       boolean repeated,
                       ImmutableSet<String> registeredRecords,
                       List<DataSetField> sink) {
    if (repeated) {
      extractRepeated(field, parentOddr, oddrn, name, nullable, registeredRecords, sink);
    } else if (field.getType() == Descriptors.FieldDescriptor.Type.MESSAGE) {
      extractMessage(field, parentOddr, oddrn, name, nullable, registeredRecords, sink);
    } else {
      extractPrimitive(field, parentOddr, oddrn, name, nullable, sink);
    }
  }

  // converts some(!) Protobuf Well-known type (from google.protobuf.* packages)
  // see JsonFormat::buildWellKnownTypePrinters for impl details
  private boolean extractProtoWellKnownType(Descriptors.FieldDescriptor field,
                                            String parentOddr,
                                            String oddrn, //null for root
                                            String name,
                                            boolean nullable,
                                            List<DataSetField> sink) {
    // all well-known types are messages
    if (field.getType() != Descriptors.FieldDescriptor.Type.MESSAGE) {
      return false;
    }
    String typeName = field.getMessageType().getFullName();
    if (typeName.equals(Timestamp.getDescriptor().getFullName())) {
      sink.add(createDataSetField(name, parentOddr, oddrn, TypeEnum.DATETIME, typeName, nullable));
      return true;
    }
    if (typeName.equals(Duration.getDescriptor().getFullName())) {
      sink.add(createDataSetField(name, parentOddr, oddrn, TypeEnum.DURATION, typeName, nullable));
      return true;
    }
    if (typeName.equals(Value.getDescriptor().getFullName())) {
      //TODO: use ANY type when it will appear in ODD
      sink.add(createDataSetField(name, parentOddr, oddrn, TypeEnum.UNKNOWN, typeName, nullable));
      return true;
    }
    if (PRIMITIVES_WRAPPER_TYPE_NAMES.contains(typeName)) {
      var wrapped = field.getMessageType().findFieldByName("value");
      sink.add(createDataSetField(name, parentOddr, oddrn, mapType(wrapped.getType()), typeName, true));
      return true;
    }
    return false;
  }

  private void extractRepeated(Descriptors.FieldDescriptor field,
                               String parentOddr,
                               String oddrn, //null for root
                               String name,
                               boolean nullable,
                               ImmutableSet<String> registeredRecords,
                               List<DataSetField> sink) {
    sink.add(createDataSetField(name, parentOddr, oddrn, TypeEnum.LIST, "repeated", nullable));

    String itemName = field.getType() == Descriptors.FieldDescriptor.Type.MESSAGE
        ? field.getMessageType().getName()
        : field.getType().name().toLowerCase();

    extract(
        field,
        oddrn,
        oddrn + "/items/" + itemName,
        itemName,
        nullable,
        false,
        registeredRecords,
        sink
    );
  }

  private void extractMessage(Descriptors.FieldDescriptor field,
                              String parentOddr,
                              String oddrn, //null for root
                              String name,
                              boolean nullable,
                              ImmutableSet<String> registeredRecords,
                              List<DataSetField> sink) {
    if (extractProtoWellKnownType(field, parentOddr, oddrn, name, nullable, sink)) {
      return;
    }
    sink.add(createDataSetField(name, parentOddr, oddrn, TypeEnum.STRUCT, getLogicalTypeName(field), nullable));

    String msgTypeName = field.getMessageType().getFullName();
    if (registeredRecords.contains(msgTypeName)) {
      // avoiding recursion by checking if record already registered in parsing chain
      return;
    }
    var newRegisteredRecords = ImmutableSet.<String>builder()
        .addAll(registeredRecords)
        .add(msgTypeName)
        .build();

    field.getMessageType()
        .getFields()
        .forEach(f -> {
          extract(f,
              oddrn,
              oddrn + "/fields/" + f.getName(),
              f.getName(),
              !f.isRequired(),
              f.isRepeated(),
              newRegisteredRecords,
              sink
          );
        });
  }

  private void extractPrimitive(Descriptors.FieldDescriptor field,
                                String parentOddr,
                                String oddrn,
                                String name,
                                boolean nullable,
                                List<DataSetField> sink) {
    sink.add(
        createDataSetField(
            name,
            parentOddr,
            oddrn,
            mapType(field.getType()),
            getLogicalTypeName(field),
            nullable
        )
    );
  }

  private String getLogicalTypeName(Descriptors.FieldDescriptor f) {
    return f.getType() == Descriptors.FieldDescriptor.Type.MESSAGE
        ? f.getMessageType().getFullName()
        : f.getType().name().toLowerCase();
  }

  private DataSetField createDataSetField(String name,
                                          String parentOddrn,
                                          String oddrn,
                                          TypeEnum type,
                                          String logicalType,
                                          Boolean nullable) {
    return new DataSetField()
        .name(name)
        .parentFieldOddrn(parentOddrn)
        .oddrn(oddrn)
        .type(
            new DataSetFieldType()
                .isNullable(nullable)
                .logicalType(logicalType)
                .type(type)
        );
  }


  private TypeEnum mapType(Descriptors.FieldDescriptor.Type type) {
    return switch (type) {
      case INT32, INT64, SINT32, SFIXED32, SINT64, UINT32, UINT64, FIXED32, FIXED64, SFIXED64 -> TypeEnum.INTEGER;
      case FLOAT, DOUBLE -> TypeEnum.NUMBER;
      case STRING, ENUM -> TypeEnum.STRING;
      case BOOL -> TypeEnum.BOOLEAN;
      case BYTES -> TypeEnum.BINARY;
      case MESSAGE, GROUP -> TypeEnum.STRUCT;
    };
  }

}
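Illustration only, not part of the diff: a minimal, made-up message run through the same ProtobufSchema/Descriptor API the extractor uses, to show the raw field info (type and repeated flag) that mapType() and extractRepeated() work from.

import com.google.protobuf.Descriptors.Descriptor;
import io.confluent.kafka.schemaregistry.protobuf.ProtobufSchema;

class ProtoDescriptorSketch {
  public static void main(String[] args) {
    String proto = """
        syntax = "proto3";
        message Event {
          string id = 1;
          int64 ts_millis = 2;
          repeated string tags = 3;
        }
        """;
    Descriptor descriptor = new ProtobufSchema(proto).toDescriptor();
    descriptor.getFields().forEach(f ->
        // ProtoExtractor maps string -> STRING, int64 -> INTEGER, and repeated fields -> LIST
        System.out.println(f.getName() + " type=" + f.getType() + " repeated=" + f.isRepeated()));
  }
}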
@ -43,12 +43,13 @@ public class KsqlApiClient
      UndefineVariableContext.class
  );

  @Builder
  @Builder(toBuilder = true)
  @Value
  public static class KsqlResponseTable {
    String header;
    List<String> columnNames;
    List<List<JsonNode>> values;
    boolean error;

    public Optional<JsonNode> getColumnValue(List<JsonNode> row, String column) {
      return Optional.ofNullable(row.get(columnNames.indexOf(column)));
@ -68,26 +69,22 @@ public class KsqlApiClient

  public KsqlApiClient(String baseUrl,
                       @Nullable ClustersProperties.KsqldbServerAuth ksqldbServerAuth,
                       @Nullable ClustersProperties.WebClientSsl ksqldbServerSsl,
                       @Nullable ClustersProperties.TruststoreConfig ksqldbServerSsl,
                       @Nullable ClustersProperties.KeystoreConfig keystoreConfig,
                       @Nullable DataSize maxBuffSize) {
    this.baseUrl = baseUrl;
    this.webClient = webClient(ksqldbServerAuth, ksqldbServerSsl, maxBuffSize);
    this.webClient = webClient(ksqldbServerAuth, ksqldbServerSsl, keystoreConfig, maxBuffSize);
  }

  private static WebClient webClient(@Nullable ClustersProperties.KsqldbServerAuth ksqldbServerAuth,
                                     @Nullable ClustersProperties.WebClientSsl ksqldbServerSsl,
                                     @Nullable ClustersProperties.TruststoreConfig truststoreConfig,
                                     @Nullable ClustersProperties.KeystoreConfig keystoreConfig,
                                     @Nullable DataSize maxBuffSize) {
    ksqldbServerAuth = Optional.ofNullable(ksqldbServerAuth).orElse(new ClustersProperties.KsqldbServerAuth());
    ksqldbServerSsl = Optional.ofNullable(ksqldbServerSsl).orElse(new ClustersProperties.WebClientSsl());
    maxBuffSize = Optional.ofNullable(maxBuffSize).orElse(DataSize.ofMegabytes(20));

    return new WebClientConfigurator()
        .configureSsl(
            ksqldbServerSsl.getKeystoreLocation(),
            ksqldbServerSsl.getKeystorePassword(),
            ksqldbServerSsl.getTruststoreLocation(),
            ksqldbServerSsl.getTruststorePassword()
        )
        .configureSsl(truststoreConfig, keystoreConfig)
        .configureBasicAuth(
            ksqldbServerAuth.getUsername(),
            ksqldbServerAuth.getPassword()
@ -3,14 +3,13 @@ package com.provectus.kafka.ui.service.ksql.response;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.json.JsonMapper;
import com.fasterxml.jackson.databind.node.TextNode;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.Lists;
import com.provectus.kafka.ui.exception.KsqlApiException;
import com.provectus.kafka.ui.service.ksql.KsqlApiClient;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Optional;
import java.util.stream.Collectors;
import org.springframework.web.reactive.function.client.WebClientResponseException;

public class ResponseParser {
@ -24,11 +23,7 @@ public class ResponseParser {
    return Optional.of(
        KsqlApiClient.KsqlResponseTable.builder()
            .header("Schema")
            .columnNames(
                Arrays.stream(jsonNode.get("header").get("schema").asText().split(","))
                    .map(String::trim)
                    .collect(Collectors.toList())
            )
            .columnNames(parseSelectHeadersString(jsonNode.get("header").get("schema").asText()))
            .build());
  }
  if (arrayFieldNonEmpty(jsonNode, "row")) {
@ -46,18 +41,50 @@ public class ResponseParser {
    return Optional.empty();
  }

  @VisibleForTesting
  static List<String> parseSelectHeadersString(String str) {
    List<String> headers = new ArrayList<>();
    int structNesting = 0;
    boolean quotes = false;
    var headerBuilder = new StringBuilder();
    for (char ch : str.toCharArray()) {
      if (ch == '<') {
        structNesting++;
      } else if (ch == '>') {
        structNesting--;
      } else if (ch == '`') {
        quotes = !quotes;
      } else if (ch == ' ' && headerBuilder.isEmpty()) {
        continue; //skipping leading & trailing whitespaces
      } else if (ch == ',' && structNesting == 0 && !quotes) {
        headers.add(headerBuilder.toString());
        headerBuilder = new StringBuilder();
        continue;
      }
      headerBuilder.append(ch);
    }
    if (!headerBuilder.isEmpty()) {
      headers.add(headerBuilder.toString());
    }
    return headers;
  }

  public static KsqlApiClient.KsqlResponseTable errorTableWithTextMsg(String errorText) {
    return KsqlApiClient.KsqlResponseTable.builder()
        .header("Execution error")
        .columnNames(List.of("message"))
        .values(List.of(List.of(new TextNode(errorText))))
        .error(true)
        .build();
  }

  public static KsqlApiClient.KsqlResponseTable parseErrorResponse(WebClientResponseException e) {
    try {
      var errBody = new JsonMapper().readTree(e.getResponseBodyAsString());
      return DynamicParser.parseObject("Execution error", errBody);
      return DynamicParser.parseObject("Execution error", errBody)
          .toBuilder()
          .error(true)
          .build();
    } catch (Exception ex) {
      return errorTableWithTextMsg(
          String.format(
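Illustration only, not part of the diff: a small JUnit-style sketch (assuming JUnit 5 and AssertJ on the test classpath, and placed in the same package since the method is package-private and marked @VisibleForTesting) of how parseSelectHeadersString is expected to split a SELECT header while ignoring commas inside backticks and STRUCT<...> nesting. The sample header string is made up.

package com.provectus.kafka.ui.service.ksql.response;

import static org.assertj.core.api.Assertions.assertThat;

import java.util.List;
import org.junit.jupiter.api.Test;

class ResponseParserHeadersSketchTest {

  @Test
  void splitsOnlyTopLevelCommas() {
    String header = "`ID` INTEGER, `DETAILS` STRUCT<`A` STRING, `B` INTEGER>";
    List<String> headers = ResponseParser.parseSelectHeadersString(header);
    // the comma inside STRUCT<...> does not start a new column
    assertThat(headers).containsExactly("`ID` INTEGER", "`DETAILS` STRUCT<`A` STRING, `B` INTEGER>");
  }
}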
@ -41,9 +41,9 @@ public class DataMasking {

  private final List<Mask> masks;

  public static DataMasking create(List<ClustersProperties.Masking> config) {
  public static DataMasking create(@Nullable List<ClustersProperties.Masking> config) {
    return new DataMasking(
        config.stream().map(property -> {
        Optional.ofNullable(config).orElse(List.of()).stream().map(property -> {
          Preconditions.checkNotNull(property.getType(), "masking type not specified");
          Preconditions.checkArgument(
              StringUtils.isNotEmpty(property.getTopicKeysPattern())
Some files were not shown because too many files have changed in this diff.