Compare commits: master...experiment (1 commit)
Commit 86092a8e58
268 changed files with 4794 additions and 9406 deletions

.github/CODEOWNERS (vendored): 4 changes
@@ -14,5 +14,5 @@
 # TESTS
 /kafka-ui-e2e-checks/ @provectus/kafka-qa

 # INFRA
 /.github/workflows/ @provectus/kafka-devops

 # HELM CHARTS
 /charts/ @provectus/kafka-devops

.github/ISSUE_TEMPLATE/config.yml (vendored): 3 changes
@@ -1,8 +1,5 @@
 blank_issues_enabled: false
 contact_links:
-  - name: Report helm issue
-    url: https://github.com/provectus/kafka-ui-charts
-    about: Our helm charts are located in another repo. Please raise issues/PRs regarding charts in that repo.
   - name: Official documentation
     url: https://docs.kafka-ui.provectus.io/
     about: Before reaching out for support, please refer to our documentation. Read "FAQ" and "Common problems", also try using search there.

.github/ISSUE_TEMPLATE/helm.yml (vendored, new file): 92 additions
@@ -0,0 +1,92 @@
+name: "⎈ K8s/Helm problem report"
+description: "Report a problem with k8s/helm charts/etc"
+labels: ["status/triage", "scope/k8s"]
+assignees: []
+
+body:
+  - type: markdown
+    attributes:
+      value: |
+        Hi, thanks for raising the issue(-s), all contributions really matter!
+        Please, note that we'll close the issue without further explanation if you don't follow
+        this template and don't provide the information requested within this template.
+
+  - type: checkboxes
+    id: terms
+    attributes:
+      label: Issue submitter TODO list
+      description: By you checking these checkboxes we can be sure you've done the essential things.
+      options:
+        - label: I've looked up my issue in [FAQ](https://docs.kafka-ui.provectus.io/faq/common-problems)
+          required: true
+        - label: I've searched for an already existing issues [here](https://github.com/provectus/kafka-ui/issues)
+          required: true
+        - label: I've tried running `master`-labeled docker image and the issue still persists there
+          required: true
+        - label: I'm running a supported version of the application which is listed [here](https://github.com/provectus/kafka-ui/blob/master/SECURITY.md)
+          required: true
+
+  - type: textarea
+    attributes:
+      label: Describe the bug (actual behavior)
+      description: A clear and concise description of what the bug is. Use a list, if there is more than one problem
+    validations:
+      required: true
+
+  - type: textarea
+    attributes:
+      label: Expected behavior
+      description: A clear and concise description of what you expected to happen
+    validations:
+      required: false
+
+  - type: textarea
+    attributes:
+      label: Your installation details
+      description: |
+        How do you run the app? Please provide as much info as possible:
+        1. App version (commit hash in the top left corner of the UI)
+        2. Helm chart version
+        3. Your application config. Please remove the sensitive info like passwords or API keys.
+        4. Any IAAC configs
+    validations:
+      required: true
+
+  - type: textarea
+    attributes:
+      label: Steps to reproduce
+      description: |
+        Please write down the order of the actions required to reproduce the issue.
+        For the advanced setups/complicated issue, we might need you to provide
+        a minimal [reproducible example](https://stackoverflow.com/help/minimal-reproducible-example).
+    validations:
+      required: true
+
+  - type: textarea
+    attributes:
+      label: Screenshots
+      description: |
+        If applicable, add screenshots to help explain your problem
+    validations:
+      required: false
+
+  - type: textarea
+    attributes:
+      label: Logs
+      description: |
+        If applicable, *upload* screenshots to help explain your problem
+    validations:
+      required: false
+
+  - type: textarea
+    attributes:
+      label: Additional context
+      description: |
+        Add any other context about the problem here. E.G.:
+        1. Are there any alternative scenarios (different data/methods/configuration/setup) you have tried?
+           Were they successful or the same issue occurred? Please provide steps as well.
+        2. Related issues (if there are any).
+        3. Logs (if available)
+        4. Is there any serious impact or behaviour on the end-user because of this issue, that can be overlooked?
+    validations:
+      required: false

.github/workflows/aws_publisher.yaml (vendored): 4 changes
@@ -1,4 +1,4 @@
-name: "Infra: Release: AWS Marketplace Publisher"
+name: AWS Marketplace Publisher
 on:
   workflow_dispatch:
     inputs:
@@ -31,7 +31,7 @@ jobs:
           echo "Packer will be triggered in this dir $WORK_DIR"

       - name: Configure AWS credentials for Kafka-UI account
-        uses: aws-actions/configure-aws-credentials@v3
+        uses: aws-actions/configure-aws-credentials@v2
         with:
           aws-access-key-id: ${{ secrets.AWS_AMI_PUBLISH_KEY_ID }}
           aws-secret-access-key: ${{ secrets.AWS_AMI_PUBLISH_KEY_SECRET }}

.github/workflows/backend.yml (vendored): 7 changes
@@ -1,4 +1,4 @@
-name: "Backend: PR/master build & test"
+name: Backend build and test
 on:
   push:
     branches:
@@ -8,9 +8,6 @@ on:
     paths:
       - "kafka-ui-api/**"
      - "pom.xml"
-permissions:
-  checks: write
-  pull-requests: write
 jobs:
   build-and-test:
     runs-on: ubuntu-latest
@@ -32,7 +29,7 @@ jobs:
          key: ${{ runner.os }}-sonar
          restore-keys: ${{ runner.os }}-sonar
      - name: Build and analyze pull request target
-       if: ${{ github.event_name == 'pull_request' }}
+       if: ${{ github.event_name == 'pull_request_target' }}
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          SONAR_TOKEN: ${{ secrets.SONAR_TOKEN_BACKEND }}

.github/workflows/block_merge.yml (vendored): 4 changes
@@ -1,4 +1,4 @@
-name: "Infra: PR block merge"
+name: Pull Request Labels
 on:
   pull_request:
     types: [opened, labeled, unlabeled, synchronize]
@@ -6,7 +6,7 @@ jobs:
   block_merge:
     runs-on: ubuntu-latest
     steps:
-      - uses: mheap/github-action-required-labels@v5
+      - uses: mheap/github-action-required-labels@v4
        with:
          mode: exactly
          count: 0

.github/workflows/branch-deploy.yml (vendored): 28 changes
@@ -1,4 +1,4 @@
-name: "Infra: Feature Testing: Init env"
+name: Feature testing init
 on:
   workflow_dispatch:

@@ -45,7 +45,7 @@ jobs:
         restore-keys: |
           ${{ runner.os }}-buildx-
     - name: Configure AWS credentials for Kafka-UI account
-      uses: aws-actions/configure-aws-credentials@v3
+      uses: aws-actions/configure-aws-credentials@v2
       with:
         aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
         aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
@@ -84,22 +84,18 @@ jobs:
       git add ../kafka-ui-from-branch/
       git commit -m "added env:${{ needs.build.outputs.deploy }}" && git push || true

-    - name: update status check for private deployment
+    - name: make comment with private deployment link
       if: ${{ github.event.label.name == 'status/feature_testing' }}
-      uses: Sibz/github-status-action@v1.1.6
+      uses: peter-evans/create-or-update-comment@v3
       with:
-        authToken: ${{secrets.GITHUB_TOKEN}}
-        context: "Click Details button to open custom deployment page"
-        state: "success"
-        sha: ${{ github.event.pull_request.head.sha || github.sha }}
-        target_url: "http://${{ needs.build.outputs.tag }}.internal.kafka-ui.provectus.io"
+        issue-number: ${{ github.event.pull_request.number }}
+        body: |
+          Custom deployment will be available at http://${{ needs.build.outputs.tag }}.internal.kafka-ui.provectus.io

-    - name: update status check for public deployment
+    - name: make comment with public deployment link
       if: ${{ github.event.label.name == 'status/feature_testing_public' }}
-      uses: Sibz/github-status-action@v1.1.6
+      uses: peter-evans/create-or-update-comment@v3
       with:
-        authToken: ${{secrets.GITHUB_TOKEN}}
-        context: "Click Details button to open custom deployment page"
-        state: "success"
-        sha: ${{ github.event.pull_request.head.sha || github.sha }}
-        target_url: "http://${{ needs.build.outputs.tag }}.internal.kafka-ui.provectus.io"
+        issue-number: ${{ github.event.pull_request.number }}
+        body: |
+          Custom deployment will be available at http://${{ needs.build.outputs.tag }}.kafka-ui.provectus.io in 5 minutes

.github/workflows/branch-remove.yml (vendored): 8 changes
@@ -1,4 +1,4 @@
-name: "Infra: Feature Testing: Destroy env"
+name: Feature testing destroy
 on:
   workflow_dispatch:
   pull_request:
@@ -20,3 +20,9 @@ jobs:
       git config --global user.name "infra-tech"
       git add ../kafka-ui-from-branch/
       git commit -m "removed env:${{ needs.build.outputs.deploy }}" && git push || true
+    - name: make comment with deployment link
+      uses: peter-evans/create-or-update-comment@v3
+      with:
+        issue-number: ${{ github.event.pull_request.number }}
+        body: |
+          Custom deployment removed

|
5
.github/workflows/build-public-image.yml
vendored
5
.github/workflows/build-public-image.yml
vendored
|
@ -1,4 +1,4 @@
|
|||
name: "Infra: Image Testing: Deploy"
|
||||
name: Build Docker image and push
|
||||
on:
|
||||
workflow_dispatch:
|
||||
pull_request:
|
||||
|
@ -42,7 +42,7 @@ jobs:
|
|||
restore-keys: |
|
||||
${{ runner.os }}-buildx-
|
||||
- name: Configure AWS credentials for Kafka-UI account
|
||||
uses: aws-actions/configure-aws-credentials@v3
|
||||
uses: aws-actions/configure-aws-credentials@v2
|
||||
with:
|
||||
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
|
||||
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
|
||||
|
@ -70,5 +70,6 @@ jobs:
|
|||
issue-number: ${{ github.event.pull_request.number }}
|
||||
body: |
|
||||
Image published at public.ecr.aws/provectus/kafka-ui-custom-build:${{ steps.extract_branch.outputs.tag }}
|
||||
|
||||
outputs:
|
||||
tag: ${{ steps.extract_branch.outputs.tag }}
|
||||
|
|
.github/workflows/create-branch-for-helm.yaml (vendored, new file): 28 additions
@@ -0,0 +1,28 @@
+name: Prepare helm release
+on:
+  repository_dispatch:
+    types: [prepare-helm-release]
+jobs:
+  change-app-version:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v3
+      - run: |
+          git config user.name github-actions
+          git config user.email github-actions@github.com
+      - name: Change versions
+        run: |
+          git checkout -b release-${{ github.event.client_payload.appversion}}
+          version=$(cat charts/kafka-ui/Chart.yaml | grep version | awk '{print $2}')
+          version=${version%.*}.$((${version##*.}+1))
+          sed -i "s/version:.*/version: ${version}/" charts/kafka-ui/Chart.yaml
+          sed -i "s/appVersion:.*/appVersion: ${{ github.event.client_payload.appversion}}/" charts/kafka-ui/Chart.yaml
+          git add charts/kafka-ui/Chart.yaml
+          git commit -m "release ${version}"
+          git push --set-upstream origin release-${{ github.event.client_payload.appversion}}
+      - name: Slack Notification
+        uses: rtCamp/action-slack-notify@v2
+        env:
+          SLACK_TITLE: "release-${{ github.event.client_payload.appversion}}"
+          SLACK_MESSAGE: "A new release of the helm chart has been prepared. Branch name: release-${{ github.event.client_payload.appversion}}"
+          SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}

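The patch-version bump in the "Change versions" step is pure shell parameter expansion: ${version%.*} drops the final dot-separated component, and ${version##*.} isolates it for arithmetic. A minimal sketch of the same expansion, runnable locally, with an assumed starting version:

    version="0.7.0"                                # assumed current chart version
    echo "${version%.*}.$((${version##*.}+1))"     # prints 0.7.1
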
.github/workflows/cve.yaml (vendored): 2 changes
@@ -55,7 +55,7 @@ jobs:
           cache-to: type=local,dest=/tmp/.buildx-cache

       - name: Run CVE checks
-        uses: aquasecurity/trivy-action@0.12.0
+        uses: aquasecurity/trivy-action@0.10.0
        with:
          image-ref: "provectuslabs/kafka-ui:${{ steps.build.outputs.version }}"
          format: "table"

.github/workflows/delete-public-image.yml (vendored): 10 changes
@@ -1,4 +1,4 @@
-name: "Infra: Image Testing: Delete"
+name: Delete Public ECR Image
 on:
   workflow_dispatch:
   pull_request:
@@ -15,7 +15,7 @@ jobs:
         tag='${{ github.event.pull_request.number }}'
         echo "tag=${tag}" >> $GITHUB_OUTPUT
     - name: Configure AWS credentials for Kafka-UI account
-      uses: aws-actions/configure-aws-credentials@v3
+      uses: aws-actions/configure-aws-credentials@v2
       with:
         aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
         aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
@@ -32,3 +32,9 @@ jobs:
           --repository-name kafka-ui-custom-build \
           --image-ids imageTag=${{ steps.extract_branch.outputs.tag }} \
           --region us-east-1
+    - name: make comment with private deployment link
+      uses: peter-evans/create-or-update-comment@v3
+      with:
+        issue-number: ${{ github.event.pull_request.number }}
+        body: |
+          Image tag public.ecr.aws/provectus/kafka-ui-custom-build:${{ steps.extract_branch.outputs.tag }} has been removed

.github/workflows/documentation.yaml (vendored): 2 changes
@@ -1,4 +1,4 @@
-name: "Infra: Docs: URL linter"
+name: Documentation URLs linter
 on:
   pull_request:
     types:

.github/workflows/e2e-automation.yml (vendored): 4 changes
@@ -1,4 +1,4 @@
-name: "E2E: Automation suite"
+name: E2E Automation suite
 on:
   workflow_dispatch:
     inputs:
@@ -24,7 +24,7 @@ jobs:
       with:
         ref: ${{ github.sha }}
     - name: Configure AWS credentials
-      uses: aws-actions/configure-aws-credentials@v3
+      uses: aws-actions/configure-aws-credentials@v2
       with:
         aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
         aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}

.github/workflows/e2e-checks.yaml (vendored): 10 changes
@@ -1,4 +1,4 @@
-name: "E2E: PR healthcheck"
+name: E2E PR health check
 on:
   pull_request_target:
     types: [ "opened", "edited", "reopened", "synchronize" ]
@@ -8,8 +8,6 @@ on:
       - "kafka-ui-react-app/**"
      - "kafka-ui-e2e-checks/**"
      - "pom.xml"
-permissions:
-  statuses: write
 jobs:
   build-and-test:
     runs-on: ubuntu-latest
@@ -18,10 +16,10 @@
       with:
         ref: ${{ github.event.pull_request.head.sha }}
     - name: Configure AWS credentials
-      uses: aws-actions/configure-aws-credentials@v3
+      uses: aws-actions/configure-aws-credentials@v2
       with:
-        aws-access-key-id: ${{ secrets.S3_AWS_ACCESS_KEY_ID }}
-        aws-secret-access-key: ${{ secrets.S3_AWS_SECRET_ACCESS_KEY }}
+        aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
+        aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
         aws-region: eu-central-1
     - name: Set up environment
       id: set_env_values

.github/workflows/e2e-manual.yml (vendored): 2 changes
@@ -1,4 +1,4 @@
-name: "E2E: Manual suite"
+name: E2E Manual suite
 on:
   workflow_dispatch:
     inputs:

.github/workflows/e2e-weekly.yml (vendored): 4 changes
@@ -1,4 +1,4 @@
-name: "E2E: Weekly suite"
+name: E2E Weekly suite
 on:
   schedule:
     - cron: '0 1 * * 1'
@@ -11,7 +11,7 @@ jobs:
       with:
         ref: ${{ github.sha }}
     - name: Configure AWS credentials
-      uses: aws-actions/configure-aws-credentials@v3
+      uses: aws-actions/configure-aws-credentials@v2
       with:
         aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
         aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}

.github/workflows/frontend.yaml (vendored): 15 changes
@@ -1,4 +1,4 @@
-name: "Frontend: PR/master build & test"
+name: Frontend build and test
 on:
   push:
     branches:
@@ -8,9 +8,6 @@ on:
     paths:
       - "kafka-ui-contract/**"
      - "kafka-ui-react-app/**"
-permissions:
-  checks: write
-  pull-requests: write
 jobs:
   build-and-test:
     env:
@@ -23,13 +20,13 @@ jobs:
         # Disabling shallow clone is recommended for improving relevancy of reporting
         fetch-depth: 0
         ref: ${{ github.event.pull_request.head.sha }}
-    - uses: pnpm/action-setup@v2.4.0
+    - uses: pnpm/action-setup@v2.2.4
       with:
-        version: 8.6.12
+        version: 7.4.0
     - name: Install node
-      uses: actions/setup-node@v3.8.1
+      uses: actions/setup-node@v3.6.0
       with:
-        node-version: "18.17.1"
+        node-version: "16.15.0"
         cache: "pnpm"
         cache-dependency-path: "./kafka-ui-react-app/pnpm-lock.yaml"
     - name: Install Node dependencies
@@ -49,7 +46,7 @@ jobs:
         cd kafka-ui-react-app/
         pnpm test:CI
     - name: SonarCloud Scan
-      uses: sonarsource/sonarcloud-github-action@master
+      uses: workshur/sonarcloud-github-action@improved_basedir
       with:
         projectBaseDir: ./kafka-ui-react-app
         args: -Dsonar.pullrequest.key=${{ github.event.pull_request.number }} -Dsonar.pullrequest.branch=${{ github.head_ref }} -Dsonar.pullrequest.base=${{ github.base_ref }}

.github/workflows/helm.yaml (vendored, new file): 38 additions
@@ -0,0 +1,38 @@
+name: Helm linter
+on:
+  pull_request:
+    types: ["opened", "edited", "reopened", "synchronize"]
+    branches:
+      - 'master'
+    paths:
+      - "charts/**"
+jobs:
+  build-and-test:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v3
+      - name: Helm tool installer
+        uses: Azure/setup-helm@v3
+      - name: Setup Kubeval
+        uses: lra/setup-kubeval@v1.0.1
+      #check, was helm version increased in Chart.yaml?
+      - name: Check version
+        shell: bash
+        run: |
+          helm_version_new=$(cat charts/kafka-ui/Chart.yaml | grep version | awk '{print $2}')
+          helm_version_old=$(curl -s https://raw.githubusercontent.com/provectus/kafka-ui/master/charts/kafka-ui/Chart.yaml | grep version | awk '{print $2}' )
+          echo $helm_version_old
+          echo $helm_version_new
+          if [[ "$helm_version_new" > "$helm_version_old" ]]; then exit 0 ; else exit 1 ; fi
+      - name: Run kubeval
+        shell: bash
+        run: |
+          sed -i "s@enabled: false@enabled: true@g" charts/kafka-ui/values.yaml
+          K8S_VERSIONS=$(git ls-remote --refs --tags https://github.com/kubernetes/kubernetes.git | cut -d/ -f3 | grep -e '^v1\.[0-9]\{2\}\.[0]\{1,2\}$' | grep -v -e '^v1\.1[0-7]\{1\}' | cut -c2-)
+          echo "NEXT K8S VERSIONS ARE GOING TO BE TESTED: $K8S_VERSIONS"
+          echo ""
+          for version in $K8S_VERSIONS
+            do
+              echo $version;
+              helm template --kube-version $version --set ingress.enabled=true charts/kafka-ui -f charts/kafka-ui/values.yaml | kubeval --additional-schema-locations https://raw.githubusercontent.com/yannh/kubernetes-json-schema/master --strict -v $version;
+            done

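Worth noting about the "Check version" gate: in bash, [[ "$a" > "$b" ]] compares strings lexicographically, not as semantic versions. A quick sketch of both the ordinary and the surprising case, with sample version numbers assumed:

    new=0.7.1 old=0.7.0
    [[ "$new" > "$old" ]] && echo accepted                     # accepted
    new=0.10.0 old=0.9.0
    [[ "$new" > "$old" ]] && echo accepted || echo rejected    # rejected: "1" sorts before "9"
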
.github/workflows/master.yaml (vendored): 3 changes
@@ -1,4 +1,4 @@
-name: "Master: Build & deploy"
+name: Master branch build & deploy
 on:
   workflow_dispatch:
   push:
@@ -58,7 +58,6 @@ jobs:
         builder: ${{ steps.buildx.outputs.name }}
         context: kafka-ui-api
         platforms: linux/amd64,linux/arm64
-        provenance: false
         push: true
         tags: |
           provectuslabs/kafka-ui:${{ steps.build.outputs.version }}

.github/workflows/pr-checks.yaml (vendored): 7 changes
@@ -1,14 +1,13 @@
-name: "PR: Checklist linter"
+name: "PR Checklist checked"
 on:
   pull_request_target:
     types: [opened, edited, synchronize, reopened]
-permissions:
-  checks: write
+
 jobs:
   task-check:
     runs-on: ubuntu-latest
     steps:
-      - uses: kentaro-m/task-completed-checker-action@v0.1.2
+      - uses: kentaro-m/task-completed-checker-action@v0.1.1
        with:
          repo-token: "${{ secrets.GITHUB_TOKEN }}"
      - uses: dekinderfiets/pr-description-enforcer@0.0.1

.github/workflows/release-helm.yaml (vendored, new file): 39 additions
@@ -0,0 +1,39 @@
+name: Release helm
+on:
+  push:
+    branches:
+      - master
+    paths:
+      - "charts/**"
+
+jobs:
+  release-helm:
+    runs-on:
+      ubuntu-latest
+    steps:
+      - uses: actions/checkout@v3
+        with:
+          fetch-depth: 1
+
+      - run: |
+          git config user.name github-actions
+          git config user.email github-actions@github.com
+
+      - uses: azure/setup-helm@v3
+
+      - name: add chart #realse helm with new version
+        run: |
+          VERSION=$(cat charts/kafka-ui/Chart.yaml | grep version | awk '{print $2}')
+          echo "HELM_VERSION=$(echo ${VERSION})" >> $GITHUB_ENV
+          MSG=$(helm package charts/kafka-ui)
+          git fetch origin
+          git stash
+          git checkout -b gh-pages origin/gh-pages
+          git pull
+          helm repo index .
+          git add -f ${MSG##*/} index.yaml
+          git commit -m "release ${VERSION}"
+          git push
+      - uses: rickstaa/action-create-tag@v1 #create new tag
+        with:
+          tag: "charts/kafka-ui-${{ env.HELM_VERSION }}"

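One detail worth unpacking in the "add chart" step: helm package prints a message ending with the absolute path of the generated archive, and ${MSG##*/} strips everything up to the last slash, so only the .tgz filename is committed to gh-pages. A sketch with an assumed output string:

    MSG="Successfully packaged chart and saved it to: /home/runner/work/kafka-ui/kafka-ui-0.7.0.tgz"
    echo "${MSG##*/}"    # kafka-ui-0.7.0.tgz
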
.github/workflows/release-serde-api.yaml (vendored): 2 changes
@@ -1,4 +1,4 @@
-name: "Infra: Release: Serde API"
+name: Release serde api
 on: workflow_dispatch

 jobs:

.github/workflows/release.yaml (vendored): 10 changes
@@ -1,4 +1,4 @@
-name: "Infra: Release"
+name: Release
 on:
   release:
     types: [published]
@@ -34,7 +34,7 @@ jobs:
         echo "version=${VERSION}" >> $GITHUB_OUTPUT

     - name: Upload files to a GitHub release
-      uses: svenstaro/upload-release-action@2.7.0
+      uses: svenstaro/upload-release-action@2.5.0
       with:
         repo_token: ${{ secrets.GITHUB_TOKEN }}
         file: kafka-ui-api/target/kafka-ui-api-${{ steps.build.outputs.version }}.jar
@@ -89,12 +89,14 @@ jobs:

   charts:
     runs-on: ubuntu-latest
+    permissions:
+      contents: write
     needs: release
     steps:
       - name: Repository Dispatch
         uses: peter-evans/repository-dispatch@v2
         with:
-          token: ${{ secrets.CHARTS_ACTIONS_TOKEN }}
-          repository: provectus/kafka-ui-charts
+          token: ${{ secrets.GITHUB_TOKEN }}
+          repository: provectus/kafka-ui
           event-type: prepare-helm-release
           client-payload: '{"appversion": "${{ needs.release.outputs.version }}"}'

.github/workflows/release_drafter.yml (vendored): 2 changes
@@ -1,4 +1,4 @@
-name: "Infra: Release Drafter run"
+name: Release Drafter

 on:
   push:

@@ -1,4 +1,4 @@
-name: "Infra: Feature Testing Public: Init env"
+name: Separate environment create
 on:
   workflow_dispatch:
     inputs:
@@ -47,7 +47,7 @@ jobs:
         restore-keys: |
           ${{ runner.os }}-buildx-
     - name: Configure AWS credentials for Kafka-UI account
-      uses: aws-actions/configure-aws-credentials@v3
+      uses: aws-actions/configure-aws-credentials@v2
       with:
         aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
         aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}

@@ -1,4 +1,4 @@
-name: "Infra: Feature Testing Public: Destroy env"
+name: Separate environment remove
 on:
   workflow_dispatch:
     inputs:

.github/workflows/stale.yaml (vendored): 2 changes
@@ -1,4 +1,4 @@
-name: 'Infra: Close stale issues'
+name: 'Close stale issues'
 on:
   schedule:
     - cron: '30 1 * * *'

.github/workflows/terraform-deploy.yml (vendored): 4 changes
@@ -1,4 +1,4 @@
-name: "Infra: Terraform deploy"
+name: Terraform deploy
 on:
   workflow_dispatch:
     inputs:
@@ -26,7 +26,7 @@ jobs:
         echo "Terraform will be triggered in this dir $TF_DIR"

     - name: Configure AWS credentials for Kafka-UI account
-      uses: aws-actions/configure-aws-credentials@v3
+      uses: aws-actions/configure-aws-credentials@v2
       with:
         aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
         aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}

.github/workflows/triage_issues.yml (vendored): 2 changes
@@ -1,4 +1,4 @@
-name: "Infra: Triage: Apply triage label for issues"
+name: Add triage label to new issues
 on:
   issues:
     types:

.github/workflows/triage_prs.yml (vendored): 2 changes
@@ -1,4 +1,4 @@
-name: "Infra: Triage: Apply triage label for PRs"
+name: Add triage label to new PRs
 on:
   pull_request:
     types:

@@ -7,9 +7,7 @@ on:
   issues:
     types:
       - opened
-permissions:
-  issues: write
-  pull-requests: write
+
 jobs:
   welcome:
     runs-on: ubuntu-latest

.github/workflows/workflow_linter.yaml (vendored): 2 changes
@@ -1,4 +1,4 @@
-name: "Infra: Workflow linter"
+name: "Workflow linter"
 on:
   pull_request:
     types:

.gitignore (vendored): 3 changes
@@ -31,9 +31,6 @@ build/
 .vscode/
 /kafka-ui-api/app/node
-
-### SDKMAN ###
-.sdkmanrc

 .DS_Store
 *.code-workspace

@@ -18,10 +18,6 @@
   <a href="https://www.producthunt.com/products/ui-for-apache-kafka/reviews/new">ProductHunt</a>
 </p>

-<p align="center">
-    <img src="https://repobeats.axiom.co/api/embed/2e8a7c2d711af9daddd34f9791143e7554c35d0f.svg" />
-</p>
-
 #### UI for Apache Kafka is a free, open-source web UI to monitor and manage Apache Kafka clusters.

 UI for Apache Kafka is a simple tool that makes your data flows observable, helps find and troubleshoot issues faster and deliver optimal performance. Its lightweight dashboard makes it easy to track key metrics of your Kafka clusters - Brokers, Topics, Partitions, Production, and Consumption.
@@ -91,7 +87,7 @@ docker run -it -p 8080:8080 -e DYNAMIC_CONFIG_ENABLED=true provectuslabs/kafka-u

 Then access the web UI at [http://localhost:8080](http://localhost:8080)

-The command is sufficient to try things out. When you're done trying things out, you can proceed with a [persistent installation](https://docs.kafka-ui.provectus.io/quick-start/persistent-start)
+The command is sufficient to try things out. When you're done trying things out, you can proceed with a [persistent installation](https://docs.kafka-ui.provectus.io/configuration/quick-start#persistent-start)

 ## Persistent installation

charts/kafka-ui/.helmignore (new file): 25 additions
@@ -0,0 +1,25 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
+example/
+README.md

charts/kafka-ui/Chart.yaml (new file): 7 additions
@@ -0,0 +1,7 @@
+apiVersion: v2
+name: kafka-ui
+description: A Helm chart for kafka-UI
+type: application
+version: 0.7.0
+appVersion: v0.7.0
+icon: https://github.com/provectus/kafka-ui/raw/master/documentation/images/kafka-ui-logo.png

charts/kafka-ui/README.md (new file): 1 addition
@@ -0,0 +1 @@
+Please refer to our [documentation](https://docs.kafka-ui.provectus.io/configuration/helm-charts) to get some info on our helm charts.

charts/kafka-ui/index.yaml (new file): 3 additions
@@ -0,0 +1,3 @@
+apiVersion: v1
+entries: {}
+generated: "2021-11-11T12:26:08.479581+03:00"

charts/kafka-ui/templates/NOTES.txt (new file): 21 additions
@@ -0,0 +1,21 @@
+1. Get the application URL by running these commands:
+{{- if .Values.ingress.enabled }}
+{{- range $host := .Values.ingress.hosts }}
+  {{- range .paths }}
+  http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ . }}
+  {{- end }}
+{{- end }}
+{{- else if contains "NodePort" .Values.service.type }}
+  export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "kafka-ui.fullname" . }})
+  export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
+  echo http://$NODE_IP:$NODE_PORT
+{{- else if contains "LoadBalancer" .Values.service.type }}
+  NOTE: It may take a few minutes for the LoadBalancer IP to be available.
+        You can watch the status of by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "kafka-ui.fullname" . }}'
+  export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "kafka-ui.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
+  echo http://$SERVICE_IP:{{ .Values.service.port }}
+{{- else if contains "ClusterIP" .Values.service.type }}
+  export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "kafka-ui.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
+  echo "Visit http://127.0.0.1:8080 to use your application"
+  kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:8080
+{{- end }}

charts/kafka-ui/templates/_helpers.tpl
Normal file
84
charts/kafka-ui/templates/_helpers.tpl
Normal file
|
@ -0,0 +1,84 @@
|
|||
{{/* vim: set filetype=mustache: */}}
|
||||
{{/*
|
||||
Expand the name of the chart.
|
||||
*/}}
|
||||
{{- define "kafka-ui.name" -}}
|
||||
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Create a default fully qualified app name.
|
||||
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
|
||||
If release name contains chart name it will be used as a full name.
|
||||
*/}}
|
||||
{{- define "kafka-ui.fullname" -}}
|
||||
{{- if .Values.fullnameOverride }}
|
||||
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
|
||||
{{- else }}
|
||||
{{- $name := default .Chart.Name .Values.nameOverride }}
|
||||
{{- if contains $name .Release.Name }}
|
||||
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
|
||||
{{- else }}
|
||||
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Create chart name and version as used by the chart label.
|
||||
*/}}
|
||||
{{- define "kafka-ui.chart" -}}
|
||||
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Common labels
|
||||
*/}}
|
||||
{{- define "kafka-ui.labels" -}}
|
||||
helm.sh/chart: {{ include "kafka-ui.chart" . }}
|
||||
{{ include "kafka-ui.selectorLabels" . }}
|
||||
{{- if .Chart.AppVersion }}
|
||||
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
|
||||
{{- end }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Selector labels
|
||||
*/}}
|
||||
{{- define "kafka-ui.selectorLabels" -}}
|
||||
app.kubernetes.io/name: {{ include "kafka-ui.name" . }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Create the name of the service account to use
|
||||
*/}}
|
||||
{{- define "kafka-ui.serviceAccountName" -}}
|
||||
{{- if .Values.serviceAccount.create }}
|
||||
{{- default (include "kafka-ui.fullname" .) .Values.serviceAccount.name }}
|
||||
{{- else }}
|
||||
{{- default "default" .Values.serviceAccount.name }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
|
||||
{{/*
|
||||
This allows us to check if the registry of the image is specified or not.
|
||||
*/}}
|
||||
{{- define "kafka-ui.imageName" -}}
|
||||
{{- $registryName := .Values.image.registry -}}
|
||||
{{- if .Values.global }}
|
||||
{{- if .Values.global.imageRegistry }}
|
||||
{{- $registryName = .Values.global.imageRegistry -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
{{- $repository := .Values.image.repository -}}
|
||||
{{- $tag := .Values.image.tag | default .Chart.AppVersion -}}
|
||||
{{- if $registryName }}
|
||||
{{- printf "%s/%s:%s" $registryName $repository $tag -}}
|
||||
{{- else }}
|
||||
{{- printf "%s:%s" $repository $tag -}}
|
||||
{{- end }}
|
||||
{{- end -}}
|
||||
|
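The kafka-ui.imageName helper is what lets a global registry override the per-image one. A way to observe it render, assuming the chart is checked out locally and using a hypothetical mirror registry:

    helm template kafka-ui charts/kafka-ui --set image.tag=v0.7.0 | grep 'image: '
    # image: docker.io/provectuslabs/kafka-ui:v0.7.0
    helm template kafka-ui charts/kafka-ui --set image.tag=v0.7.0 \
      --set global.imageRegistry=mirror.example.com | grep 'image: '
    # image: mirror.example.com/provectuslabs/kafka-ui:v0.7.0
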
charts/kafka-ui/templates/configmap.yaml (new file): 10 additions
@@ -0,0 +1,10 @@
+{{- if .Values.envs.config -}}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: {{ include "kafka-ui.fullname" . }}
+  labels:
+    {{- include "kafka-ui.labels" . | nindent 4 }}
+data:
+  {{- toYaml .Values.envs.config | nindent 2 }}
+{{- end -}}

charts/kafka-ui/templates/configmap_fromValues.yaml (new file): 11 additions
@@ -0,0 +1,11 @@
+{{- if .Values.yamlApplicationConfig -}}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: {{ include "kafka-ui.fullname" . }}-fromvalues
+  labels:
+    {{- include "kafka-ui.labels" . | nindent 4 }}
+data:
+  config.yml: |-
+    {{- toYaml .Values.yamlApplicationConfig | nindent 4}}
+{{ end }}

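To trace the data path: everything under yamlApplicationConfig is serialized into the config.yml key of this ConfigMap, which the deployment template below mounts at /kafka-ui/ and points Spring at via SPRING_CONFIG_ADDITIONAL-LOCATION. A local render sketch, with an assumed minimal cluster config:

    cat > /tmp/app-config.yaml <<'EOF'
    yamlApplicationConfig:
      kafka:
        clusters:
          - name: local
            bootstrapServers: kafka:9092
    EOF
    helm template kafka-ui charts/kafka-ui -f /tmp/app-config.yaml | grep -A7 'kind: ConfigMap'
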
charts/kafka-ui/templates/deployment.yaml (new file): 150 additions
@@ -0,0 +1,150 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: {{ include "kafka-ui.fullname" . }}
+  labels:
+    {{- include "kafka-ui.labels" . | nindent 4 }}
+  {{- with .Values.annotations }}
+  annotations:
+    {{- toYaml . | nindent 4 }}
+  {{- end }}
+spec:
+  {{- if not .Values.autoscaling.enabled }}
+  replicas: {{ .Values.replicaCount }}
+  {{- end }}
+  selector:
+    matchLabels:
+      {{- include "kafka-ui.selectorLabels" . | nindent 6 }}
+  template:
+    metadata:
+      annotations:
+      {{- with .Values.podAnnotations }}
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+        checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }}
+        checksum/configFromValues: {{ include (print $.Template.BasePath "/configmap_fromValues.yaml") . | sha256sum }}
+        checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }}
+      labels:
+        {{- include "kafka-ui.selectorLabels" . | nindent 8 }}
+        {{- if .Values.podLabels }}
+        {{- toYaml .Values.podLabels | nindent 8 }}
+        {{- end }}
+    spec:
+      {{- with .Values.imagePullSecrets }}
+      imagePullSecrets:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+      {{- with .Values.initContainers }}
+      initContainers:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+      serviceAccountName: {{ include "kafka-ui.serviceAccountName" . }}
+      securityContext:
+        {{- toYaml .Values.podSecurityContext | nindent 8 }}
+      containers:
+        - name: {{ .Chart.Name }}
+          securityContext:
+            {{- toYaml .Values.securityContext | nindent 12 }}
+          image: {{ include "kafka-ui.imageName" . }}
+          imagePullPolicy: {{ .Values.image.pullPolicy }}
+          {{- if or .Values.env .Values.yamlApplicationConfig .Values.yamlApplicationConfigConfigMap}}
+          env:
+            {{- with .Values.env }}
+            {{- toYaml . | nindent 12 }}
+            {{- end }}
+            {{- if or .Values.yamlApplicationConfig .Values.yamlApplicationConfigConfigMap}}
+            - name: SPRING_CONFIG_ADDITIONAL-LOCATION
+              {{- if .Values.yamlApplicationConfig }}
+              value: /kafka-ui/config.yml
+              {{- else if .Values.yamlApplicationConfigConfigMap }}
+              value: /kafka-ui/{{ .Values.yamlApplicationConfigConfigMap.keyName | default "config.yml" }}
+              {{- end }}
+            {{- end }}
+          {{- end }}
+          envFrom:
+            {{- if .Values.existingConfigMap }}
+            - configMapRef:
+                name: {{ .Values.existingConfigMap }}
+            {{- end }}
+            {{- if .Values.envs.config }}
+            - configMapRef:
+                name: {{ include "kafka-ui.fullname" . }}
+            {{- end }}
+            {{- if .Values.existingSecret }}
+            - secretRef:
+                name: {{ .Values.existingSecret }}
+            {{- end }}
+            {{- if .Values.envs.secret}}
+            - secretRef:
+                name: {{ include "kafka-ui.fullname" . }}
+            {{- end}}
+          ports:
+            - name: http
+              containerPort: 8080
+              protocol: TCP
+          livenessProbe:
+            httpGet:
+              {{- $contextPath := .Values.envs.config.SERVER_SERVLET_CONTEXT_PATH | default "" | printf "%s/actuator/health" | urlParse }}
+              path: {{ get $contextPath "path" }}
+              port: http
+              {{- if .Values.probes.useHttpsScheme }}
+              scheme: HTTPS
+              {{- end }}
+            initialDelaySeconds: 60
+            periodSeconds: 30
+            timeoutSeconds: 10
+          readinessProbe:
+            httpGet:
+              {{- $contextPath := .Values.envs.config.SERVER_SERVLET_CONTEXT_PATH | default "" | printf "%s/actuator/health" | urlParse }}
+              path: {{ get $contextPath "path" }}
+              port: http
+              {{- if .Values.probes.useHttpsScheme }}
+              scheme: HTTPS
+              {{- end }}
+            initialDelaySeconds: 60
+            periodSeconds: 30
+            timeoutSeconds: 10
+          resources:
+            {{- toYaml .Values.resources | nindent 12 }}
+          {{- if or .Values.yamlApplicationConfig .Values.volumeMounts .Values.yamlApplicationConfigConfigMap}}
+          volumeMounts:
+            {{- with .Values.volumeMounts }}
+            {{- toYaml . | nindent 12 }}
+            {{- end }}
+            {{- if .Values.yamlApplicationConfig }}
+            - name: kafka-ui-yaml-conf
+              mountPath: /kafka-ui/
+            {{- end }}
+            {{- if .Values.yamlApplicationConfigConfigMap}}
+            - name: kafka-ui-yaml-conf-configmap
+              mountPath: /kafka-ui/
+            {{- end }}
+          {{- end }}
+      {{- if or .Values.yamlApplicationConfig .Values.volumes .Values.yamlApplicationConfigConfigMap}}
+      volumes:
+        {{- with .Values.volumes }}
+        {{- toYaml . | nindent 8 }}
+        {{- end }}
+        {{- if .Values.yamlApplicationConfig }}
+        - name: kafka-ui-yaml-conf
+          configMap:
+            name: {{ include "kafka-ui.fullname" . }}-fromvalues
+        {{- end }}
+        {{- if .Values.yamlApplicationConfigConfigMap}}
+        - name: kafka-ui-yaml-conf-configmap
+          configMap:
+            name: {{ .Values.yamlApplicationConfigConfigMap.name }}
+        {{- end }}
+      {{- end }}
+      {{- with .Values.nodeSelector }}
+      nodeSelector:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+      {{- with .Values.affinity }}
+      affinity:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+      {{- with .Values.tolerations }}
+      tolerations:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}

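The urlParse trick in both probes means the health endpoint follows SERVER_SERVLET_CONTEXT_PATH automatically. A quick way to confirm the rendered probe path (the context path value below is an assumption):

    helm template kafka-ui charts/kafka-ui \
      --set envs.config.SERVER_SERVLET_CONTEXT_PATH=/kafka-ui | grep 'path: '
    # path: /kafka-ui/actuator/health   (plain /actuator/health when the variable is unset)
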
charts/kafka-ui/templates/hpa.yaml (new file): 46 additions
@@ -0,0 +1,46 @@
+{{- if .Values.autoscaling.enabled }}
+{{- $kubeCapabilityVersion := semver .Capabilities.KubeVersion.Version -}}
+{{- $isHigher1p25 := ge (semver "1.25" | $kubeCapabilityVersion.Compare) 0 -}}
+{{- if and ($.Capabilities.APIVersions.Has "autoscaling/v2") $isHigher1p25 -}}
+apiVersion: autoscaling/v2
+{{- else }}
+apiVersion: autoscaling/v2beta1
+{{- end }}
+kind: HorizontalPodAutoscaler
+metadata:
+  name: {{ include "kafka-ui.fullname" . }}
+  labels:
+    {{- include "kafka-ui.labels" . | nindent 4 }}
+spec:
+  scaleTargetRef:
+    apiVersion: apps/v1
+    kind: Deployment
+    name: {{ include "kafka-ui.fullname" . }}
+  minReplicas: {{ .Values.autoscaling.minReplicas }}
+  maxReplicas: {{ .Values.autoscaling.maxReplicas }}
+  metrics:
+    {{- if .Values.autoscaling.targetCPUUtilizationPercentage }}
+    - type: Resource
+      resource:
+        name: cpu
+        {{- if $isHigher1p25 }}
+        target:
+          type: Utilization
+          averageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }}
+        {{- else }}
+        targetAverageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }}
+        {{- end }}
+    {{- end }}
+    {{- if .Values.autoscaling.targetMemoryUtilizationPercentage }}
+    - type: Resource
+      resource:
+        name: memory
+        {{- if $isHigher1p25 }}
+        target:
+          type: Utilization
+          averageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }}
+        {{- else }}
+        targetAverageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }}
+        {{- end }}
+    {{- end }}
+{{- end }}

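Because the template consults both .Capabilities.APIVersions and the cluster version, rendering offline needs both hints supplied; a sketch of checking which HPA apiVersion the chart would emit:

    helm template kafka-ui charts/kafka-ui --set autoscaling.enabled=true \
      --kube-version 1.26.0 --api-versions autoscaling/v2 | grep 'apiVersion: autoscaling'
    # apiVersion: autoscaling/v2
    helm template kafka-ui charts/kafka-ui --set autoscaling.enabled=true \
      --kube-version 1.23.0 | grep 'apiVersion: autoscaling'
    # apiVersion: autoscaling/v2beta1
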
charts/kafka-ui/templates/ingress.yaml (new file): 89 additions
@@ -0,0 +1,89 @@
+{{- if .Values.ingress.enabled -}}
+{{- $fullName := include "kafka-ui.fullname" . -}}
+{{- $svcPort := .Values.service.port -}}
+{{- $kubeCapabilityVersion := semver .Capabilities.KubeVersion.Version -}}
+{{- $isHigher1p19 := ge (semver "1.19" | $kubeCapabilityVersion.Compare) 0 -}}
+{{- if and ($.Capabilities.APIVersions.Has "networking.k8s.io/v1") $isHigher1p19 -}}
+apiVersion: networking.k8s.io/v1
+{{- else if $.Capabilities.APIVersions.Has "networking.k8s.io/v1beta1" }}
+apiVersion: networking.k8s.io/v1beta1
+{{- else }}
+apiVersion: extensions/v1beta1
+{{- end }}
+kind: Ingress
+metadata:
+  name: {{ $fullName }}
+  labels:
+    {{- include "kafka-ui.labels" . | nindent 4 }}
+  {{- with .Values.ingress.annotations }}
+  annotations:
+    {{- toYaml . | nindent 4 }}
+  {{- end }}
+spec:
+  {{- if .Values.ingress.tls.enabled }}
+  tls:
+    - hosts:
+        - {{ tpl .Values.ingress.host . }}
+      secretName: {{ .Values.ingress.tls.secretName }}
+  {{- end }}
+  {{- if .Values.ingress.ingressClassName }}
+  ingressClassName: {{ .Values.ingress.ingressClassName }}
+  {{- end }}
+  rules:
+    - http:
+        paths:
+          {{- if and ($.Capabilities.APIVersions.Has "networking.k8s.io/v1") $isHigher1p19 -}}
+          {{- range .Values.ingress.precedingPaths }}
+          - path: {{ .path }}
+            pathType: {{ .Values.ingress.pathType }}
+            backend:
+              service:
+                name: {{ .serviceName }}
+                port:
+                  number: {{ .servicePort }}
+          {{- end }}
+          - backend:
+              service:
+                name: {{ $fullName }}
+                port:
+                  number: {{ $svcPort }}
+            pathType: {{ .Values.ingress.pathType }}
+            {{- if .Values.ingress.path }}
+            path: {{ .Values.ingress.path }}
+            {{- end }}
+          {{- range .Values.ingress.succeedingPaths }}
+          - path: {{ .path }}
+            pathType: {{ .Values.ingress.pathType }}
+            backend:
+              service:
+                name: {{ .serviceName }}
+                port:
+                  number: {{ .servicePort }}
+          {{- end }}
+      {{- if tpl .Values.ingress.host . }}
+      host: {{tpl .Values.ingress.host . }}
+      {{- end }}
+    {{- else -}}
+          {{- range .Values.ingress.precedingPaths }}
+          - path: {{ .path }}
+            backend:
+              serviceName: {{ .serviceName }}
+              servicePort: {{ .servicePort }}
+          {{- end }}
+          - backend:
+              serviceName: {{ $fullName }}
+              servicePort: {{ $svcPort }}
+            {{- if .Values.ingress.path }}
+            path: {{ .Values.ingress.path }}
+            {{- end }}
+          {{- range .Values.ingress.succeedingPaths }}
+          - path: {{ .path }}
+            backend:
+              serviceName: {{ .serviceName }}
+              servicePort: {{ .servicePort }}
+          {{- end }}
+      {{- if tpl .Values.ingress.host . }}
+      host: {{ tpl .Values.ingress.host . }}
+      {{- end }}
+    {{- end }}
+{{- end }}

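Since ingress.host is passed through tpl, the value may itself contain a template expression; a sketch of that indirection (the hostname pattern is an assumption):

    helm template kafka-ui charts/kafka-ui --set ingress.enabled=true \
      --set-string 'ingress.host={{ .Release.Name }}.example.com' | grep 'host:'
    # host: kafka-ui.example.com
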
charts/kafka-ui/templates/networkpolicy-egress.yaml (new file): 18 additions
@@ -0,0 +1,18 @@
+{{- if and .Values.networkPolicy.enabled .Values.networkPolicy.egressRules.customRules }}
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+  name: {{ printf "%s-egress" (include "kafka-ui.fullname" .) }}
+  labels:
+    {{- include "kafka-ui.labels" . | nindent 4 }}
+spec:
+  podSelector:
+    matchLabels:
+      {{- include "kafka-ui.selectorLabels" . | nindent 6 }}
+  policyTypes:
+    - Egress
+  egress:
+    {{- if .Values.networkPolicy.egressRules.customRules }}
+    {{- toYaml .Values.networkPolicy.egressRules.customRules | nindent 4 }}
+    {{- end }}
+{{- end }}

charts/kafka-ui/templates/networkpolicy-ingress.yaml (new file): 18 additions
@@ -0,0 +1,18 @@
+{{- if and .Values.networkPolicy.enabled .Values.networkPolicy.ingressRules.customRules }}
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+  name: {{ printf "%s-ingress" (include "kafka-ui.fullname" .) }}
+  labels:
+    {{- include "kafka-ui.labels" . | nindent 4 }}
+spec:
+  podSelector:
+    matchLabels:
+      {{- include "kafka-ui.selectorLabels" . | nindent 6 }}
+  policyTypes:
+    - Ingress
+  ingress:
+    {{- if .Values.networkPolicy.ingressRules.customRules }}
+    {{- toYaml .Values.networkPolicy.ingressRules.customRules | nindent 4 }}
+    {{- end }}
+{{- end }}

charts/kafka-ui/templates/secret.yaml (new file): 13 additions
@@ -0,0 +1,13 @@
+{{- if .Values.envs.secret -}}
+apiVersion: v1
+kind: Secret
+metadata:
+  name: {{ include "kafka-ui.fullname" . }}
+  labels:
+    {{- include "kafka-ui.labels" . | nindent 4 }}
+type: Opaque
+data:
+  {{- range $key, $val := .Values.envs.secret }}
+  {{ $key }}: {{ $val | b64enc | quote }}
+  {{- end -}}
+{{- end}}

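Each key under envs.secret is base64-encoded by b64enc at render time, which is what the API server expects in a Secret's data map. A render sketch with a hypothetical credential key:

    helm template kafka-ui charts/kafka-ui \
      --set envs.secret.SPRING_SECURITY_USER_PASSWORD=changeit \
      --show-only templates/secret.yaml
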
charts/kafka-ui/templates/service.yaml (new file): 22 additions
@@ -0,0 +1,22 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: {{ include "kafka-ui.fullname" . }}
+  labels:
+    {{- include "kafka-ui.labels" . | nindent 4 }}
+  {{- if .Values.service.annotations }}
+  annotations:
+    {{ toYaml .Values.service.annotations | nindent 4 }}
+  {{- end }}
+spec:
+  type: {{ .Values.service.type }}
+  ports:
+    - port: {{ .Values.service.port }}
+      targetPort: http
+      protocol: TCP
+      name: http
+      {{- if (and (eq .Values.service.type "NodePort") .Values.service.nodePort) }}
+      nodePort: {{ .Values.service.nodePort }}
+      {{- end }}
+  selector:
+    {{- include "kafka-ui.selectorLabels" . | nindent 4 }}

charts/kafka-ui/templates/serviceaccount.yaml (new file): 12 additions
@@ -0,0 +1,12 @@
+{{- if .Values.serviceAccount.create -}}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: {{ include "kafka-ui.serviceAccountName" . }}
+  labels:
+    {{- include "kafka-ui.labels" . | nindent 4 }}
+  {{- with .Values.serviceAccount.annotations }}
+  annotations:
+    {{- toYaml . | nindent 4 }}
+  {{- end }}
+{{- end }}

charts/kafka-ui/values.yaml (new file): 161 additions
@@ -0,0 +1,161 @@
+replicaCount: 1
+
+image:
+  registry: docker.io
+  repository: provectuslabs/kafka-ui
+  pullPolicy: IfNotPresent
+  # Overrides the image tag whose default is the chart appVersion.
+  tag: ""
+
+imagePullSecrets: []
+nameOverride: ""
+fullnameOverride: ""
+
+serviceAccount:
+  # Specifies whether a service account should be created
+  create: true
+  # Annotations to add to the service account
+  annotations: {}
+  # The name of the service account to use.
+  # If not set and create is true, a name is generated using the fullname template
+  name: ""
+
+existingConfigMap: ""
+yamlApplicationConfig:
+  {}
+  # kafka:
+  #   clusters:
+  #     - name: yaml
+  #       bootstrapServers: kafka-service:9092
+  # spring:
+  #   security:
+  #     oauth2:
+  # auth:
+  #   type: disabled
+  # management:
+  #   health:
+  #     ldap:
+  #       enabled: false
+yamlApplicationConfigConfigMap:
+  {}
+  # keyName: config.yml
+  # name: configMapName
+existingSecret: ""
+envs:
+  secret: {}
+  config: {}
+
+networkPolicy:
+  enabled: false
+  egressRules:
+    ## Additional custom egress rules
+    ## e.g:
+    ## customRules:
+    ##   - to:
+    ##       - namespaceSelector:
+    ##           matchLabels:
+    ##             label: example
+    customRules: []
+  ingressRules:
+    ## Additional custom ingress rules
+    ## e.g:
+    ## customRules:
+    ##   - from:
+    ##       - namespaceSelector:
+    ##           matchLabels:
+    ##             label: example
+    customRules: []
+
+podAnnotations: {}
+podLabels: {}
+
+## Annotations to be added to kafka-ui Deployment
+##
+annotations: {}
+
+## Set field schema as HTTPS for readines and liveness probe
+##
+probes:
+  useHttpsScheme: false
+
+podSecurityContext:
+  {}
+  # fsGroup: 2000
+
+securityContext:
+  {}
+  # capabilities:
+  #   drop:
+  #     - ALL
+  # readOnlyRootFilesystem: true
+  # runAsNonRoot: true
+  # runAsUser: 1000
+
+service:
+  type: ClusterIP
+  port: 80
+  # if you want to force a specific nodePort. Must be use with service.type=NodePort
+  # nodePort:
+
+# Ingress configuration
+ingress:
+  # Enable ingress resource
+  enabled: false
+
+  # Annotations for the Ingress
+  annotations: {}
+
+  # ingressClassName for the Ingress
+  ingressClassName: ""
+
+  # The path for the Ingress
+  path: "/"
+
+  # The path type for the Ingress
+  pathType: "Prefix"
+
+  # The hostname for the Ingress
+  host: ""
+
+  # configs for Ingress TLS
+  tls:
+    # Enable TLS termination for the Ingress
+    enabled: false
+    # the name of a pre-created Secret containing a TLS private key and certificate
+    secretName: ""
+
+  # HTTP paths to add to the Ingress before the default path
+  precedingPaths: []
+
+  # Http paths to add to the Ingress after the default path
+  succeedingPaths: []
+
+resources:
+  {}
+  # limits:
+  #   cpu: 200m
+  #   memory: 512Mi
+  # requests:
+  #   cpu: 200m
+  #   memory: 256Mi
+
+autoscaling:
+  enabled: false
+  minReplicas: 1
+  maxReplicas: 100
+  targetCPUUtilizationPercentage: 80
+  # targetMemoryUtilizationPercentage: 80
+
+nodeSelector: {}
+
+tolerations: []
+
+affinity: {}
+
+env: {}
+
+initContainers: {}
+
+volumeMounts: {}
+
+volumes: {}

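Pulling the values above together, a minimal installation sketch: entries under envs.config become container environment variables via the ConfigMap template, so clusters can be wired up without a values file (all names and addresses below are assumptions):

    helm install kafka-ui charts/kafka-ui \
      --set envs.config.KAFKA_CLUSTERS_0_NAME=local \
      --set envs.config.KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS=kafka:9092
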
@@ -1,2 +1,2 @@
 rules:
-  - pattern: ".*"
+  - pattern: ".*"

@@ -20,8 +20,6 @@ services:
       KAFKA_CLUSTERS_0_KAFKACONNECT_0_NAME: first
       KAFKA_CLUSTERS_0_KAFKACONNECT_0_ADDRESS: http://kafka-connect0:8083
       DYNAMIC_CONFIG_ENABLED: 'true' # not necessary, added for tests
-      KAFKA_CLUSTERS_0_AUDIT_TOPICAUDITENABLED: 'true'
-      KAFKA_CLUSTERS_0_AUDIT_CONSOLEAUDITENABLED: 'true'

   kafka0:
     image: confluentinc/cp-kafka:7.2.1.arm64

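The same toggles work outside compose; a docker run sketch mirroring the environment above (the audit flags only take effect on builds that include the audit feature, which per this diff is the master side):

    docker run -p 8080:8080 \
      -e DYNAMIC_CONFIG_ENABLED=true \
      -e KAFKA_CLUSTERS_0_AUDIT_TOPICAUDITENABLED=true \
      -e KAFKA_CLUSTERS_0_AUDIT_CONSOLEAUDITENABLED=true \
      provectuslabs/kafka-ui:master
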
@@ -1,11 +1,7 @@
 #FROM azul/zulu-openjdk-alpine:17-jre-headless
 FROM azul/zulu-openjdk-alpine@sha256:a36679ac0d28cb835e2a8c00e1e0d95509c6c51c5081c7782b85edb1f37a771a

-RUN apk add --no-cache \
-    # snappy codec
-    gcompat \
-    # configuring timezones
-    tzdata
+RUN apk add --no-cache gcompat # need to make snappy codec work
 RUN addgroup -S kafkaui && adduser -S kafkaui -G kafkaui

 # creating folder for dynamic config usage (certificates uploads, etc)

@@ -81,12 +81,6 @@
       <groupId>io.confluent</groupId>
       <artifactId>kafka-json-schema-serializer</artifactId>
       <version>${confluent.version}</version>
-      <exclusions>
-        <exclusion>
-          <groupId>commons-collections</groupId>
-          <artifactId>commons-collections</artifactId>
-        </exclusion>
-      </exclusions>
     </dependency>
     <dependency>
       <groupId>io.confluent</groupId>
@@ -97,7 +91,7 @@
     <dependency>
       <groupId>software.amazon.msk</groupId>
       <artifactId>aws-msk-iam-auth</artifactId>
-      <version>1.1.7</version>
+      <version>1.1.6</version>
     </dependency>

     <dependency>
@@ -120,11 +114,6 @@
       <artifactId>json</artifactId>
       <version>${org.json.version}</version>
     </dependency>
-    <dependency>
-      <groupId>io.micrometer</groupId>
-      <artifactId>micrometer-registry-prometheus</artifactId>
-      <scope>runtime</scope>
-    </dependency>
     <dependency>
       <groupId>org.springframework.boot</groupId>
@@ -141,11 +130,6 @@
       <artifactId>commons-pool2</artifactId>
       <version>${apache.commons.version}</version>
     </dependency>
-    <dependency>
-      <groupId>org.apache.commons</groupId>
-      <artifactId>commons-collections4</artifactId>
-      <version>4.4</version>
-    </dependency>
     <dependency>
       <groupId>org.testcontainers</groupId>
       <artifactId>testcontainers</artifactId>
@@ -249,6 +233,8 @@
       <groupId>org.springframework.security</groupId>
       <artifactId>spring-security-ldap</artifactId>
     </dependency>
+
+
     <dependency>
       <groupId>org.codehaus.groovy</groupId>
       <artifactId>groovy-jsr223</artifactId>
@@ -325,7 +311,7 @@
     <plugin>
       <groupId>org.apache.maven.plugins</groupId>
       <artifactId>maven-checkstyle-plugin</artifactId>
-      <version>3.3.0</version>
+      <version>3.1.2</version>
       <dependencies>
         <dependency>
           <groupId>com.puppycrawl.tools</groupId>
@@ -403,7 +389,7 @@
     <plugin>
       <groupId>pl.project13.maven</groupId>
       <artifactId>git-commit-id-plugin</artifactId>
-      <version>4.9.10</version>
+      <version>4.0.0</version>
       <executions>
         <execution>
           <id>get-the-git-infos</id>

@@ -51,12 +51,13 @@ public class ClustersProperties {
     List<Masking> masking;
     Long pollingThrottleRate;
     TruststoreConfig ssl;
-    AuditProperties audit;
   }

   @Data
   public static class PollingProperties {
     Integer pollTimeoutMs;
     Integer partitionPollTimeout;
     Integer noDataEmptyPolls;
+    Integer maxPageSize;
+    Integer defaultPageSize;
   }
@@ -142,23 +143,6 @@ public class ClustersProperties {
     }
   }

-  @Data
-  @NoArgsConstructor
-  @AllArgsConstructor
-  public static class AuditProperties {
-    String topic;
-    Integer auditTopicsPartitions;
-    Boolean topicAuditEnabled;
-    Boolean consoleAuditEnabled;
-    LogLevel level;
-    Map<String, String> auditTopicProperties;
-
-    public enum LogLevel {
-      ALL,
-      ALTER_ONLY //default
-    }
-  }

   @PostConstruct
   public void validateAndSetDefaults() {
     if (clusters != null) {
@@ -1,39 +1,18 @@
 package com.provectus.kafka.ui.config;

-import org.springframework.context.annotation.Bean;
 import org.springframework.context.annotation.Configuration;
-import org.springframework.http.HttpHeaders;
-import org.springframework.http.HttpMethod;
-import org.springframework.http.HttpStatus;
-import org.springframework.http.server.reactive.ServerHttpRequest;
-import org.springframework.http.server.reactive.ServerHttpResponse;
-import org.springframework.web.server.ServerWebExchange;
-import org.springframework.web.server.WebFilter;
-import org.springframework.web.server.WebFilterChain;
-import reactor.core.publisher.Mono;
+import org.springframework.web.reactive.config.CorsRegistry;
+import org.springframework.web.reactive.config.WebFluxConfigurer;

 @Configuration
-public class CorsGlobalConfiguration {
+public class CorsGlobalConfiguration implements WebFluxConfigurer {

-  @Bean
-  public WebFilter corsFilter() {
-    return (final ServerWebExchange ctx, final WebFilterChain chain) -> {
-      final ServerHttpRequest request = ctx.getRequest();
-
-      final ServerHttpResponse response = ctx.getResponse();
-      final HttpHeaders headers = response.getHeaders();
-      headers.add("Access-Control-Allow-Origin", "*");
-      headers.add("Access-Control-Allow-Methods", "GET, PUT, POST, DELETE, OPTIONS");
-      headers.add("Access-Control-Max-Age", "3600");
-      headers.add("Access-Control-Allow-Headers", "Content-Type");
-
-      if (request.getMethod() == HttpMethod.OPTIONS) {
-        response.setStatusCode(HttpStatus.OK);
-        return Mono.empty();
-      }
-
-      return chain.filter(ctx);
-    };
+  @Override
+  public void addCorsMappings(CorsRegistry registry) {
+    registry.addMapping("/**")
+        .allowedOrigins("*")
+        .allowedMethods("*")
+        .allowedHeaders("*")
+        .allowCredentials(false);
   }

 }
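Both sides of this hunk configure the same wide-open CORS policy, once as a hand-rolled WebFilter and once via WebFluxConfigurer. For comparison, a minimal sketch (not taken from either branch) of the third standard Spring WebFlux option, a CorsWebFilter bean backed by UrlBasedCorsConfigurationSource:

import java.util.List;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.web.cors.CorsConfiguration;
import org.springframework.web.cors.reactive.CorsWebFilter;
import org.springframework.web.cors.reactive.UrlBasedCorsConfigurationSource;

@Configuration
public class CorsWebFilterSketch {

  @Bean
  public CorsWebFilter corsWebFilter() {
    CorsConfiguration config = new CorsConfiguration();
    config.setAllowedOrigins(List.of("*"));  // same open policy as the hunk above
    config.setAllowedMethods(List.of("*"));
    config.setAllowedHeaders(List.of("*"));
    config.setAllowCredentials(false);       // credentials must stay off with a wildcard origin

    UrlBasedCorsConfigurationSource source = new UrlBasedCorsConfigurationSource();
    source.registerCorsConfiguration("/**", config);
    return new CorsWebFilter(source);
  }
}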
@@ -1,6 +1,7 @@
 package com.provectus.kafka.ui.config;

 import com.provectus.kafka.ui.exception.ValidationException;
+import java.beans.Transient;
 import javax.annotation.PostConstruct;
 import lombok.Data;
 import org.springframework.boot.context.properties.ConfigurationProperties;
@@ -13,7 +13,6 @@ abstract class AbstractAuthSecurityConfig {
       "/resources/**",
       "/actuator/health/**",
       "/actuator/info",
-      "/actuator/prometheus",
       "/auth",
       "/login",
       "/logout",
@@ -1,6 +1,7 @@
 package com.provectus.kafka.ui.config.auth;

 import java.util.Collection;
+import lombok.Value;

 public record AuthenticatedUser(String principal, Collection<String> groups) {

@@ -6,13 +6,13 @@ import lombok.extern.slf4j.Slf4j;
 import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
 import org.springframework.context.annotation.Bean;
 import org.springframework.context.annotation.Configuration;
-import org.springframework.http.HttpMethod;
 import org.springframework.security.config.annotation.web.reactive.EnableWebFluxSecurity;
+import org.springframework.security.config.web.server.SecurityWebFiltersOrder;
 import org.springframework.security.config.web.server.ServerHttpSecurity;
 import org.springframework.security.web.server.SecurityWebFilterChain;
 import org.springframework.security.web.server.authentication.RedirectServerAuthenticationSuccessHandler;
 import org.springframework.security.web.server.authentication.logout.RedirectServerLogoutSuccessHandler;
-import org.springframework.security.web.server.util.matcher.ServerWebExchangeMatchers;
+import org.springframework.security.web.server.ui.LogoutPageGeneratingWebFilter;

 @Configuration
 @EnableWebFluxSecurity
@@ -33,19 +33,15 @@ public class BasicAuthSecurityConfig extends AbstractAuthSecurityConfig {
     final var logoutSuccessHandler = new RedirectServerLogoutSuccessHandler();
     logoutSuccessHandler.setLogoutSuccessUrl(URI.create(LOGOUT_URL));

-    return http.authorizeExchange(spec -> spec
-            .pathMatchers(AUTH_WHITELIST)
-            .permitAll()
-            .anyExchange()
-            .authenticated()
-        )
-        .formLogin(spec -> spec.loginPage(LOGIN_URL).authenticationSuccessHandler(authHandler))
-        .logout(spec -> spec
-            .logoutSuccessHandler(logoutSuccessHandler)
-            .requiresLogout(ServerWebExchangeMatchers.pathMatchers(HttpMethod.GET, "/logout")))
-        .csrf(ServerHttpSecurity.CsrfSpec::disable)
-        .build();
+    return http
+        .addFilterAfter(new LogoutPageGeneratingWebFilter(), SecurityWebFiltersOrder.REACTOR_CONTEXT)
+        .csrf().disable()
+        .authorizeExchange()
+        .pathMatchers(AUTH_WHITELIST).permitAll()
+        .anyExchange().authenticated()
+        .and().formLogin().loginPage(LOGIN_URL).authenticationSuccessHandler(authHandler)
+        .and().logout().logoutSuccessHandler(logoutSuccessHandler)
+        .and().build();
   }

 }
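The replacement above moves from Spring Security's lambda configuration DSL back to the older `.and()` chaining. As a hedged sketch assuming the Spring Security 5.8+ reactive APIs, the lambda form of a minimal chain looks like this; it is the spelling that survives into Spring Security 6, where `.and()` and the no-argument `csrf()` accessors were removed:

import org.springframework.security.config.Customizer;
import org.springframework.security.config.web.server.ServerHttpSecurity;
import org.springframework.security.web.server.SecurityWebFilterChain;

class SecurityDslSketch {

  // Lambda DSL: each configurer is customized inside its own lambda,
  // so there is no need to navigate back with .and().
  SecurityWebFilterChain lambdaStyle(ServerHttpSecurity http) {
    return http
        .authorizeExchange(spec -> spec.anyExchange().authenticated())
        .formLogin(Customizer.withDefaults())
        .csrf(ServerHttpSecurity.CsrfSpec::disable)
        .build();
  }
}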
@@ -27,12 +27,10 @@ public class DisabledAuthSecurityConfig extends AbstractAuthSecurityConfig {
       System.exit(1);
     }
     log.warn("Authentication is disabled. Access will be unrestricted.");

-    return http.authorizeExchange(spec -> spec
-            .anyExchange()
-            .permitAll()
-        )
-        .csrf(ServerHttpSecurity.CsrfSpec::disable)
+    return http.authorizeExchange()
+        .anyExchange().permitAll()
+        .and()
+        .csrf().disable()
         .build();
   }

@@ -15,8 +15,6 @@ public class LdapProperties {
   private String userFilterSearchBase;
   private String userFilterSearchFilter;
   private String groupFilterSearchBase;
-  private String groupFilterSearchFilter;
-  private String groupRoleAttribute;

   @Value("${oauth2.ldap.activeDirectory:false}")
   private boolean isActiveDirectory;
@@ -3,16 +3,14 @@ package com.provectus.kafka.ui.config.auth;
 import static com.provectus.kafka.ui.config.auth.AbstractAuthSecurityConfig.AUTH_WHITELIST;

 import com.provectus.kafka.ui.service.rbac.AccessControlService;
-import com.provectus.kafka.ui.service.rbac.extractor.RbacLdapAuthoritiesExtractor;
 import java.util.Collection;
 import java.util.List;
-import java.util.Optional;
+import javax.annotation.Nullable;
 import lombok.RequiredArgsConstructor;
 import lombok.extern.slf4j.Slf4j;
 import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
 import org.springframework.boot.autoconfigure.ldap.LdapAutoConfiguration;
 import org.springframework.boot.context.properties.EnableConfigurationProperties;
-import org.springframework.context.ApplicationContext;
 import org.springframework.context.annotation.Bean;
 import org.springframework.context.annotation.Configuration;
 import org.springframework.context.annotation.Import;

@@ -24,7 +22,6 @@ import org.springframework.security.authentication.AuthenticationManager;
 import org.springframework.security.authentication.ProviderManager;
 import org.springframework.security.authentication.ReactiveAuthenticationManager;
 import org.springframework.security.authentication.ReactiveAuthenticationManagerAdapter;
-import org.springframework.security.config.Customizer;
 import org.springframework.security.config.annotation.web.reactive.EnableWebFluxSecurity;
 import org.springframework.security.config.web.server.ServerHttpSecurity;
 import org.springframework.security.core.GrantedAuthority;
@@ -53,9 +50,9 @@ public class LdapSecurityConfig {

   @Bean
   public ReactiveAuthenticationManager authenticationManager(BaseLdapPathContextSource contextSource,
-                                                             LdapAuthoritiesPopulator authoritiesExtractor,
-                                                             AccessControlService acs) {
-    var rbacEnabled = acs.isRbacEnabled();
+                                                             LdapAuthoritiesPopulator ldapAuthoritiesPopulator,
+                                                             @Nullable AccessControlService acs) {
+    var rbacEnabled = acs != null && acs.isRbacEnabled();
     BindAuthenticator ba = new BindAuthenticator(contextSource);
     if (props.getBase() != null) {
       ba.setUserDnPatterns(new String[] {props.getBase()});

@@ -70,7 +67,7 @@ public class LdapSecurityConfig {
     AbstractLdapAuthenticationProvider authenticationProvider;
     if (!props.isActiveDirectory()) {
       authenticationProvider = rbacEnabled
-          ? new LdapAuthenticationProvider(ba, authoritiesExtractor)
+          ? new LdapAuthenticationProvider(ba, ldapAuthoritiesPopulator)
           : new LdapAuthenticationProvider(ba);
     } else {
       authenticationProvider = new ActiveDirectoryLdapAuthenticationProvider(props.getActiveDirectoryDomain(),
@@ -100,24 +97,11 @@ public class LdapSecurityConfig {

   @Bean
   @Primary
-  public DefaultLdapAuthoritiesPopulator ldapAuthoritiesExtractor(ApplicationContext context,
-                                                                  BaseLdapPathContextSource contextSource,
-                                                                  AccessControlService acs) {
-    var rbacEnabled = acs != null && acs.isRbacEnabled();
-
-    DefaultLdapAuthoritiesPopulator extractor;
-
-    if (rbacEnabled) {
-      extractor = new RbacLdapAuthoritiesExtractor(context, contextSource, props.getGroupFilterSearchBase());
-    } else {
-      extractor = new DefaultLdapAuthoritiesPopulator(contextSource, props.getGroupFilterSearchBase());
-    }
-
-    Optional.ofNullable(props.getGroupFilterSearchFilter()).ifPresent(extractor::setGroupSearchFilter);
-    extractor.setRolePrefix("");
-    extractor.setConvertToUpperCase(false);
-    extractor.setSearchSubtree(true);
-    return extractor;
+  public LdapAuthoritiesPopulator ldapAuthoritiesPopulator(BaseLdapPathContextSource contextSource) {
+    var authoritiesPopulator = new DefaultLdapAuthoritiesPopulator(contextSource, props.getGroupFilterSearchBase());
+    authoritiesPopulator.setRolePrefix("");
+    authoritiesPopulator.setConvertToUpperCase(false);
+    return authoritiesPopulator;
   }

   @Bean
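The removed ldapAuthoritiesExtractor bean also applied an optional group search filter and subtree search, which the simplified populator drops. A hedged sketch of restoring those two knobs on a plain DefaultLdapAuthoritiesPopulator; groupSearchBase and groupSearchFilter are placeholder inputs here, not values from either branch:

import org.springframework.ldap.core.support.BaseLdapPathContextSource;
import org.springframework.security.ldap.userdetails.DefaultLdapAuthoritiesPopulator;

class LdapPopulatorSketch {

  DefaultLdapAuthoritiesPopulator buildPopulator(BaseLdapPathContextSource contextSource,
                                                 String groupSearchBase,
                                                 String groupSearchFilter /* may be null */) {
    var populator = new DefaultLdapAuthoritiesPopulator(contextSource, groupSearchBase);
    if (groupSearchFilter != null) {
      populator.setGroupSearchFilter(groupSearchFilter); // e.g. "(member={0})"
    }
    populator.setRolePrefix("");            // keep group names as-is, no "ROLE_" prefix
    populator.setConvertToUpperCase(false);
    populator.setSearchSubtree(true);       // search the whole subtree, as the removed code did
    return populator;
  }
}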
@@ -127,15 +111,21 @@ public class LdapSecurityConfig {
       log.info("Active Directory support for LDAP has been enabled.");
     }

-    return http.authorizeExchange(spec -> spec
-            .pathMatchers(AUTH_WHITELIST)
-            .permitAll()
-            .anyExchange()
-            .authenticated()
-        )
-        .formLogin(Customizer.withDefaults())
-        .logout(Customizer.withDefaults())
-        .csrf(ServerHttpSecurity.CsrfSpec::disable)
+    return http
+        .authorizeExchange()
+        .pathMatchers(AUTH_WHITELIST)
+        .permitAll()
+        .anyExchange()
+        .authenticated()
+
+        .and()
+        .formLogin()
+
+        .and()
+        .logout()
+
+        .and()
+        .csrf().disable()
         .build();
   }

@@ -12,11 +12,10 @@ import lombok.extern.log4j.Log4j2;
 import org.jetbrains.annotations.Nullable;
 import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
 import org.springframework.boot.autoconfigure.security.oauth2.client.OAuth2ClientProperties;
-import org.springframework.boot.autoconfigure.security.oauth2.client.OAuth2ClientPropertiesMapper;
+import org.springframework.boot.autoconfigure.security.oauth2.client.OAuth2ClientPropertiesRegistrationAdapter;
 import org.springframework.boot.context.properties.EnableConfigurationProperties;
 import org.springframework.context.annotation.Bean;
 import org.springframework.context.annotation.Configuration;
-import org.springframework.security.config.Customizer;
 import org.springframework.security.config.annotation.method.configuration.EnableReactiveMethodSecurity;
 import org.springframework.security.config.annotation.web.reactive.EnableWebFluxSecurity;
 import org.springframework.security.config.web.server.ServerHttpSecurity;
@@ -50,15 +49,21 @@ public class OAuthSecurityConfig extends AbstractAuthSecurityConfig {
   public SecurityWebFilterChain configure(ServerHttpSecurity http, OAuthLogoutSuccessHandler logoutHandler) {
     log.info("Configuring OAUTH2 authentication.");

-    return http.authorizeExchange(spec -> spec
-            .pathMatchers(AUTH_WHITELIST)
-            .permitAll()
-            .anyExchange()
-            .authenticated()
-        )
-        .oauth2Login(Customizer.withDefaults())
-        .logout(spec -> spec.logoutSuccessHandler(logoutHandler))
-        .csrf(ServerHttpSecurity.CsrfSpec::disable)
+    return http.authorizeExchange()
+        .pathMatchers(AUTH_WHITELIST)
+        .permitAll()
+        .anyExchange()
+        .authenticated()
+
+        .and()
+        .oauth2Login()
+
+        .and()
+        .logout()
+        .logoutSuccessHandler(logoutHandler)
+
+        .and()
+        .csrf().disable()
         .build();
   }

@@ -98,10 +103,7 @@ public class OAuthSecurityConfig extends AbstractAuthSecurityConfig {
   public InMemoryReactiveClientRegistrationRepository clientRegistrationRepository() {
     final OAuth2ClientProperties props = OAuthPropertiesConverter.convertProperties(properties);
     final List<ClientRegistration> registrations =
-        new ArrayList<>(new OAuth2ClientPropertiesMapper(props).asClientRegistrations().values());
-    if (registrations.isEmpty()) {
-      throw new IllegalArgumentException("OAuth2 authentication is enabled but no providers specified.");
-    }
+        new ArrayList<>(OAuth2ClientPropertiesRegistrationAdapter.getClientRegistrations(props).values());
     return new InMemoryReactiveClientRegistrationRepository(registrations);
   }

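Both OAuth2ClientPropertiesMapper and the deprecated OAuth2ClientPropertiesRegistrationAdapter produce ClientRegistration instances from bound properties. For reference, a hedged sketch of one such registration built by hand; every identifier and URL below is a placeholder, not a value from either branch:

import org.springframework.security.oauth2.client.registration.ClientRegistration;
import org.springframework.security.oauth2.client.registration.InMemoryReactiveClientRegistrationRepository;
import org.springframework.security.oauth2.core.AuthorizationGrantType;

class ClientRegistrationSketch {

  InMemoryReactiveClientRegistrationRepository repository() {
    // Equivalent of what the properties mapper emits for one configured provider.
    ClientRegistration registration = ClientRegistration.withRegistrationId("example")
        .clientId("kafka-ui")                                   // placeholder client id
        .clientSecret("change-me")                              // placeholder secret
        .authorizationGrantType(AuthorizationGrantType.AUTHORIZATION_CODE)
        .redirectUri("{baseUrl}/login/oauth2/code/{registrationId}")
        .authorizationUri("https://idp.example.com/oauth2/authorize")
        .tokenUri("https://idp.example.com/oauth2/token")
        .build();
    return new InMemoryReactiveClientRegistrationRepository(registration);
  }
}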
@@ -2,6 +2,7 @@ package com.provectus.kafka.ui.config.auth;

 import java.util.Collection;
+import java.util.Map;
 import lombok.Value;
 import org.springframework.security.core.GrantedAuthority;
 import org.springframework.security.oauth2.core.user.OAuth2User;

@@ -2,6 +2,7 @@ package com.provectus.kafka.ui.config.auth;

 import java.util.Collection;
+import java.util.Map;
 import lombok.Value;
 import org.springframework.security.core.GrantedAuthority;
 import org.springframework.security.oauth2.core.oidc.OidcIdToken;
 import org.springframework.security.oauth2.core.oidc.OidcUserInfo;
@@ -1,14 +1,13 @@
 package com.provectus.kafka.ui.config.auth.condition;

 import com.provectus.kafka.ui.service.rbac.AbstractProviderCondition;
-import org.jetbrains.annotations.NotNull;
 import org.springframework.context.annotation.Condition;
 import org.springframework.context.annotation.ConditionContext;
 import org.springframework.core.type.AnnotatedTypeMetadata;

 public class CognitoCondition extends AbstractProviderCondition implements Condition {
   @Override
-  public boolean matches(final ConditionContext context, final @NotNull AnnotatedTypeMetadata metadata) {
+  public boolean matches(final ConditionContext context, final AnnotatedTypeMetadata metadata) {
     return getRegisteredProvidersTypes(context.getEnvironment()).stream().anyMatch(a -> a.equalsIgnoreCase("cognito"));
   }
 }
@@ -2,19 +2,12 @@ package com.provectus.kafka.ui.controller;

 import com.provectus.kafka.ui.exception.ClusterNotFoundException;
 import com.provectus.kafka.ui.model.KafkaCluster;
-import com.provectus.kafka.ui.model.rbac.AccessContext;
 import com.provectus.kafka.ui.service.ClustersStorage;
-import com.provectus.kafka.ui.service.audit.AuditService;
-import com.provectus.kafka.ui.service.rbac.AccessControlService;
 import org.springframework.beans.factory.annotation.Autowired;
-import reactor.core.publisher.Mono;
-import reactor.core.publisher.Signal;

 public abstract class AbstractController {

-  protected ClustersStorage clustersStorage;
-  protected AccessControlService accessControlService;
-  protected AuditService auditService;
+  private ClustersStorage clustersStorage;

   protected KafkaCluster getCluster(String name) {
     return clustersStorage.getClusterByName(name)

@@ -22,26 +15,8 @@ public abstract class AbstractController {
             String.format("Cluster with name '%s' not found", name)));
   }

-  protected Mono<Void> validateAccess(AccessContext context) {
-    return accessControlService.validateAccess(context);
-  }
-
-  protected void audit(AccessContext acxt, Signal<?> sig) {
-    auditService.audit(acxt, sig);
-  }
-
   @Autowired
   public void setClustersStorage(ClustersStorage clustersStorage) {
     this.clustersStorage = clustersStorage;
   }
-
-  @Autowired
-  public void setAccessControlService(AccessControlService accessControlService) {
-    this.accessControlService = accessControlService;
-  }
-
-  @Autowired
-  public void setAuditService(AuditService auditService) {
-    this.auditService = auditService;
-  }
 }
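The deleted validateAccess and audit helpers are what the controller hunks below keep replacing with direct accessControlService calls. A hedged sketch of the master-side pattern in a hypothetical subclass; doWork stands in for any real service call:

import com.provectus.kafka.ui.model.rbac.AccessContext;
import org.springframework.http.ResponseEntity;
import reactor.core.publisher.Mono;

class ExampleController extends AbstractController {

  Mono<ResponseEntity<Void>> exampleEndpoint(String clusterName) {
    AccessContext context = AccessContext.builder()
        .cluster(clusterName)
        .operationName("exampleEndpoint")
        .build();
    return validateAccess(context)               // completes empty, or errors if RBAC denies access
        .then(doWork(clusterName))               // the actual operation
        .doOnEach(sig -> audit(context, sig))    // mirror success and error signals into the audit log
        .thenReturn(ResponseEntity.ok().build());
  }

  private Mono<Void> doWork(String clusterName) {
    return Mono.empty(); // placeholder for a real service call
  }
}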
@@ -13,6 +13,7 @@ import java.util.Collection;
 import java.util.Collections;
 import java.util.List;
 import java.util.Objects;
+import java.util.stream.Collectors;
 import javax.annotation.Nullable;
 import lombok.RequiredArgsConstructor;
 import lombok.extern.slf4j.Slf4j;
@@ -37,7 +38,7 @@ public class AccessController implements AuthorizationApi {
             .filter(role -> user.groups().contains(role.getName()))
             .map(role -> mapPermissions(role.getPermissions(), role.getClusters()))
             .flatMap(Collection::stream)
-            .toList()
+            .collect(Collectors.toList())
         )
         .switchIfEmpty(Mono.just(Collections.emptyList()));

@@ -69,10 +70,10 @@ public class AccessController implements AuthorizationApi {
               .map(String::toUpperCase)
               .map(this::mapAction)
               .filter(Objects::nonNull)
-              .toList());
+              .collect(Collectors.toList()));
           return dto;
         })
-        .toList();
+        .collect(Collectors.toList());
   }

   @Nullable
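The toList()/collect(Collectors.toList()) swaps above are not purely cosmetic: Stream.toList(), available since Java 16, returns an unmodifiable list, while Collectors.toList() makes no immutability guarantee and in practice returns an ArrayList. A small standalone illustration:

import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.Stream;

class ToListSketch {
  public static void main(String[] args) {
    List<Integer> unmodifiable = Stream.of(1, 2, 3).toList();
    List<Integer> mutable = Stream.of(1, 2, 3).collect(Collectors.toList());

    mutable.add(4);        // fine: Collectors.toList() currently yields an ArrayList
    try {
      unmodifiable.add(4); // throws UnsupportedOperationException
    } catch (UnsupportedOperationException expected) {
      System.out.println("Stream.toList() result is unmodifiable");
    }
  }
}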
@@ -2,15 +2,13 @@ package com.provectus.kafka.ui.controller;

 import com.provectus.kafka.ui.api.AclsApi;
 import com.provectus.kafka.ui.mapper.ClusterMapper;
-import com.provectus.kafka.ui.model.CreateConsumerAclDTO;
-import com.provectus.kafka.ui.model.CreateProducerAclDTO;
-import com.provectus.kafka.ui.model.CreateStreamAppAclDTO;
 import com.provectus.kafka.ui.model.KafkaAclDTO;
 import com.provectus.kafka.ui.model.KafkaAclNamePatternTypeDTO;
 import com.provectus.kafka.ui.model.KafkaAclResourceTypeDTO;
 import com.provectus.kafka.ui.model.rbac.AccessContext;
 import com.provectus.kafka.ui.model.rbac.permission.AclAction;
 import com.provectus.kafka.ui.service.acl.AclsService;
+import com.provectus.kafka.ui.service.rbac.AccessControlService;
 import java.util.Optional;
 import lombok.RequiredArgsConstructor;
 import org.apache.kafka.common.resource.PatternType;

@@ -27,6 +25,7 @@ import reactor.core.publisher.Mono;
 public class AclsController extends AbstractController implements AclsApi {

   private final AclsService aclsService;
+  private final AccessControlService accessControlService;

   @Override
   public Mono<ResponseEntity<Void>> createAcl(String clusterName, Mono<KafkaAclDTO> kafkaAclDto,
@@ -34,14 +33,12 @@ public class AclsController extends AbstractController implements AclsApi {
     AccessContext context = AccessContext.builder()
         .cluster(clusterName)
         .aclActions(AclAction.EDIT)
-        .operationName("createAcl")
         .build();

-    return validateAccess(context)
+    return accessControlService.validateAccess(context)
         .then(kafkaAclDto)
         .map(ClusterMapper::toAclBinding)
         .flatMap(binding -> aclsService.createAcl(getCluster(clusterName), binding))
-        .doOnEach(sig -> audit(context, sig))
         .thenReturn(ResponseEntity.ok().build());
   }

@@ -51,14 +48,12 @@ public class AclsController extends AbstractController implements AclsApi {
     AccessContext context = AccessContext.builder()
         .cluster(clusterName)
         .aclActions(AclAction.EDIT)
-        .operationName("deleteAcl")
         .build();

-    return validateAccess(context)
+    return accessControlService.validateAccess(context)
         .then(kafkaAclDto)
         .map(ClusterMapper::toAclBinding)
         .flatMap(binding -> aclsService.deleteAcl(getCluster(clusterName), binding))
-        .doOnEach(sig -> audit(context, sig))
         .thenReturn(ResponseEntity.ok().build());
   }

@@ -71,7 +66,6 @@ public class AclsController extends AbstractController implements AclsApi {
     AccessContext context = AccessContext.builder()
         .cluster(clusterName)
         .aclActions(AclAction.VIEW)
-        .operationName("listAcls")
         .build();

     var resourceType = Optional.ofNullable(resourceTypeDto)

@@ -84,12 +78,12 @@ public class AclsController extends AbstractController implements AclsApi {

     var filter = new ResourcePatternFilter(resourceType, resourceName, namePatternType);

-    return validateAccess(context).then(
+    return accessControlService.validateAccess(context).then(
         Mono.just(
             ResponseEntity.ok(
                 aclsService.listAcls(getCluster(clusterName), filter)
                     .map(ClusterMapper::toKafkaAclDto)))
-    ).doOnEach(sig -> audit(context, sig));
+    );
   }

   @Override

@@ -97,14 +91,12 @@ public class AclsController extends AbstractController implements AclsApi {
     AccessContext context = AccessContext.builder()
         .cluster(clusterName)
         .aclActions(AclAction.VIEW)
-        .operationName("getAclAsCsv")
         .build();

-    return validateAccess(context).then(
+    return accessControlService.validateAccess(context).then(
         aclsService.getAclAsCsvString(getCluster(clusterName))
             .map(ResponseEntity::ok)
             .flatMap(Mono::just)
-            .doOnEach(sig -> audit(context, sig))
     );
   }

@@ -113,64 +105,11 @@ public class AclsController extends AbstractController implements AclsApi {
     AccessContext context = AccessContext.builder()
         .cluster(clusterName)
         .aclActions(AclAction.EDIT)
-        .operationName("syncAclsCsv")
         .build();

-    return validateAccess(context)
+    return accessControlService.validateAccess(context)
         .then(csvMono)
         .flatMap(csv -> aclsService.syncAclWithAclCsv(getCluster(clusterName), csv))
-        .doOnEach(sig -> audit(context, sig))
         .thenReturn(ResponseEntity.ok().build());
   }
-
-  @Override
-  public Mono<ResponseEntity<Void>> createConsumerAcl(String clusterName,
-                                                      Mono<CreateConsumerAclDTO> createConsumerAclDto,
-                                                      ServerWebExchange exchange) {
-    AccessContext context = AccessContext.builder()
-        .cluster(clusterName)
-        .aclActions(AclAction.EDIT)
-        .operationName("createConsumerAcl")
-        .build();
-
-    return validateAccess(context)
-        .then(createConsumerAclDto)
-        .flatMap(req -> aclsService.createConsumerAcl(getCluster(clusterName), req))
-        .doOnEach(sig -> audit(context, sig))
-        .thenReturn(ResponseEntity.ok().build());
-  }
-
-  @Override
-  public Mono<ResponseEntity<Void>> createProducerAcl(String clusterName,
-                                                      Mono<CreateProducerAclDTO> createProducerAclDto,
-                                                      ServerWebExchange exchange) {
-    AccessContext context = AccessContext.builder()
-        .cluster(clusterName)
-        .aclActions(AclAction.EDIT)
-        .operationName("createProducerAcl")
-        .build();
-
-    return validateAccess(context)
-        .then(createProducerAclDto)
-        .flatMap(req -> aclsService.createProducerAcl(getCluster(clusterName), req))
-        .doOnEach(sig -> audit(context, sig))
-        .thenReturn(ResponseEntity.ok().build());
-  }
-
-  @Override
-  public Mono<ResponseEntity<Void>> createStreamAppAcl(String clusterName,
-                                                       Mono<CreateStreamAppAclDTO> createStreamAppAclDto,
-                                                       ServerWebExchange exchange) {
-    AccessContext context = AccessContext.builder()
-        .cluster(clusterName)
-        .aclActions(AclAction.EDIT)
-        .operationName("createStreamAppAcl")
-        .build();
-
-    return validateAccess(context)
-        .then(createStreamAppAclDto)
-        .flatMap(req -> aclsService.createStreamAppAcl(getCluster(clusterName), req))
-        .doOnEach(sig -> audit(context, sig))
-        .thenReturn(ResponseEntity.ok().build());
-  }
 }
@@ -15,6 +15,7 @@ import com.provectus.kafka.ui.model.UploadedFileInfoDTO;
 import com.provectus.kafka.ui.model.rbac.AccessContext;
 import com.provectus.kafka.ui.service.ApplicationInfoService;
 import com.provectus.kafka.ui.service.KafkaClusterFactory;
+import com.provectus.kafka.ui.service.rbac.AccessControlService;
 import com.provectus.kafka.ui.util.ApplicationRestarter;
 import com.provectus.kafka.ui.util.DynamicConfigOperations;
 import com.provectus.kafka.ui.util.DynamicConfigOperations.PropertiesStructure;

@@ -37,7 +38,7 @@ import reactor.util.function.Tuples;
 @Slf4j
 @RestController
 @RequiredArgsConstructor
-public class ApplicationConfigController extends AbstractController implements ApplicationConfigApi {
+public class ApplicationConfigController implements ApplicationConfigApi {

   private static final PropertiesMapper MAPPER = Mappers.getMapper(PropertiesMapper.class);

@@ -49,6 +50,7 @@ public class ApplicationConfigController implements ApplicationConfigApi {
     ApplicationConfigPropertiesDTO toDto(PropertiesStructure propertiesStructure);
   }

+  private final AccessControlService accessControlService;
   private final DynamicConfigOperations dynamicConfigOperations;
   private final ApplicationRestarter restarter;
   private final KafkaClusterFactory kafkaClusterFactory;
@@ -61,69 +63,62 @@ public class ApplicationConfigController implements ApplicationConfigApi {

   @Override
   public Mono<ResponseEntity<ApplicationConfigDTO>> getCurrentConfig(ServerWebExchange exchange) {
-    var context = AccessContext.builder()
-        .applicationConfigActions(VIEW)
-        .operationName("getCurrentConfig")
-        .build();
-    return validateAccess(context)
+    return accessControlService
+        .validateAccess(
+            AccessContext.builder()
+                .applicationConfigActions(VIEW)
+                .build()
+        )
         .then(Mono.fromSupplier(() -> ResponseEntity.ok(
             new ApplicationConfigDTO()
                 .properties(MAPPER.toDto(dynamicConfigOperations.getCurrentProperties()))
-        )))
-        .doOnEach(sig -> audit(context, sig));
+        )));
   }

   @Override
   public Mono<ResponseEntity<Void>> restartWithConfig(Mono<RestartRequestDTO> restartRequestDto,
                                                       ServerWebExchange exchange) {
-    var context = AccessContext.builder()
-        .applicationConfigActions(EDIT)
-        .operationName("restartWithConfig")
-        .build();
-    return validateAccess(context)
+    return accessControlService
+        .validateAccess(
+            AccessContext.builder()
+                .applicationConfigActions(EDIT)
+                .build()
+        )
         .then(restartRequestDto)
-        .doOnNext(restartDto -> {
-          var newConfig = MAPPER.fromDto(restartDto.getConfig().getProperties());
-          dynamicConfigOperations.persist(newConfig);
-        })
-        .doOnEach(sig -> audit(context, sig))
-        .doOnSuccess(dto -> restarter.requestRestart())
-        .map(dto -> ResponseEntity.ok().build());
+        .map(dto -> {
+          dynamicConfigOperations.persist(MAPPER.fromDto(dto.getConfig().getProperties()));
+          restarter.requestRestart();
+          return ResponseEntity.ok().build();
+        });
   }

   @Override
   public Mono<ResponseEntity<UploadedFileInfoDTO>> uploadConfigRelatedFile(Flux<Part> fileFlux,
                                                                            ServerWebExchange exchange) {
-    var context = AccessContext.builder()
-        .applicationConfigActions(EDIT)
-        .operationName("uploadConfigRelatedFile")
-        .build();
-    return validateAccess(context)
+    return accessControlService
+        .validateAccess(
+            AccessContext.builder()
+                .applicationConfigActions(EDIT)
+                .build()
+        )
         .then(fileFlux.single())
         .flatMap(file ->
             dynamicConfigOperations.uploadConfigRelatedFile((FilePart) file)
                 .map(path -> new UploadedFileInfoDTO().location(path.toString()))
-                .map(ResponseEntity::ok))
-        .doOnEach(sig -> audit(context, sig));
+                .map(ResponseEntity::ok));
   }

   @Override
   public Mono<ResponseEntity<ApplicationConfigValidationDTO>> validateConfig(Mono<ApplicationConfigDTO> configDto,
                                                                              ServerWebExchange exchange) {
-    var context = AccessContext.builder()
-        .applicationConfigActions(EDIT)
-        .operationName("validateConfig")
-        .build();
-    return validateAccess(context)
-        .then(configDto)
+    return configDto
        .flatMap(config -> {
-          PropertiesStructure newConfig = MAPPER.fromDto(config.getProperties());
-          ClustersProperties clustersProperties = newConfig.getKafka();
+          PropertiesStructure propertiesStructure = MAPPER.fromDto(config.getProperties());
+          ClustersProperties clustersProperties = propertiesStructure.getKafka();
           return validateClustersConfig(clustersProperties)
               .map(validations -> new ApplicationConfigValidationDTO().clusters(validations));
         })
-        .map(ResponseEntity::ok)
-        .doOnEach(sig -> audit(context, sig));
+        .map(ResponseEntity::ok);
   }

   private Mono<Map<String, ClusterConfigValidationDTO>> validateClustersConfig(
@@ -36,10 +36,10 @@ public class AuthController {
         + "  <meta name=\"description\" content=\"\">\n"
         + "  <meta name=\"author\" content=\"\">\n"
         + "  <title>Please sign in</title>\n"
-        + "  <link href=\"" + contextPath + "/static/css/bootstrap.min.css\" rel=\"stylesheet\" "
+        + "  <link href=\"/static/css/bootstrap.min.css\" rel=\"stylesheet\" "
         + "integrity=\"sha384-/Y6pD6FV/Vv2HJnA6t+vslU6fwYXjCFtcEpHbNJ0lyAFsXTsjBbfaDjzALeQsN6M\" "
         + "crossorigin=\"anonymous\">\n"
-        + "  <link href=\"" + contextPath + "/static/css/signin.css\" "
+        + "  <link href=\"/static/css/signin.css\" "
         + "rel=\"stylesheet\" crossorigin=\"anonymous\"/>\n"
         + "  </head>\n"
         + "  <body>\n"
@@ -11,9 +11,8 @@ import com.provectus.kafka.ui.model.BrokersLogdirsDTO;
 import com.provectus.kafka.ui.model.rbac.AccessContext;
 import com.provectus.kafka.ui.model.rbac.permission.ClusterConfigAction;
 import com.provectus.kafka.ui.service.BrokerService;
+import com.provectus.kafka.ui.service.rbac.AccessControlService;
 import java.util.List;
-import java.util.Map;
-import javax.annotation.Nullable;
 import lombok.RequiredArgsConstructor;
 import lombok.extern.slf4j.Slf4j;
 import org.springframework.http.ResponseEntity;
@@ -26,79 +25,63 @@ import reactor.core.publisher.Mono;
 @RequiredArgsConstructor
 @Slf4j
 public class BrokersController extends AbstractController implements BrokersApi {
-  private static final String BROKER_ID = "brokerId";
-
   private final BrokerService brokerService;
   private final ClusterMapper clusterMapper;
+  private final AccessControlService accessControlService;

   @Override
   public Mono<ResponseEntity<Flux<BrokerDTO>>> getBrokers(String clusterName,
                                                           ServerWebExchange exchange) {
-    var context = AccessContext.builder()
+    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
         .cluster(clusterName)
-        .operationName("getBrokers")
-        .build();
+        .build());

     var job = brokerService.getBrokers(getCluster(clusterName)).map(clusterMapper::toBrokerDto);
-    return validateAccess(context)
-        .thenReturn(ResponseEntity.ok(job))
-        .doOnEach(sig -> audit(context, sig));
+
+    return validateAccess.thenReturn(ResponseEntity.ok(job));
   }

   @Override
   public Mono<ResponseEntity<BrokerMetricsDTO>> getBrokersMetrics(String clusterName, Integer id,
                                                                   ServerWebExchange exchange) {
-    var context = AccessContext.builder()
+    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
         .cluster(clusterName)
-        .operationName("getBrokersMetrics")
-        .operationParams(Map.of("id", id))
-        .build();
+        .build());

-    return validateAccess(context)
-        .then(
-            brokerService.getBrokerMetrics(getCluster(clusterName), id)
-                .map(clusterMapper::toBrokerMetrics)
-                .map(ResponseEntity::ok)
-                .onErrorReturn(ResponseEntity.notFound().build())
-        )
-        .doOnEach(sig -> audit(context, sig));
+    return validateAccess.then(
+        brokerService.getBrokerMetrics(getCluster(clusterName), id)
+            .map(clusterMapper::toBrokerMetrics)
+            .map(ResponseEntity::ok)
+            .onErrorReturn(ResponseEntity.notFound().build())
+    );
   }

   @Override
   public Mono<ResponseEntity<Flux<BrokersLogdirsDTO>>> getAllBrokersLogdirs(String clusterName,
-                                                                            @Nullable List<Integer> brokers,
+                                                                            List<Integer> brokers,
                                                                             ServerWebExchange exchange) {
-
-    List<Integer> brokerIds = brokers == null ? List.of() : brokers;
-
-    var context = AccessContext.builder()
+    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
         .cluster(clusterName)
-        .operationName("getAllBrokersLogdirs")
-        .operationParams(Map.of("brokerIds", brokerIds))
-        .build();
+        .build());

-    return validateAccess(context)
-        .thenReturn(ResponseEntity.ok(
-            brokerService.getAllBrokersLogdirs(getCluster(clusterName), brokerIds)))
-        .doOnEach(sig -> audit(context, sig));
+    return validateAccess.thenReturn(ResponseEntity.ok(
+        brokerService.getAllBrokersLogdirs(getCluster(clusterName), brokers)));
   }

   @Override
   public Mono<ResponseEntity<Flux<BrokerConfigDTO>>> getBrokerConfig(String clusterName,
                                                                      Integer id,
                                                                      ServerWebExchange exchange) {
-    var context = AccessContext.builder()
+    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
         .cluster(clusterName)
         .clusterConfigActions(ClusterConfigAction.VIEW)
-        .operationName("getBrokerConfig")
-        .operationParams(Map.of(BROKER_ID, id))
-        .build();
+        .build());

-    return validateAccess(context).thenReturn(
+    return validateAccess.thenReturn(
         ResponseEntity.ok(
             brokerService.getBrokerConfig(getCluster(clusterName), id)
                 .map(clusterMapper::toBrokerConfig))
-    ).doOnEach(sig -> audit(context, sig));
+    );
   }

   @Override

@@ -106,18 +89,16 @@ public class BrokersController extends AbstractController implements BrokersApi {
                                                                            Integer id,
                                                                            Mono<BrokerLogdirUpdateDTO> brokerLogdir,
                                                                            ServerWebExchange exchange) {
-    var context = AccessContext.builder()
+    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
         .cluster(clusterName)
         .clusterConfigActions(ClusterConfigAction.VIEW, ClusterConfigAction.EDIT)
-        .operationName("updateBrokerTopicPartitionLogDir")
-        .operationParams(Map.of(BROKER_ID, id))
-        .build();
+        .build());

-    return validateAccess(context).then(
+    return validateAccess.then(
         brokerLogdir
             .flatMap(bld -> brokerService.updateBrokerLogDir(getCluster(clusterName), id, bld))
             .map(ResponseEntity::ok)
-    ).doOnEach(sig -> audit(context, sig));
+    );
   }

   @Override

@@ -126,18 +107,16 @@ public class BrokersController extends AbstractController implements BrokersApi {
                                                                   String name,
                                                                   Mono<BrokerConfigItemDTO> brokerConfig,
                                                                   ServerWebExchange exchange) {
-    var context = AccessContext.builder()
+    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
         .cluster(clusterName)
         .clusterConfigActions(ClusterConfigAction.VIEW, ClusterConfigAction.EDIT)
-        .operationName("updateBrokerConfigByName")
-        .operationParams(Map.of(BROKER_ID, id))
-        .build();
+        .build());

-    return validateAccess(context).then(
+    return validateAccess.then(
         brokerConfig
             .flatMap(bci -> brokerService.updateBrokerConfigByName(
                 getCluster(clusterName), id, name, bci.getValue()))
             .map(ResponseEntity::ok)
-    ).doOnEach(sig -> audit(context, sig));
+    );
   }
 }
@@ -6,6 +6,7 @@ import com.provectus.kafka.ui.model.ClusterMetricsDTO;
 import com.provectus.kafka.ui.model.ClusterStatsDTO;
 import com.provectus.kafka.ui.model.rbac.AccessContext;
 import com.provectus.kafka.ui.service.ClusterService;
+import com.provectus.kafka.ui.service.rbac.AccessControlService;
 import lombok.RequiredArgsConstructor;
 import lombok.extern.slf4j.Slf4j;
 import org.springframework.http.ResponseEntity;

@@ -19,6 +20,7 @@ import reactor.core.publisher.Mono;
 @Slf4j
 public class ClustersController extends AbstractController implements ClustersApi {
   private final ClusterService clusterService;
+  private final AccessControlService accessControlService;

   @Override
   public Mono<ResponseEntity<Flux<ClusterDTO>>> getClusters(ServerWebExchange exchange) {
@@ -33,16 +35,14 @@ public class ClustersController extends AbstractController implements ClustersApi {
                                                                  ServerWebExchange exchange) {
     AccessContext context = AccessContext.builder()
         .cluster(clusterName)
-        .operationName("getClusterMetrics")
         .build();

-    return validateAccess(context)
+    return accessControlService.validateAccess(context)
         .then(
             clusterService.getClusterMetrics(getCluster(clusterName))
                 .map(ResponseEntity::ok)
                 .onErrorReturn(ResponseEntity.notFound().build())
-        )
-        .doOnEach(sig -> audit(context, sig));
+        );
   }

   @Override

@@ -50,16 +50,14 @@ public class ClustersController extends AbstractController implements ClustersApi {
                                                              ServerWebExchange exchange) {
     AccessContext context = AccessContext.builder()
         .cluster(clusterName)
-        .operationName("getClusterStats")
         .build();

-    return validateAccess(context)
+    return accessControlService.validateAccess(context)
         .then(
             clusterService.getClusterStats(getCluster(clusterName))
                 .map(ResponseEntity::ok)
                 .onErrorReturn(ResponseEntity.notFound().build())
-        )
-        .doOnEach(sig -> audit(context, sig));
+        );
   }

   @Override

@@ -68,11 +66,11 @@ public class ClustersController extends AbstractController implements ClustersApi {

     AccessContext context = AccessContext.builder()
         .cluster(clusterName)
-        .operationName("updateClusterInfo")
         .build();

-    return validateAccess(context)
-        .then(clusterService.updateCluster(getCluster(clusterName)).map(ResponseEntity::ok))
-        .doOnEach(sig -> audit(context, sig));
+    return accessControlService.validateAccess(context)
+        .then(
+            clusterService.updateCluster(getCluster(clusterName)).map(ResponseEntity::ok)
+        );
   }
 }
@@ -19,9 +19,11 @@ import com.provectus.kafka.ui.model.rbac.AccessContext;
 import com.provectus.kafka.ui.model.rbac.permission.TopicAction;
 import com.provectus.kafka.ui.service.ConsumerGroupService;
 import com.provectus.kafka.ui.service.OffsetsResetService;
+import com.provectus.kafka.ui.service.rbac.AccessControlService;
 import java.util.Map;
 import java.util.Optional;
 import java.util.function.Supplier;
+import java.util.stream.Collectors;
 import lombok.RequiredArgsConstructor;
 import lombok.extern.slf4j.Slf4j;
 import org.springframework.beans.factory.annotation.Value;

@@ -39,6 +41,7 @@ public class ConsumerGroupsController extends AbstractController implements ConsumerGroupsApi {

   private final ConsumerGroupService consumerGroupService;
   private final OffsetsResetService offsetsResetService;
+  private final AccessControlService accessControlService;

   @Value("${consumer.groups.page.size:25}")
   private int defaultConsumerGroupsPageSize;
@@ -47,47 +50,44 @@ public class ConsumerGroupsController extends AbstractController implements ConsumerGroupsApi {
   public Mono<ResponseEntity<Void>> deleteConsumerGroup(String clusterName,
                                                         String id,
                                                         ServerWebExchange exchange) {
-    var context = AccessContext.builder()
+    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
         .cluster(clusterName)
         .consumerGroup(id)
         .consumerGroupActions(DELETE)
-        .operationName("deleteConsumerGroup")
-        .build();
+        .build());

-    return validateAccess(context)
-        .then(consumerGroupService.deleteConsumerGroupById(getCluster(clusterName), id))
-        .doOnEach(sig -> audit(context, sig))
-        .thenReturn(ResponseEntity.ok().build());
+    return validateAccess.then(
+        consumerGroupService.deleteConsumerGroupById(getCluster(clusterName), id)
+            .thenReturn(ResponseEntity.ok().build())
+    );
   }

   @Override
   public Mono<ResponseEntity<ConsumerGroupDetailsDTO>> getConsumerGroup(String clusterName,
                                                                         String consumerGroupId,
                                                                         ServerWebExchange exchange) {
-    var context = AccessContext.builder()
+    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
         .cluster(clusterName)
         .consumerGroup(consumerGroupId)
         .consumerGroupActions(VIEW)
-        .operationName("getConsumerGroup")
-        .build();
+        .build());

-    return validateAccess(context)
-        .then(consumerGroupService.getConsumerGroupDetail(getCluster(clusterName), consumerGroupId)
+    return validateAccess.then(
+        consumerGroupService.getConsumerGroupDetail(getCluster(clusterName), consumerGroupId)
             .map(ConsumerGroupMapper::toDetailsDto)
-            .map(ResponseEntity::ok))
-        .doOnEach(sig -> audit(context, sig));
+            .map(ResponseEntity::ok)
+    );
   }

   @Override
   public Mono<ResponseEntity<Flux<ConsumerGroupDTO>>> getTopicConsumerGroups(String clusterName,
                                                                              String topicName,
                                                                              ServerWebExchange exchange) {
-    var context = AccessContext.builder()
+    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
         .cluster(clusterName)
         .topic(topicName)
         .topicActions(TopicAction.VIEW)
-        .operationName("getTopicConsumerGroups")
-        .build();
+        .build());

     Mono<ResponseEntity<Flux<ConsumerGroupDTO>>> job =
         consumerGroupService.getConsumerGroupsForTopic(getCluster(clusterName), topicName)

@@ -99,9 +99,7 @@ public class ConsumerGroupsController extends AbstractController implements ConsumerGroupsApi {
             .map(ResponseEntity::ok)
             .switchIfEmpty(Mono.just(ResponseEntity.notFound().build()));

-    return validateAccess(context)
-        .then(job)
-        .doOnEach(sig -> audit(context, sig));
+    return validateAccess.then(job);
   }

   @Override

@@ -114,13 +112,12 @@ public class ConsumerGroupsController extends AbstractController implements ConsumerGroupsApi {
       SortOrderDTO sortOrderDto,
       ServerWebExchange exchange) {

-    var context = AccessContext.builder()
+    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
         .cluster(clusterName)
         // consumer group access validation is within the service
-        .operationName("getConsumerGroupsPage")
-        .build();
+        .build());

-    return validateAccess(context).then(
+    return validateAccess.then(
         consumerGroupService.getConsumerGroupsPage(
             getCluster(clusterName),
             Optional.ofNullable(page).filter(i -> i > 0).orElse(1),

@@ -131,7 +128,7 @@ public class ConsumerGroupsController extends AbstractController implements ConsumerGroupsApi {
         )
         .map(this::convertPage)
         .map(ResponseEntity::ok)
-    ).doOnEach(sig -> audit(context, sig));
+    );
   }

   @Override

@@ -140,13 +137,12 @@ public class ConsumerGroupsController extends AbstractController implements ConsumerGroupsApi {
      Mono<ConsumerGroupOffsetsResetDTO> resetDto,
      ServerWebExchange exchange) {
     return resetDto.flatMap(reset -> {
-      var context = AccessContext.builder()
+      Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
           .cluster(clusterName)
           .topic(reset.getTopic())
           .topicActions(TopicAction.VIEW)
           .consumerGroupActions(RESET_OFFSETS)
-          .operationName("resetConsumerGroupOffsets")
-          .build();
+          .build());

       Supplier<Mono<Void>> mono = () -> {
         var cluster = getCluster(clusterName);

@@ -186,9 +182,7 @@ public class ConsumerGroupsController extends AbstractController implements ConsumerGroupsApi {
         }
       };

-      return validateAccess(context)
-          .then(mono.get())
-          .doOnEach(sig -> audit(context, sig));
+      return validateAccess.then(mono.get());
     }).thenReturn(ResponseEntity.ok().build());
   }

@@ -199,7 +193,7 @@ public class ConsumerGroupsController extends AbstractController implements ConsumerGroupsApi {
         .consumerGroups(consumerGroupConsumerGroupsPage.consumerGroups()
             .stream()
             .map(ConsumerGroupMapper::toDto)
-            .toList());
+            .collect(Collectors.toList()));
   }

 }
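One detail both branches share in the paging hunk above: the incoming page parameter is normalized with Optional.ofNullable(page).filter(i -> i > 0).orElse(1), so null, zero and negative values all fall back to the first page. The idiom in isolation:

import java.util.Optional;

class PageDefaulting {
  static int normalizePage(Integer page) {
    // null, 0 and negatives all collapse to the first page
    return Optional.ofNullable(page).filter(i -> i > 0).orElse(1);
  }

  public static void main(String[] args) {
    System.out.println(normalizePage(null)); // 1
    System.out.println(normalizePage(-5));   // 1
    System.out.println(normalizePage(3));    // 3
  }
}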
@@ -18,6 +18,7 @@ import com.provectus.kafka.ui.model.TaskDTO;
 import com.provectus.kafka.ui.model.rbac.AccessContext;
 import com.provectus.kafka.ui.model.rbac.permission.ConnectAction;
 import com.provectus.kafka.ui.service.KafkaConnectService;
+import com.provectus.kafka.ui.service.rbac.AccessControlService;
 import java.util.Comparator;
 import java.util.Map;
 import java.util.Set;
@@ -36,9 +37,8 @@ import reactor.core.publisher.Mono;
 public class KafkaConnectController extends AbstractController implements KafkaConnectApi {
   private static final Set<ConnectorActionDTO> RESTART_ACTIONS
       = Set.of(RESTART, RESTART_FAILED_TASKS, RESTART_ALL_TASKS);
-  private static final String CONNECTOR_NAME = "connectorName";

   private final KafkaConnectService kafkaConnectService;
+  private final AccessControlService accessControlService;

   @Override
   public Mono<ResponseEntity<Flux<ConnectDTO>>> getConnects(String clusterName,
@@ -54,16 +54,15 @@ public class KafkaConnectController extends AbstractController implements KafkaConnectApi {
   public Mono<ResponseEntity<Flux<String>>> getConnectors(String clusterName, String connectName,
                                                           ServerWebExchange exchange) {

-    var context = AccessContext.builder()
+    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
         .cluster(clusterName)
         .connect(connectName)
         .connectActions(ConnectAction.VIEW)
-        .operationName("getConnectors")
-        .build();
+        .build());

-    return validateAccess(context)
-        .thenReturn(ResponseEntity.ok(kafkaConnectService.getConnectorNames(getCluster(clusterName), connectName)))
-        .doOnEach(sig -> audit(context, sig));
+    return validateAccess.thenReturn(
+        ResponseEntity.ok(kafkaConnectService.getConnectorNames(getCluster(clusterName), connectName))
+    );
   }

   @Override
@@ -71,17 +70,16 @@ public class KafkaConnectController extends AbstractController implements KafkaConnectApi {
                                                             @Valid Mono<NewConnectorDTO> connector,
                                                             ServerWebExchange exchange) {

-    var context = AccessContext.builder()
+    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
         .cluster(clusterName)
         .connect(connectName)
         .connectActions(ConnectAction.VIEW, ConnectAction.CREATE)
-        .operationName("createConnector")
-        .build();
+        .build());

-    return validateAccess(context).then(
+    return validateAccess.then(
         kafkaConnectService.createConnector(getCluster(clusterName), connectName, connector)
             .map(ResponseEntity::ok)
-    ).doOnEach(sig -> audit(context, sig));
+    );
   }

   @Override
@@ -89,18 +87,17 @@ public class KafkaConnectController extends AbstractController implements KafkaConnectApi {
                                                          String connectorName,
                                                          ServerWebExchange exchange) {

-    var context = AccessContext.builder()
+    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
         .cluster(clusterName)
         .connect(connectName)
         .connectActions(ConnectAction.VIEW)
         .connector(connectorName)
-        .operationName("getConnector")
-        .build();
+        .build());

-    return validateAccess(context).then(
+    return validateAccess.then(
         kafkaConnectService.getConnector(getCluster(clusterName), connectName, connectorName)
             .map(ResponseEntity::ok)
-    ).doOnEach(sig -> audit(context, sig));
+    );
   }

   @Override
@@ -108,18 +105,16 @@ public class KafkaConnectController extends AbstractController implements KafkaConnectApi {
                                                        String connectorName,
                                                        ServerWebExchange exchange) {

-    var context = AccessContext.builder()
+    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
         .cluster(clusterName)
         .connect(connectName)
         .connectActions(ConnectAction.VIEW, ConnectAction.EDIT)
-        .operationName("deleteConnector")
-        .operationParams(Map.of(CONNECTOR_NAME, connectName))
-        .build();
+        .build());

-    return validateAccess(context).then(
+    return validateAccess.then(
         kafkaConnectService.deleteConnector(getCluster(clusterName), connectName, connectorName)
             .map(ResponseEntity::ok)
-    ).doOnEach(sig -> audit(context, sig));
+    );
   }

@@ -131,23 +126,14 @@ public class KafkaConnectController extends AbstractController implements KafkaConnectApi {
       SortOrderDTO sortOrder,
       ServerWebExchange exchange
   ) {
-    var context = AccessContext.builder()
-        .cluster(clusterName)
-        .connectActions(ConnectAction.VIEW, ConnectAction.EDIT)
-        .operationName("getAllConnectors")
-        .build();
-
     var comparator = sortOrder == null || sortOrder.equals(SortOrderDTO.ASC)
         ? getConnectorsComparator(orderBy)
         : getConnectorsComparator(orderBy).reversed();

     Flux<FullConnectorInfoDTO> job = kafkaConnectService.getAllConnectors(getCluster(clusterName), search)
         .filterWhen(dto -> accessControlService.isConnectAccessible(dto.getConnect(), clusterName))
-        .filterWhen(dto -> accessControlService.isConnectorAccessible(dto.getConnect(), dto.getName(), clusterName))
-        .sort(comparator);
+        .filterWhen(dto -> accessControlService.isConnectorAccessible(dto.getConnect(), dto.getName(), clusterName));

-    return Mono.just(ResponseEntity.ok(job))
-        .doOnEach(sig -> audit(context, sig));
+    return Mono.just(ResponseEntity.ok(job.sort(comparator)));
   }

   @Override
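The comparator selection kept by both branches is a common idiom: build the ascending comparator once and call reversed() for descending order. A standalone sketch with placeholder data:

import java.util.Comparator;
import java.util.stream.Stream;

class SortOrderSketch {
  enum SortOrder { ASC, DESC }

  public static void main(String[] args) {
    SortOrder order = SortOrder.DESC;
    Comparator<String> byLength = Comparator.comparingInt(String::length);
    // null or ASC keeps the base comparator; DESC flips it with reversed()
    Comparator<String> comparator = (order == null || order == SortOrder.ASC)
        ? byLength
        : byLength.reversed();
    System.out.println(Stream.of("kafka", "ui", "connect").sorted(comparator).toList());
  }
}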
@@ -156,18 +142,17 @@ public class KafkaConnectController extends AbstractController implements KafkaConnectApi {
                                                                    String connectorName,
                                                                    ServerWebExchange exchange) {

-    var context = AccessContext.builder()
+    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
         .cluster(clusterName)
         .connect(connectName)
         .connectActions(ConnectAction.VIEW)
-        .operationName("getConnectorConfig")
-        .build();
+        .build());

-    return validateAccess(context).then(
+    return validateAccess.then(
         kafkaConnectService
             .getConnectorConfig(getCluster(clusterName), connectName, connectorName)
             .map(ResponseEntity::ok)
-    ).doOnEach(sig -> audit(context, sig));
+    );
   }

   @Override
@@ -176,19 +161,16 @@ public class KafkaConnectController extends AbstractController implements KafkaConnectApi {
                                                          Mono<Map<String, Object>> requestBody,
                                                          ServerWebExchange exchange) {

-    var context = AccessContext.builder()
+    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
         .cluster(clusterName)
         .connect(connectName)
         .connectActions(ConnectAction.VIEW, ConnectAction.EDIT)
-        .operationName("setConnectorConfig")
-        .operationParams(Map.of(CONNECTOR_NAME, connectorName))
-        .build();
+        .build());

-    return validateAccess(context).then(
-        kafkaConnectService
-            .setConnectorConfig(getCluster(clusterName), connectName, connectorName, requestBody)
-            .map(ResponseEntity::ok))
-        .doOnEach(sig -> audit(context, sig));
+    return validateAccess.then(
+        kafkaConnectService
+            .setConnectorConfig(getCluster(clusterName), connectName, connectorName, requestBody)
+            .map(ResponseEntity::ok));
   }

   @Override
@@ -196,6 +178,7 @@ public class KafkaConnectController extends AbstractController implements KafkaConnectApi {
                                                           String connectorName,
                                                           ConnectorActionDTO action,
                                                           ServerWebExchange exchange) {
+
     ConnectAction[] connectActions;
     if (RESTART_ACTIONS.contains(action)) {
       connectActions = new ConnectAction[] {ConnectAction.VIEW, ConnectAction.RESTART};
@@ -203,19 +186,17 @@ public class KafkaConnectController extends AbstractController implements KafkaConnectApi {
       connectActions = new ConnectAction[] {ConnectAction.VIEW, ConnectAction.EDIT};
     }

-    var context = AccessContext.builder()
+    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
         .cluster(clusterName)
         .connect(connectName)
         .connectActions(connectActions)
-        .operationName("updateConnectorState")
-        .operationParams(Map.of(CONNECTOR_NAME, connectorName))
-        .build();
+        .build());

-    return validateAccess(context).then(
+    return validateAccess.then(
         kafkaConnectService
             .updateConnectorState(getCluster(clusterName), connectName, connectorName, action)
             .map(ResponseEntity::ok)
-    ).doOnEach(sig -> audit(context, sig));
+    );
   }

   @Override
@ -223,19 +204,17 @@ public class KafkaConnectController extends AbstractController implements KafkaC
|
|||
String connectName,
|
||||
String connectorName,
|
||||
ServerWebExchange exchange) {
|
||||
var context = AccessContext.builder()
|
||||
Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
|
||||
.cluster(clusterName)
|
||||
.connect(connectName)
|
||||
.connectActions(ConnectAction.VIEW)
|
||||
.operationName("getConnectorTasks")
|
||||
.operationParams(Map.of(CONNECTOR_NAME, connectorName))
|
||||
.build();
|
||||
.build());
|
||||
|
||||
return validateAccess(context).thenReturn(
|
||||
return validateAccess.thenReturn(
|
||||
ResponseEntity
|
||||
.ok(kafkaConnectService
|
||||
.getConnectorTasks(getCluster(clusterName), connectName, connectorName))
|
||||
).doOnEach(sig -> audit(context, sig));
|
||||
);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -243,37 +222,34 @@ public class KafkaConnectController extends AbstractController implements KafkaC
|
|||
String connectorName, Integer taskId,
|
||||
ServerWebExchange exchange) {
|
||||
|
||||
var context = AccessContext.builder()
|
||||
Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
|
||||
.cluster(clusterName)
|
||||
.connect(connectName)
|
||||
.connectActions(ConnectAction.VIEW, ConnectAction.RESTART)
|
||||
.operationName("restartConnectorTask")
|
||||
.operationParams(Map.of(CONNECTOR_NAME, connectorName))
|
||||
.build();
|
||||
.build());
|
||||
|
||||
return validateAccess(context).then(
|
||||
return validateAccess.then(
|
||||
kafkaConnectService
|
||||
.restartConnectorTask(getCluster(clusterName), connectName, connectorName, taskId)
|
||||
.map(ResponseEntity::ok)
|
||||
).doOnEach(sig -> audit(context, sig));
|
||||
);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Mono<ResponseEntity<Flux<ConnectorPluginDTO>>> getConnectorPlugins(
|
||||
String clusterName, String connectName, ServerWebExchange exchange) {
|
||||
|
||||
var context = AccessContext.builder()
|
||||
Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
|
||||
.cluster(clusterName)
|
||||
.connect(connectName)
|
||||
.connectActions(ConnectAction.VIEW)
|
||||
.operationName("getConnectorPlugins")
|
||||
.build();
|
||||
.build());
|
||||
|
||||
return validateAccess(context).then(
|
||||
return validateAccess.then(
|
||||
Mono.just(
|
||||
ResponseEntity.ok(
|
||||
kafkaConnectService.getConnectorPlugins(getCluster(clusterName), connectName)))
|
||||
).doOnEach(sig -> audit(context, sig));
|
||||
);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
|
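Every hunk above makes the same mechanical swap: the master side builds an AccessContext with operationName/operationParams, validates through the controller's validateAccess(context) helper, and mirrors each terminal signal into the audit log via doOnEach; the experiment side calls accessControlService.validateAccess(...) directly and drops the audit step. A minimal sketch of the two idioms, with helper shapes assumed from the call sites rather than taken from the project's AbstractController:

import org.springframework.http.ResponseEntity;
import reactor.core.publisher.Mono;
import reactor.core.publisher.Signal;

class AccessPatternSketch {

  // Stand-in for the RBAC check: completes empty when access is granted,
  // errors when it is denied (assumed behavior, inferred from the call sites).
  Mono<Void> validateAccess(Object context) {
    return Mono.empty();
  }

  // Stand-in for the audit hook that receives every terminal signal.
  void audit(Object context, Signal<?> sig) {
    // the master side records success/error outcomes here
  }

  // master: validate, run the handler, then copy each signal to the audit log
  Mono<ResponseEntity<String>> audited(Object context) {
    return validateAccess(context)
        .then(Mono.just(ResponseEntity.ok("payload")))
        .doOnEach(sig -> audit(context, sig));
  }

  // experiment branch: validation only, no audit side channel
  Mono<ResponseEntity<String>> plain(Object context) {
    return validateAccess(context)
        .then(Mono.just(ResponseEntity.ok("payload")));
  }
}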
@@ -10,6 +10,7 @@ import com.provectus.kafka.ui.model.KsqlTableResponseDTO;
 import com.provectus.kafka.ui.model.rbac.AccessContext;
 import com.provectus.kafka.ui.model.rbac.permission.KsqlAction;
 import com.provectus.kafka.ui.service.ksql.KsqlServiceV2;
+import com.provectus.kafka.ui.service.rbac.AccessControlService;
 import java.util.List;
 import java.util.Map;
 import java.util.Optional;

@@ -27,42 +28,39 @@ import reactor.core.publisher.Mono;
 public class KsqlController extends AbstractController implements KsqlApi {

   private final KsqlServiceV2 ksqlServiceV2;
+  private final AccessControlService accessControlService;

   @Override
   public Mono<ResponseEntity<KsqlCommandV2ResponseDTO>> executeKsql(String clusterName,
-                                                                    Mono<KsqlCommandV2DTO> ksqlCmdDo,
+                                                                    Mono<KsqlCommandV2DTO>
+                                                                        ksqlCommand2Dto,
                                                                     ServerWebExchange exchange) {
-    return ksqlCmdDo.flatMap(
-        command -> {
-          var context = AccessContext.builder()
-              .cluster(clusterName)
-              .ksqlActions(KsqlAction.EXECUTE)
-              .operationName("executeKsql")
-              .operationParams(command)
-              .build();
-          return validateAccess(context).thenReturn(
-                  new KsqlCommandV2ResponseDTO().pipeId(
-                      ksqlServiceV2.registerCommand(
-                          getCluster(clusterName),
-                          command.getKsql(),
-                          Optional.ofNullable(command.getStreamsProperties()).orElse(Map.of()))))
-              .doOnEach(sig -> audit(context, sig));
-        }
-    )
-    .map(ResponseEntity::ok);
+    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+        .cluster(clusterName)
+        .ksqlActions(KsqlAction.EXECUTE)
+        .build());
+
+    return validateAccess.then(
+        ksqlCommand2Dto.map(dto -> {
+          var id = ksqlServiceV2.registerCommand(
+              getCluster(clusterName),
+              dto.getKsql(),
+              Optional.ofNullable(dto.getStreamsProperties()).orElse(Map.of()));
+          return new KsqlCommandV2ResponseDTO().pipeId(id);
+        }).map(ResponseEntity::ok)
+    );
   }

   @Override
   public Mono<ResponseEntity<Flux<KsqlResponseDTO>>> openKsqlResponsePipe(String clusterName,
                                                                           String pipeId,
                                                                           ServerWebExchange exchange) {
-    var context = AccessContext.builder()
+    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
         .cluster(clusterName)
         .ksqlActions(KsqlAction.EXECUTE)
-        .operationName("openKsqlResponsePipe")
-        .build();
+        .build());

-    return validateAccess(context).thenReturn(
+    return validateAccess.thenReturn(
         ResponseEntity.ok(ksqlServiceV2.execute(pipeId)
             .map(table -> new KsqlResponseDTO()
                 .table(

@@ -76,28 +74,22 @@ public class KsqlController extends AbstractController implements KsqlApi {
   @Override
   public Mono<ResponseEntity<Flux<KsqlStreamDescriptionDTO>>> listStreams(String clusterName,
                                                                           ServerWebExchange exchange) {
-    var context = AccessContext.builder()
+    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
         .cluster(clusterName)
         .ksqlActions(KsqlAction.EXECUTE)
-        .operationName("listStreams")
-        .build();
+        .build());

-    return validateAccess(context)
-        .thenReturn(ResponseEntity.ok(ksqlServiceV2.listStreams(getCluster(clusterName))))
-        .doOnEach(sig -> audit(context, sig));
+    return validateAccess.thenReturn(ResponseEntity.ok(ksqlServiceV2.listStreams(getCluster(clusterName))));
   }

   @Override
   public Mono<ResponseEntity<Flux<KsqlTableDescriptionDTO>>> listTables(String clusterName,
                                                                         ServerWebExchange exchange) {
-    var context = AccessContext.builder()
+    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
         .cluster(clusterName)
         .ksqlActions(KsqlAction.EXECUTE)
-        .operationName("listTables")
-        .build();
+        .build());

-    return validateAccess(context)
-        .thenReturn(ResponseEntity.ok(ksqlServiceV2.listTables(getCluster(clusterName))))
-        .doOnEach(sig -> audit(context, sig));
+    return validateAccess.thenReturn(ResponseEntity.ok(ksqlServiceV2.listTables(getCluster(clusterName))));
   }
 }
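Both sides of the KsqlController diff keep the same two-step protocol: executeKsql registers the statement and returns a pipe id, and openKsqlResponsePipe later streams the result rows for that id. A toy in-memory sketch of that contract; the registry and row payloads are invented for the demo and are not the project's KsqlServiceV2:

import java.util.Map;
import java.util.UUID;
import java.util.concurrent.ConcurrentHashMap;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;

class KsqlPipeSketch {

  private final Map<String, String> pipes = new ConcurrentHashMap<>();

  // executeKsql step: store the statement, hand back a pipe id
  Mono<String> registerCommand(String ksql) {
    String pipeId = UUID.randomUUID().toString();
    pipes.put(pipeId, ksql);
    return Mono.just(pipeId);
  }

  // openKsqlResponsePipe step: stream rows for a previously registered id
  Flux<String> execute(String pipeId) {
    String ksql = pipes.remove(pipeId);
    return ksql == null
        ? Flux.error(new IllegalStateException("unknown pipe: " + pipeId))
        : Flux.just("row 1 of: " + ksql, "row 2 of: " + ksql);
  }
}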
@@ -15,16 +15,13 @@ import com.provectus.kafka.ui.model.MessageFilterTypeDTO;
 import com.provectus.kafka.ui.model.SeekDirectionDTO;
 import com.provectus.kafka.ui.model.SeekTypeDTO;
 import com.provectus.kafka.ui.model.SerdeUsageDTO;
-import com.provectus.kafka.ui.model.SmartFilterTestExecutionDTO;
-import com.provectus.kafka.ui.model.SmartFilterTestExecutionResultDTO;
 import com.provectus.kafka.ui.model.TopicMessageEventDTO;
 import com.provectus.kafka.ui.model.TopicSerdeSuggestionDTO;
 import com.provectus.kafka.ui.model.rbac.AccessContext;
-import com.provectus.kafka.ui.model.rbac.permission.AuditAction;
 import com.provectus.kafka.ui.model.rbac.permission.TopicAction;
 import com.provectus.kafka.ui.service.DeserializationService;
 import com.provectus.kafka.ui.service.MessagesService;
-import com.provectus.kafka.ui.util.DynamicConfigOperations;
+import com.provectus.kafka.ui.service.rbac.AccessControlService;
 import java.util.List;
 import java.util.Map;
 import java.util.Optional;

@@ -32,7 +29,6 @@ import javax.annotation.Nullable;
 import javax.validation.Valid;
 import lombok.RequiredArgsConstructor;
 import lombok.extern.slf4j.Slf4j;
 import org.apache.commons.lang3.StringUtils;
-import org.apache.commons.lang3.tuple.Pair;
 import org.apache.kafka.common.TopicPartition;
 import org.springframework.http.ResponseEntity;

@@ -49,34 +45,26 @@ public class MessagesController extends AbstractController implements MessagesAp

   private final MessagesService messagesService;
   private final DeserializationService deserializationService;
-  private final DynamicConfigOperations dynamicConfigOperations;
+  private final AccessControlService accessControlService;

   @Override
   public Mono<ResponseEntity<Void>> deleteTopicMessages(
       String clusterName, String topicName, @Valid List<Integer> partitions,
       ServerWebExchange exchange) {

-    var context = AccessContext.builder()
+    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
         .cluster(clusterName)
         .topic(topicName)
         .topicActions(MESSAGES_DELETE)
-        .build();
+        .build());

-    return validateAccess(context).<ResponseEntity<Void>>then(
+    return validateAccess.then(
         messagesService.deleteTopicMessages(
             getCluster(clusterName),
             topicName,
             Optional.ofNullable(partitions).orElse(List.of())
         ).thenReturn(ResponseEntity.ok().build())
-    ).doOnEach(sig -> audit(context, sig));
-  }
-
-  @Override
-  public Mono<ResponseEntity<SmartFilterTestExecutionResultDTO>> executeSmartFilterTest(
-      Mono<SmartFilterTestExecutionDTO> smartFilterTestExecutionDto, ServerWebExchange exchange) {
-    return smartFilterTestExecutionDto
-        .map(MessagesService::execSmartFilterTest)
-        .map(ResponseEntity::ok);
+    );
   }

   @Override

@@ -91,19 +79,11 @@ public class MessagesController extends AbstractController implements MessagesAp
       String keySerde,
       String valueSerde,
       ServerWebExchange exchange) {
-    var contextBuilder = AccessContext.builder()
+    final Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
         .cluster(clusterName)
         .topic(topicName)
         .topicActions(MESSAGES_READ)
-        .operationName("getTopicMessages");
-
-    if (StringUtils.isNoneEmpty(q) && MessageFilterTypeDTO.GROOVY_SCRIPT == filterQueryType) {
-      dynamicConfigOperations.checkIfFilteringGroovyEnabled();
-    }
-
-    if (auditService.isAuditTopic(getCluster(clusterName), topicName)) {
-      contextBuilder.auditActions(AuditAction.VIEW);
-    }
+        .build());

     seekType = seekType != null ? seekType : SeekTypeDTO.BEGINNING;
     seekDirection = seekDirection != null ? seekDirection : SeekDirectionDTO.FORWARD;

@@ -122,10 +102,7 @@ public class MessagesController extends AbstractController implements MessagesAp
             )
         );

-    var context = contextBuilder.build();
-    return validateAccess(context)
-        .then(job)
-        .doOnEach(sig -> audit(context, sig));
+    return validateAccess.then(job);
   }

   @Override

@@ -133,18 +110,17 @@ public class MessagesController extends AbstractController implements MessagesAp
       String clusterName, String topicName, @Valid Mono<CreateTopicMessageDTO> createTopicMessage,
       ServerWebExchange exchange) {

-    var context = AccessContext.builder()
+    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
         .cluster(clusterName)
         .topic(topicName)
         .topicActions(MESSAGES_PRODUCE)
-        .operationName("sendTopicMessages")
-        .build();
+        .build());

-    return validateAccess(context).then(
+    return validateAccess.then(
         createTopicMessage.flatMap(msg ->
             messagesService.sendMessage(getCluster(clusterName), topicName, msg).then()
         ).map(ResponseEntity::ok)
-    ).doOnEach(sig -> audit(context, sig));
+    );
   }

   /**

@@ -180,12 +156,12 @@ public class MessagesController extends AbstractController implements MessagesAp
       String topicName,
       SerdeUsageDTO use,
       ServerWebExchange exchange) {
-    var context = AccessContext.builder()
+
+    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
         .cluster(clusterName)
         .topic(topicName)
         .topicActions(TopicAction.VIEW)
-        .operationName("getSerdes")
-        .build();
+        .build());

     TopicSerdeSuggestionDTO dto = new TopicSerdeSuggestionDTO()
         .key(use == SerdeUsageDTO.SERIALIZE

@@ -195,14 +171,10 @@ public class MessagesController extends AbstractController implements MessagesAp
             ? deserializationService.getSerdesForSerialize(getCluster(clusterName), topicName, VALUE)
             : deserializationService.getSerdesForDeserialize(getCluster(clusterName), topicName, VALUE));

-    return validateAccess(context).then(
+    return validateAccess.then(
         Mono.just(dto)
             .subscribeOn(Schedulers.boundedElastic())
             .map(ResponseEntity::ok)
     );
   }
-
-
-
-
 }
@@ -13,8 +13,9 @@ import com.provectus.kafka.ui.model.SchemaSubjectsResponseDTO;
 import com.provectus.kafka.ui.model.rbac.AccessContext;
 import com.provectus.kafka.ui.model.rbac.permission.SchemaAction;
 import com.provectus.kafka.ui.service.SchemaRegistryService;
+import com.provectus.kafka.ui.service.rbac.AccessControlService;
 import java.util.List;
-import java.util.Map;
+import java.util.stream.Collectors;
 import javax.validation.Valid;
 import lombok.RequiredArgsConstructor;
 import lombok.extern.slf4j.Slf4j;

@@ -35,6 +36,7 @@ public class SchemasController extends AbstractController implements SchemasApi
   private final KafkaSrMapper kafkaSrMapper = new KafkaSrMapperImpl();

   private final SchemaRegistryService schemaRegistryService;
+  private final AccessControlService accessControlService;

   @Override
   protected KafkaCluster getCluster(String clusterName) {

@@ -49,14 +51,13 @@ public class SchemasController extends AbstractController implements SchemasApi
   public Mono<ResponseEntity<CompatibilityCheckResponseDTO>> checkSchemaCompatibility(
       String clusterName, String subject, @Valid Mono<NewSchemaSubjectDTO> newSchemaSubjectMono,
       ServerWebExchange exchange) {
-    var context = AccessContext.builder()
+    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
         .cluster(clusterName)
         .schema(subject)
         .schemaActions(SchemaAction.VIEW)
-        .operationName("checkSchemaCompatibility")
-        .build();
+        .build());

-    return validateAccess(context).then(
+    return validateAccess.then(
         newSchemaSubjectMono.flatMap(subjectDTO ->
             schemaRegistryService.checksSchemaCompatibility(
                 getCluster(clusterName),

@@ -65,20 +66,19 @@ public class SchemasController extends AbstractController implements SchemasApi
             ))
             .map(kafkaSrMapper::toDto)
             .map(ResponseEntity::ok)
-    ).doOnEach(sig -> audit(context, sig));
+    );
   }

   @Override
   public Mono<ResponseEntity<SchemaSubjectDTO>> createNewSchema(
       String clusterName, @Valid Mono<NewSchemaSubjectDTO> newSchemaSubjectMono,
       ServerWebExchange exchange) {
-    var context = AccessContext.builder()
+    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
         .cluster(clusterName)
         .schemaActions(SchemaAction.CREATE)
-        .operationName("createNewSchema")
-        .build();
+        .build());

-    return validateAccess(context).then(
+    return validateAccess.then(
         newSchemaSubjectMono.flatMap(newSubject ->
             schemaRegistryService.registerNewSchema(
                 getCluster(clusterName),

@@ -87,22 +87,20 @@ public class SchemasController extends AbstractController implements SchemasApi
             )
         ).map(kafkaSrMapper::toDto)
         .map(ResponseEntity::ok)
-    ).doOnEach(sig -> audit(context, sig));
+    );
   }

   @Override
   public Mono<ResponseEntity<Void>> deleteLatestSchema(
       String clusterName, String subject, ServerWebExchange exchange) {
-    var context = AccessContext.builder()
+    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
         .cluster(clusterName)
         .schema(subject)
         .schemaActions(SchemaAction.DELETE)
-        .operationName("deleteLatestSchema")
-        .build();
+        .build());

-    return validateAccess(context).then(
+    return validateAccess.then(
         schemaRegistryService.deleteLatestSchemaSubject(getCluster(clusterName), subject)
-            .doOnEach(sig -> audit(context, sig))
             .thenReturn(ResponseEntity.ok().build())
     );
   }

@@ -110,16 +108,14 @@ public class SchemasController extends AbstractController implements SchemasApi
   @Override
   public Mono<ResponseEntity<Void>> deleteSchema(
       String clusterName, String subject, ServerWebExchange exchange) {
-    var context = AccessContext.builder()
+    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
         .cluster(clusterName)
         .schema(subject)
         .schemaActions(SchemaAction.DELETE)
-        .operationName("deleteSchema")
-        .build();
+        .build());

-    return validateAccess(context).then(
+    return validateAccess.then(
         schemaRegistryService.deleteSchemaSubjectEntirely(getCluster(clusterName), subject)
-            .doOnEach(sig -> audit(context, sig))
             .thenReturn(ResponseEntity.ok().build())
     );
   }

@@ -127,16 +123,14 @@ public class SchemasController extends AbstractController implements SchemasApi
   @Override
   public Mono<ResponseEntity<Void>> deleteSchemaByVersion(
       String clusterName, String subjectName, Integer version, ServerWebExchange exchange) {
-    var context = AccessContext.builder()
+    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
         .cluster(clusterName)
         .schema(subjectName)
         .schemaActions(SchemaAction.DELETE)
-        .operationName("deleteSchemaByVersion")
-        .build();
+        .build());

-    return validateAccess(context).then(
+    return validateAccess.then(
         schemaRegistryService.deleteSchemaSubjectByVersion(getCluster(clusterName), subjectName, version)
-            .doOnEach(sig -> audit(context, sig))
             .thenReturn(ResponseEntity.ok().build())
     );
   }

@@ -144,20 +138,16 @@ public class SchemasController extends AbstractController implements SchemasApi
   @Override
   public Mono<ResponseEntity<Flux<SchemaSubjectDTO>>> getAllVersionsBySubject(
       String clusterName, String subjectName, ServerWebExchange exchange) {
-    var context = AccessContext.builder()
+    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
         .cluster(clusterName)
         .schema(subjectName)
         .schemaActions(SchemaAction.VIEW)
-        .operationName("getAllVersionsBySubject")
-        .build();
+        .build());

     Flux<SchemaSubjectDTO> schemas =
         schemaRegistryService.getAllVersionsBySubject(getCluster(clusterName), subjectName)
             .map(kafkaSrMapper::toDto);

-    return validateAccess(context)
-        .thenReturn(ResponseEntity.ok(schemas))
-        .doOnEach(sig -> audit(context, sig));
+    return validateAccess.thenReturn(ResponseEntity.ok(schemas));
   }

   @Override

@@ -173,37 +163,34 @@ public class SchemasController extends AbstractController implements SchemasApi
   public Mono<ResponseEntity<SchemaSubjectDTO>> getLatestSchema(String clusterName,
                                                                 String subject,
                                                                 ServerWebExchange exchange) {
-    var context = AccessContext.builder()
+    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
         .cluster(clusterName)
         .schema(subject)
         .schemaActions(SchemaAction.VIEW)
-        .operationName("getLatestSchema")
-        .build();
+        .build());

-    return validateAccess(context).then(
+    return validateAccess.then(
         schemaRegistryService.getLatestSchemaVersionBySubject(getCluster(clusterName), subject)
             .map(kafkaSrMapper::toDto)
             .map(ResponseEntity::ok)
-    ).doOnEach(sig -> audit(context, sig));
+    );
   }

   @Override
   public Mono<ResponseEntity<SchemaSubjectDTO>> getSchemaByVersion(
       String clusterName, String subject, Integer version, ServerWebExchange exchange) {
-    var context = AccessContext.builder()
+    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
         .cluster(clusterName)
         .schema(subject)
         .schemaActions(SchemaAction.VIEW)
-        .operationName("getSchemaByVersion")
-        .operationParams(Map.of("subject", subject, "version", version))
-        .build();
+        .build());

-    return validateAccess(context).then(
+    return validateAccess.then(
         schemaRegistryService.getSchemaSubjectByVersion(
                 getCluster(clusterName), subject, version)
             .map(kafkaSrMapper::toDto)
             .map(ResponseEntity::ok)
-    ).doOnEach(sig -> audit(context, sig));
+    );
   }

   @Override

@@ -212,11 +199,6 @@ public class SchemasController extends AbstractController implements SchemasApi
       @Valid Integer perPage,
       @Valid String search,
       ServerWebExchange serverWebExchange) {
-    var context = AccessContext.builder()
-        .cluster(clusterName)
-        .operationName("getSchemas")
-        .build();
-
     return schemaRegistryService
         .getAllSubjectNames(getCluster(clusterName))
         .flatMapIterable(l -> l)

@@ -234,32 +216,29 @@ public class SchemasController extends AbstractController implements SchemasApi
           List<String> subjectsToRender = filteredSubjects.stream()
               .skip(subjectToSkip)
               .limit(pageSize)
-              .toList();
+              .collect(Collectors.toList());
           return schemaRegistryService.getAllLatestVersionSchemas(getCluster(clusterName), subjectsToRender)
               .map(subjs -> subjs.stream().map(kafkaSrMapper::toDto).toList())
               .map(subjs -> new SchemaSubjectsResponseDTO().pageCount(totalPages).schemas(subjs));
-        }).map(ResponseEntity::ok)
-        .doOnEach(sig -> audit(context, sig));
+        }).map(ResponseEntity::ok);
   }

   @Override
   public Mono<ResponseEntity<Void>> updateGlobalSchemaCompatibilityLevel(
       String clusterName, @Valid Mono<CompatibilityLevelDTO> compatibilityLevelMono,
       ServerWebExchange exchange) {
-    var context = AccessContext.builder()
+    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
         .cluster(clusterName)
         .schemaActions(SchemaAction.MODIFY_GLOBAL_COMPATIBILITY)
-        .operationName("updateGlobalSchemaCompatibilityLevel")
-        .build();
+        .build());

-    return validateAccess(context).then(
+    return validateAccess.then(
         compatibilityLevelMono
             .flatMap(compatibilityLevelDTO ->
                 schemaRegistryService.updateGlobalSchemaCompatibility(
                     getCluster(clusterName),
                     kafkaSrMapper.fromDto(compatibilityLevelDTO.getCompatibility())
                 ))
-            .doOnEach(sig -> audit(context, sig))
             .thenReturn(ResponseEntity.ok().build())
     );
   }

@@ -268,14 +247,12 @@ public class SchemasController extends AbstractController implements SchemasApi
   public Mono<ResponseEntity<Void>> updateSchemaCompatibilityLevel(
       String clusterName, String subject, @Valid Mono<CompatibilityLevelDTO> compatibilityLevelMono,
       ServerWebExchange exchange) {
-    var context = AccessContext.builder()
+    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
         .cluster(clusterName)
         .schemaActions(SchemaAction.EDIT)
-        .operationName("updateSchemaCompatibilityLevel")
-        .operationParams(Map.of("subject", subject))
-        .build();
+        .build());

-    return validateAccess(context).then(
+    return validateAccess.then(
         compatibilityLevelMono
             .flatMap(compatibilityLevelDTO ->
                 schemaRegistryService.updateSchemaCompatibility(

@@ -283,7 +260,6 @@ public class SchemasController extends AbstractController implements SchemasApi
                     subject,
                     kafkaSrMapper.fromDto(compatibilityLevelDTO.getCompatibility())
                 ))
-            .doOnEach(sig -> audit(context, sig))
             .thenReturn(ResponseEntity.ok().build())
     );
   }
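The getSchemas hunk keeps the page arithmetic intact and only swaps Stream.toList() for collect(Collectors.toList()); the latter works on JDKs older than 16, which Stream.toList() requires. A standalone sketch of the same skip/limit pagination; the default page size of 25 is an assumption for the demo, not taken from the project:

import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.IntStream;

class SchemaPagingSketch {

  static List<String> page(List<String> subjects, Integer pageNum, Integer perPage) {
    int pageSize = perPage != null && perPage > 0 ? perPage : 25;       // assumed default
    int toSkip = ((pageNum != null && pageNum > 0 ? pageNum : 1) - 1) * pageSize;
    int totalPages = (subjects.size() + pageSize - 1) / pageSize;       // ceiling division
    System.out.println("totalPages = " + totalPages);
    return subjects.stream()
        .skip(toSkip)
        .limit(pageSize)
        .collect(Collectors.toList());                                  // pre-Java-16 friendly
  }

  public static void main(String[] args) {
    List<String> subjects = IntStream.range(0, 103)
        .mapToObj(i -> "subject-" + i)
        .collect(Collectors.toList());
    System.out.println(page(subjects, 3, 25));  // prints subject-50 .. subject-74, totalPages = 5
  }
}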
@@ -22,15 +22,14 @@ import com.provectus.kafka.ui.model.TopicConfigDTO;
 import com.provectus.kafka.ui.model.TopicCreationDTO;
 import com.provectus.kafka.ui.model.TopicDTO;
 import com.provectus.kafka.ui.model.TopicDetailsDTO;
-import com.provectus.kafka.ui.model.TopicProducerStateDTO;
 import com.provectus.kafka.ui.model.TopicUpdateDTO;
 import com.provectus.kafka.ui.model.TopicsResponseDTO;
 import com.provectus.kafka.ui.model.rbac.AccessContext;
 import com.provectus.kafka.ui.service.TopicsService;
 import com.provectus.kafka.ui.service.analyze.TopicAnalysisService;
+import com.provectus.kafka.ui.service.rbac.AccessControlService;
 import java.util.Comparator;
 import java.util.List;
-import java.util.Map;
 import javax.validation.Valid;
 import lombok.RequiredArgsConstructor;
 import lombok.extern.slf4j.Slf4j;

@@ -52,79 +51,70 @@ public class TopicsController extends AbstractController implements TopicsApi {
   private final TopicsService topicsService;
   private final TopicAnalysisService topicAnalysisService;
   private final ClusterMapper clusterMapper;
+  private final AccessControlService accessControlService;

   @Override
   public Mono<ResponseEntity<TopicDTO>> createTopic(
-      String clusterName, @Valid Mono<TopicCreationDTO> topicCreationMono, ServerWebExchange exchange) {
-    return topicCreationMono.flatMap(topicCreation -> {
-      var context = AccessContext.builder()
-          .cluster(clusterName)
-          .topicActions(CREATE)
-          .operationName("createTopic")
-          .operationParams(topicCreation)
-          .build();
+      String clusterName, @Valid Mono<TopicCreationDTO> topicCreation, ServerWebExchange exchange) {

-      return validateAccess(context)
-          .then(topicsService.createTopic(getCluster(clusterName), topicCreation))
-          .map(clusterMapper::toTopic)
-          .map(s -> new ResponseEntity<>(s, HttpStatus.OK))
-          .switchIfEmpty(Mono.just(ResponseEntity.notFound().build()))
-          .doOnEach(sig -> audit(context, sig));
-    });
+    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+        .cluster(clusterName)
+        .topicActions(CREATE)
+        .build());
+
+    return validateAccess.then(
+        topicsService.createTopic(getCluster(clusterName), topicCreation)
+            .map(clusterMapper::toTopic)
+            .map(s -> new ResponseEntity<>(s, HttpStatus.OK))
+            .switchIfEmpty(Mono.just(ResponseEntity.notFound().build()))
+    );
   }

   @Override
   public Mono<ResponseEntity<TopicDTO>> recreateTopic(String clusterName,
                                                       String topicName, ServerWebExchange exchange) {
-    var context = AccessContext.builder()
+    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
         .cluster(clusterName)
         .topic(topicName)
         .topicActions(VIEW, CREATE, DELETE)
-        .operationName("recreateTopic")
-        .build();
+        .build());

-    return validateAccess(context).then(
+    return validateAccess.then(
         topicsService.recreateTopic(getCluster(clusterName), topicName)
             .map(clusterMapper::toTopic)
             .map(s -> new ResponseEntity<>(s, HttpStatus.CREATED))
-    ).doOnEach(sig -> audit(context, sig));
+    );
   }

   @Override
   public Mono<ResponseEntity<TopicDTO>> cloneTopic(
       String clusterName, String topicName, String newTopicName, ServerWebExchange exchange) {

-    var context = AccessContext.builder()
+    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
         .cluster(clusterName)
         .topic(topicName)
         .topicActions(VIEW, CREATE)
-        .operationName("cloneTopic")
-        .operationParams(Map.of("newTopicName", newTopicName))
-        .build();
+        .build());

-    return validateAccess(context)
-        .then(topicsService.cloneTopic(getCluster(clusterName), topicName, newTopicName)
-            .map(clusterMapper::toTopic)
-            .map(s -> new ResponseEntity<>(s, HttpStatus.CREATED))
-    ).doOnEach(sig -> audit(context, sig));
+    return validateAccess.then(topicsService.cloneTopic(getCluster(clusterName), topicName, newTopicName)
+        .map(clusterMapper::toTopic)
+        .map(s -> new ResponseEntity<>(s, HttpStatus.CREATED))
+    );
   }

   @Override
   public Mono<ResponseEntity<Void>> deleteTopic(
       String clusterName, String topicName, ServerWebExchange exchange) {

-    var context = AccessContext.builder()
+    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
         .cluster(clusterName)
         .topic(topicName)
         .topicActions(DELETE)
-        .operationName("deleteTopic")
-        .build();
+        .build());

-    return validateAccess(context)
-        .then(
-            topicsService.deleteTopic(getCluster(clusterName), topicName)
-                .thenReturn(ResponseEntity.ok().<Void>build())
-        ).doOnEach(sig -> audit(context, sig));
+    return validateAccess.then(
+        topicsService.deleteTopic(getCluster(clusterName), topicName).map(ResponseEntity::ok)
+    );
   }

@@ -132,40 +122,38 @@ public class TopicsController extends AbstractController implements TopicsApi {
   public Mono<ResponseEntity<Flux<TopicConfigDTO>>> getTopicConfigs(
       String clusterName, String topicName, ServerWebExchange exchange) {

-    var context = AccessContext.builder()
+    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
         .cluster(clusterName)
         .topic(topicName)
         .topicActions(VIEW)
-        .operationName("getTopicConfigs")
-        .build();
+        .build());

-    return validateAccess(context).then(
+    return validateAccess.then(
         topicsService.getTopicConfigs(getCluster(clusterName), topicName)
             .map(lst -> lst.stream()
                 .map(InternalTopicConfig::from)
                 .map(clusterMapper::toTopicConfig)
-                .toList())
+                .collect(toList()))
             .map(Flux::fromIterable)
             .map(ResponseEntity::ok)
-    ).doOnEach(sig -> audit(context, sig));
+    );
   }

   @Override
   public Mono<ResponseEntity<TopicDetailsDTO>> getTopicDetails(
       String clusterName, String topicName, ServerWebExchange exchange) {

-    var context = AccessContext.builder()
+    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
         .cluster(clusterName)
         .topic(topicName)
         .topicActions(VIEW)
-        .operationName("getTopicDetails")
-        .build();
+        .build());

-    return validateAccess(context).then(
+    return validateAccess.then(
         topicsService.getTopicDetails(getCluster(clusterName), topicName)
             .map(clusterMapper::toTopicDetails)
             .map(ResponseEntity::ok)
-    ).doOnEach(sig -> audit(context, sig));
+    );
   }

   @Override

@@ -178,19 +166,13 @@ public class TopicsController extends AbstractController implements TopicsApi {
       @Valid SortOrderDTO sortOrder,
       ServerWebExchange exchange) {

-    AccessContext context = AccessContext.builder()
-        .cluster(clusterName)
-        .operationName("getTopics")
-        .build();
-
     return topicsService.getTopicsForPagination(getCluster(clusterName))
-        .flatMap(topics -> accessControlService.filterViewableTopics(topics, clusterName))
-        .flatMap(topics -> {
+        .flatMap(existingTopics -> {
           int pageSize = perPage != null && perPage > 0 ? perPage : DEFAULT_PAGE_SIZE;
           var topicsToSkip = ((page != null && page > 0 ? page : 1) - 1) * pageSize;
           var comparator = sortOrder == null || !sortOrder.equals(SortOrderDTO.DESC)
               ? getComparatorForTopic(orderBy) : getComparatorForTopic(orderBy).reversed();
-          List<InternalTopic> filtered = topics.stream()
+          List<InternalTopic> filtered = existingTopics.stream()
              .filter(topic -> !topic.isInternal()
                  || showInternal != null && showInternal)
              .filter(topic -> search == null || StringUtils.containsIgnoreCase(topic.getName(), search))

@@ -206,13 +188,15 @@ public class TopicsController extends AbstractController implements TopicsApi {
              .collect(toList());

           return topicsService.loadTopics(getCluster(clusterName), topicsPage)
+              .flatMapMany(Flux::fromIterable)
+              .filterWhen(dto -> accessControlService.isTopicAccessible(dto, clusterName))
+              .collectList()
               .map(topicsToRender ->
                   new TopicsResponseDTO()
-                      .topics(topicsToRender.stream().map(clusterMapper::toTopic).toList())
+                      .topics(topicsToRender.stream().map(clusterMapper::toTopic).collect(toList()))
                       .pageCount(totalPages));
         })
-        .map(ResponseEntity::ok)
-        .doOnEach(sig -> audit(context, sig));
+        .map(ResponseEntity::ok);
   }

   @Override

@@ -220,19 +204,18 @@ public class TopicsController extends AbstractController implements TopicsApi {
       String clusterName, String topicName, @Valid Mono<TopicUpdateDTO> topicUpdate,
       ServerWebExchange exchange) {

-    var context = AccessContext.builder()
+    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
         .cluster(clusterName)
         .topic(topicName)
         .topicActions(VIEW, EDIT)
-        .operationName("updateTopic")
-        .build();
+        .build());

-    return validateAccess(context).then(
+    return validateAccess.then(
         topicsService
             .updateTopic(getCluster(clusterName), topicName, topicUpdate)
             .map(clusterMapper::toTopic)
             .map(ResponseEntity::ok)
-    ).doOnEach(sig -> audit(context, sig));
+    );
   }

   @Override

@@ -241,17 +224,17 @@ public class TopicsController extends AbstractController implements TopicsApi {
       Mono<PartitionsIncreaseDTO> partitionsIncrease,
       ServerWebExchange exchange) {

-    var context = AccessContext.builder()
+    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
         .cluster(clusterName)
         .topic(topicName)
         .topicActions(VIEW, EDIT)
-        .build();
+        .build());

-    return validateAccess(context).then(
+    return validateAccess.then(
         partitionsIncrease.flatMap(partitions ->
             topicsService.increaseTopicPartitions(getCluster(clusterName), topicName, partitions)
         ).map(ResponseEntity::ok)
-    ).doOnEach(sig -> audit(context, sig));
+    );
   }

   @Override

@@ -260,34 +243,31 @@ public class TopicsController extends AbstractController implements TopicsApi {
       Mono<ReplicationFactorChangeDTO> replicationFactorChange,
       ServerWebExchange exchange) {

-    var context = AccessContext.builder()
+    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
         .cluster(clusterName)
         .topic(topicName)
         .topicActions(VIEW, EDIT)
-        .operationName("changeReplicationFactor")
-        .build();
+        .build());

-    return validateAccess(context).then(
+    return validateAccess.then(
         replicationFactorChange
             .flatMap(rfc ->
                 topicsService.changeReplicationFactor(getCluster(clusterName), topicName, rfc))
             .map(ResponseEntity::ok)
-    ).doOnEach(sig -> audit(context, sig));
+    );
   }

   @Override
   public Mono<ResponseEntity<Void>> analyzeTopic(String clusterName, String topicName, ServerWebExchange exchange) {

-    var context = AccessContext.builder()
+    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
         .cluster(clusterName)
         .topic(topicName)
         .topicActions(MESSAGES_READ)
-        .operationName("analyzeTopic")
-        .build();
+        .build());

-    return validateAccess(context).then(
+    return validateAccess.then(
         topicAnalysisService.analyze(getCluster(clusterName), topicName)
-            .doOnEach(sig -> audit(context, sig))
             .thenReturn(ResponseEntity.ok().build())
     );
   }

@@ -295,17 +275,15 @@ public class TopicsController extends AbstractController implements TopicsApi {
   @Override
   public Mono<ResponseEntity<Void>> cancelTopicAnalysis(String clusterName, String topicName,
                                                         ServerWebExchange exchange) {
-    var context = AccessContext.builder()
+    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
         .cluster(clusterName)
         .topic(topicName)
         .topicActions(MESSAGES_READ)
-        .operationName("cancelTopicAnalysis")
-        .build();
+        .build());

-    return validateAccess(context)
-        .then(Mono.fromRunnable(() -> topicAnalysisService.cancelAnalysis(getCluster(clusterName), topicName)))
-        .doOnEach(sig -> audit(context, sig))
-        .thenReturn(ResponseEntity.ok().build());
+    topicAnalysisService.cancelAnalysis(getCluster(clusterName), topicName);
+
+    return validateAccess.thenReturn(ResponseEntity.ok().build());
   }

@@ -314,46 +292,15 @@ public class TopicsController extends AbstractController implements TopicsApi {
       String topicName,
       ServerWebExchange exchange) {

-    var context = AccessContext.builder()
+    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
         .cluster(clusterName)
         .topic(topicName)
         .topicActions(MESSAGES_READ)
-        .operationName("getTopicAnalysis")
-        .build();
+        .build());

-    return validateAccess(context)
-        .thenReturn(topicAnalysisService.getTopicAnalysis(getCluster(clusterName), topicName)
-            .map(ResponseEntity::ok)
-            .orElseGet(() -> ResponseEntity.notFound().build()))
-        .doOnEach(sig -> audit(context, sig));
-  }
-
-  @Override
-  public Mono<ResponseEntity<Flux<TopicProducerStateDTO>>> getActiveProducerStates(String clusterName,
-                                                                                   String topicName,
-                                                                                   ServerWebExchange exchange) {
-    var context = AccessContext.builder()
-        .cluster(clusterName)
-        .topic(topicName)
-        .topicActions(VIEW)
-        .operationName("getActiveProducerStates")
-        .build();
-
-    Comparator<TopicProducerStateDTO> ordering =
-        Comparator.comparingInt(TopicProducerStateDTO::getPartition)
-            .thenComparing(Comparator.comparing(TopicProducerStateDTO::getProducerId).reversed());
-
-    Flux<TopicProducerStateDTO> states = topicsService.getActiveProducersState(getCluster(clusterName), topicName)
-        .flatMapMany(statesMap ->
-            Flux.fromStream(
-                statesMap.entrySet().stream()
-                    .flatMap(e -> e.getValue().stream().map(p -> clusterMapper.map(e.getKey().partition(), p)))
-                    .sorted(ordering)));
-
-    return validateAccess(context)
-        .thenReturn(states)
+    return validateAccess.thenReturn(topicAnalysisService.getTopicAnalysis(getCluster(clusterName), topicName)
         .map(ResponseEntity::ok)
-        .doOnEach(sig -> audit(context, sig));
+        .orElseGet(() -> ResponseEntity.notFound().build()));
   }

   private Comparator<InternalTopic> getComparatorForTopic(
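The removed getActiveProducerStates built its ordering by composing comparators: ascending partition first, then descending producer id. A small self-contained example of that composition; the record is a stand-in for TopicProducerStateDTO:

import java.util.Comparator;
import java.util.List;

class ProducerStateOrderingSketch {

  record State(int partition, long producerId) {}

  public static void main(String[] args) {
    // same shape as the removed code: partition ascending, producerId descending
    Comparator<State> ordering =
        Comparator.comparingInt(State::partition)
            .thenComparing(Comparator.comparing(State::producerId).reversed());

    List<State> sorted = List.of(new State(1, 10), new State(0, 5), new State(0, 9))
        .stream()
        .sorted(ordering)
        .toList();

    System.out.println(sorted);  // [(0,9), (0,5), (1,10)]
  }
}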
@@ -1,23 +1,38 @@
 package com.provectus.kafka.ui.emitter;

 import com.provectus.kafka.ui.model.TopicMessageEventDTO;
+import java.time.Duration;
+import java.time.Instant;
+import org.apache.kafka.clients.consumer.Consumer;
 import org.apache.kafka.clients.consumer.ConsumerRecord;
+import org.apache.kafka.clients.consumer.ConsumerRecords;
 import org.apache.kafka.common.utils.Bytes;
 import reactor.core.publisher.FluxSink;

-abstract class AbstractEmitter implements java.util.function.Consumer<FluxSink<TopicMessageEventDTO>> {
+public abstract class AbstractEmitter {

   private final MessagesProcessing messagesProcessing;
-  private final PollingSettings pollingSettings;
+  private final PollingThrottler throttler;
+  protected final PollingSettings pollingSettings;

   protected AbstractEmitter(MessagesProcessing messagesProcessing, PollingSettings pollingSettings) {
     this.messagesProcessing = messagesProcessing;
     this.pollingSettings = pollingSettings;
+    this.throttler = pollingSettings.getPollingThrottler();
   }

-  protected PolledRecords poll(FluxSink<TopicMessageEventDTO> sink, EnhancedConsumer consumer) {
-    var records = consumer.pollEnhanced(pollingSettings.getPollTimeout());
-    sendConsuming(sink, records);
+  protected ConsumerRecords<Bytes, Bytes> poll(
+      FluxSink<TopicMessageEventDTO> sink, Consumer<Bytes, Bytes> consumer) {
+    return poll(sink, consumer, pollingSettings.getPollTimeout());
   }

+  protected ConsumerRecords<Bytes, Bytes> poll(
+      FluxSink<TopicMessageEventDTO> sink, Consumer<Bytes, Bytes> consumer, Duration timeout) {
+    Instant start = Instant.now();
+    ConsumerRecords<Bytes, Bytes> records = consumer.poll(timeout);
+    Instant finish = Instant.now();
+    int polledBytes = sendConsuming(sink, records, Duration.between(start, finish).toMillis());
+    throttler.throttleAfterPoll(polledBytes);
     return records;
   }

@@ -25,16 +40,19 @@ abstract class AbstractEmitter implements java.util.function.Consumer<FluxSink<T
     return messagesProcessing.limitReached();
   }

-  protected void send(FluxSink<TopicMessageEventDTO> sink, Iterable<ConsumerRecord<Bytes, Bytes>> records) {
-    messagesProcessing.send(sink, records);
+  protected void sendMessage(FluxSink<TopicMessageEventDTO> sink,
+                             ConsumerRecord<Bytes, Bytes> msg) {
+    messagesProcessing.sendMsg(sink, msg);
   }

   protected void sendPhase(FluxSink<TopicMessageEventDTO> sink, String name) {
     messagesProcessing.sendPhase(sink, name);
   }

-  protected void sendConsuming(FluxSink<TopicMessageEventDTO> sink, PolledRecords records) {
-    messagesProcessing.sentConsumingInfo(sink, records);
+  protected int sendConsuming(FluxSink<TopicMessageEventDTO> sink,
+                              ConsumerRecords<Bytes, Bytes> records,
+                              long elapsed) {
+    return messagesProcessing.sentConsumingInfo(sink, records, elapsed);
   }

   protected void sendFinishStatsAndCompleteSink(FluxSink<TopicMessageEventDTO> sink) {
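The restored two-argument poll(...) overload does its own bookkeeping: it times the poll, reports bytes and elapsed milliseconds through sendConsuming, and hands the byte count to the throttler. A stripped-down sketch of that sequence, with stand-ins for the consumer and PollingThrottler:

import java.time.Duration;
import java.time.Instant;
import java.util.function.IntConsumer;

class PollBookkeepingSketch {

  static int timedPoll(IntConsumer throttleAfterPoll) throws InterruptedException {
    Instant start = Instant.now();
    Thread.sleep(5);                          // stand-in for consumer.poll(timeout)
    int polledBytes = 4096;                   // stand-in for the polled batch size
    Instant finish = Instant.now();
    long elapsedMs = Duration.between(start, finish).toMillis();
    System.out.println("polled " + polledBytes + " bytes in " + elapsedMs + " ms");
    throttleAfterPoll.accept(polledBytes);    // pace the next poll by bytes read
    return polledBytes;
  }

  public static void main(String[] args) throws InterruptedException {
    timedPoll(bytes -> { /* a real throttler would sleep here when over budget */ });
  }
}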
@@ -1,60 +0,0 @@
-package com.provectus.kafka.ui.emitter;
-
-import com.provectus.kafka.ui.model.ConsumerPosition;
-import com.provectus.kafka.ui.model.TopicMessageDTO;
-import com.provectus.kafka.ui.serdes.ConsumerRecordDeserializer;
-import java.util.Comparator;
-import java.util.Map;
-import java.util.TreeMap;
-import java.util.function.Predicate;
-import java.util.function.Supplier;
-import java.util.stream.Collectors;
-import org.apache.kafka.common.TopicPartition;
-
-public class BackwardEmitter extends RangePollingEmitter {
-
-  public BackwardEmitter(Supplier<EnhancedConsumer> consumerSupplier,
-                         ConsumerPosition consumerPosition,
-                         int messagesPerPage,
-                         ConsumerRecordDeserializer deserializer,
-                         Predicate<TopicMessageDTO> filter,
-                         PollingSettings pollingSettings) {
-    super(
-        consumerSupplier,
-        consumerPosition,
-        messagesPerPage,
-        new MessagesProcessing(
-            deserializer,
-            filter,
-            false,
-            messagesPerPage
-        ),
-        pollingSettings
-    );
-  }
-
-  @Override
-  protected TreeMap<TopicPartition, FromToOffset> nextPollingRange(TreeMap<TopicPartition, FromToOffset> prevRange,
-                                                                   SeekOperations seekOperations) {
-    TreeMap<TopicPartition, Long> readToOffsets = new TreeMap<>(Comparator.comparingInt(TopicPartition::partition));
-    if (prevRange.isEmpty()) {
-      readToOffsets.putAll(seekOperations.getOffsetsForSeek());
-    } else {
-      readToOffsets.putAll(
-          prevRange.entrySet()
-              .stream()
-              .collect(Collectors.toMap(Map.Entry::getKey, e -> e.getValue().from()))
-      );
-    }
-
-    int msgsToPollPerPartition = (int) Math.ceil((double) messagesPerPage / readToOffsets.size());
-    TreeMap<TopicPartition, FromToOffset> result = new TreeMap<>(Comparator.comparingInt(TopicPartition::partition));
-    readToOffsets.forEach((tp, toOffset) -> {
-      long tpStartOffset = seekOperations.getBeginOffsets().get(tp);
-      if (toOffset > tpStartOffset) {
-        result.put(tp, new FromToOffset(Math.max(tpStartOffset, toOffset - msgsToPollPerPartition), toOffset));
-      }
-    });
-    return result;
-  }
-}
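The deleted BackwardEmitter pages backwards by sliding a fixed-size window from each partition's anchor offset down to its begin offset; every new range ends where the previous one started. A worked single-partition example of nextPollingRange's arithmetic:

import java.util.ArrayList;
import java.util.List;

class BackwardRangeSketch {

  record FromToOffset(long from, long to) {}

  public static void main(String[] args) {
    long begin = 0;
    long readTo = 100;                 // first page anchored at offset 100
    int msgsToPollPerPartition = 20;   // ceil(messagesPerPage / partitionCount)

    List<FromToOffset> pages = new ArrayList<>();
    while (readTo > begin) {
      long from = Math.max(begin, readTo - msgsToPollPerPartition);  // clamp at begin
      pages.add(new FromToOffset(from, readTo));
      readTo = from;                   // prevRange.from() becomes the next page's 'to'
    }
    pages.forEach(System.out::println);  // [80,100), [60,80), [40,60), [20,40), [0,20)
  }
}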
@@ -0,0 +1,128 @@
+package com.provectus.kafka.ui.emitter;
+
+import com.provectus.kafka.ui.model.ConsumerPosition;
+import com.provectus.kafka.ui.model.TopicMessageEventDTO;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.List;
+import java.util.TreeMap;
+import java.util.function.Supplier;
+import lombok.extern.slf4j.Slf4j;
+import org.apache.kafka.clients.consumer.Consumer;
+import org.apache.kafka.clients.consumer.ConsumerRecord;
+import org.apache.kafka.clients.consumer.KafkaConsumer;
+import org.apache.kafka.common.TopicPartition;
+import org.apache.kafka.common.errors.InterruptException;
+import org.apache.kafka.common.utils.Bytes;
+import reactor.core.publisher.FluxSink;
+
+@Slf4j
+public class BackwardRecordEmitter
+    extends AbstractEmitter
+    implements java.util.function.Consumer<FluxSink<TopicMessageEventDTO>> {
+
+  private final Supplier<KafkaConsumer<Bytes, Bytes>> consumerSupplier;
+  private final ConsumerPosition consumerPosition;
+  private final int messagesPerPage;
+
+  public BackwardRecordEmitter(
+      Supplier<KafkaConsumer<Bytes, Bytes>> consumerSupplier,
+      ConsumerPosition consumerPosition,
+      int messagesPerPage,
+      MessagesProcessing messagesProcessing,
+      PollingSettings pollingSettings) {
+    super(messagesProcessing, pollingSettings);
+    this.consumerPosition = consumerPosition;
+    this.messagesPerPage = messagesPerPage;
+    this.consumerSupplier = consumerSupplier;
+  }
+
+  @Override
+  public void accept(FluxSink<TopicMessageEventDTO> sink) {
+    log.debug("Starting backward polling for {}", consumerPosition);
+    try (KafkaConsumer<Bytes, Bytes> consumer = consumerSupplier.get()) {
+      sendPhase(sink, "Created consumer");
+
+      var seekOperations = SeekOperations.create(consumer, consumerPosition);
+      var readUntilOffsets = new TreeMap<TopicPartition, Long>(Comparator.comparingInt(TopicPartition::partition));
+      readUntilOffsets.putAll(seekOperations.getOffsetsForSeek());
+
+      int msgsToPollPerPartition = (int) Math.ceil((double) messagesPerPage / readUntilOffsets.size());
+      log.debug("'Until' offsets for polling: {}", readUntilOffsets);
+
+      while (!sink.isCancelled() && !readUntilOffsets.isEmpty() && !sendLimitReached()) {
+        new TreeMap<>(readUntilOffsets).forEach((tp, readToOffset) -> {
+          if (sink.isCancelled()) {
+            return; //fast return in case of sink cancellation
+          }
+          long beginOffset = seekOperations.getBeginOffsets().get(tp);
+          long readFromOffset = Math.max(beginOffset, readToOffset - msgsToPollPerPartition);
+
+          partitionPollIteration(tp, readFromOffset, readToOffset, consumer, sink)
+              .forEach(r -> sendMessage(sink, r));
+
+          if (beginOffset == readFromOffset) {
+            // we fully read this partition -> removing it from polling iterations
+            readUntilOffsets.remove(tp);
+          } else {
+            // updating 'to' offset for next polling iteration
+            readUntilOffsets.put(tp, readFromOffset);
+          }
+        });
+        if (readUntilOffsets.isEmpty()) {
+          log.debug("begin reached after partitions poll iteration");
+        } else if (sink.isCancelled()) {
+          log.debug("sink is cancelled after partitions poll iteration");
+        }
+      }
+      sendFinishStatsAndCompleteSink(sink);
+      log.debug("Polling finished");
+    } catch (InterruptException kafkaInterruptException) {
+      log.debug("Polling finished due to thread interruption");
+      sink.complete();
+    } catch (Exception e) {
+      log.error("Error occurred while consuming records", e);
+      sink.error(e);
+    }
+  }
+
+  private List<ConsumerRecord<Bytes, Bytes>> partitionPollIteration(
+      TopicPartition tp,
+      long fromOffset,
+      long toOffset,
+      Consumer<Bytes, Bytes> consumer,
+      FluxSink<TopicMessageEventDTO> sink
+  ) {
+    consumer.assign(Collections.singleton(tp));
+    consumer.seek(tp, fromOffset);
+    sendPhase(sink, String.format("Polling partition: %s from offset %s", tp, fromOffset));
+    int desiredMsgsToPoll = (int) (toOffset - fromOffset);
+
+    var recordsToSend = new ArrayList<ConsumerRecord<Bytes, Bytes>>();
+
+    EmptyPollsCounter emptyPolls = pollingSettings.createEmptyPollsCounter();
+    while (!sink.isCancelled()
+        && !sendLimitReached()
+        && recordsToSend.size() < desiredMsgsToPoll
+        && !emptyPolls.noDataEmptyPollsReached()) {
+      var polledRecords = poll(sink, consumer, pollingSettings.getPartitionPollTimeout());
+      emptyPolls.count(polledRecords);
+
+      log.debug("{} records polled from {}", polledRecords.count(), tp);
+
+      var filteredRecords = polledRecords.records(tp).stream()
+          .filter(r -> r.offset() < toOffset)
+          .toList();
+
+      if (!polledRecords.isEmpty() && filteredRecords.isEmpty()) {
+        // we already read all messages in target offsets interval
+        break;
+      }
+      recordsToSend.addAll(filteredRecords);
+    }
+    log.debug("{} records to send", recordsToSend.size());
+    Collections.reverse(recordsToSend);
+    return recordsToSend;
+  }
+}
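BackwardRecordEmitter implements java.util.function.Consumer<FluxSink<TopicMessageEventDTO>>, which is the shape reactor's Flux.create expects. How the project actually subscribes it is outside this diff; the sketch below only demonstrates that contract:

import java.util.function.Consumer;
import reactor.core.publisher.Flux;
import reactor.core.publisher.FluxSink;

class EmitterWiringSketch {

  public static void main(String[] args) {
    Consumer<FluxSink<String>> emitter = sink -> {
      sink.next("phase: Created consumer");   // sendPhase equivalent
      sink.next("message 1");                 // sendMessage equivalent
      sink.complete();                        // sendFinishStatsAndCompleteSink equivalent
    };

    // Flux.create hands the emitter its sink when a subscriber arrives
    Flux.create(emitter).subscribe(System.out::println);
  }
}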
@@ -2,6 +2,9 @@ package com.provectus.kafka.ui.emitter;

 import com.provectus.kafka.ui.model.TopicMessageConsumingDTO;
 import com.provectus.kafka.ui.model.TopicMessageEventDTO;
+import com.provectus.kafka.ui.util.ConsumerRecordsUtil;
+import org.apache.kafka.clients.consumer.ConsumerRecords;
+import org.apache.kafka.common.utils.Bytes;
 import reactor.core.publisher.FluxSink;

 class ConsumingStats {

@@ -9,37 +12,41 @@ class ConsumingStats {
   private long bytes = 0;
   private int records = 0;
   private long elapsed = 0;
-  private int filterApplyErrors = 0;

-  void sendConsumingEvt(FluxSink<TopicMessageEventDTO> sink, PolledRecords polledRecords) {
-    bytes += polledRecords.bytes();
-    records += polledRecords.count();
-    elapsed += polledRecords.elapsed().toMillis();
+  /**
+   * returns bytes polled.
+   */
+  int sendConsumingEvt(FluxSink<TopicMessageEventDTO> sink,
+                       ConsumerRecords<Bytes, Bytes> polledRecords,
+                       long elapsed,
+                       int filterApplyErrors) {
+    int polledBytes = ConsumerRecordsUtil.calculatePolledSize(polledRecords);
+    bytes += polledBytes;
+    this.records += polledRecords.count();
+    this.elapsed += elapsed;
     sink.next(
         new TopicMessageEventDTO()
             .type(TopicMessageEventDTO.TypeEnum.CONSUMING)
-            .consuming(createConsumingStats())
+            .consuming(createConsumingStats(sink, filterApplyErrors))
     );
+    return polledBytes;
   }

-  void incFilterApplyError() {
-    filterApplyErrors++;
-  }
-
-  void sendFinishEvent(FluxSink<TopicMessageEventDTO> sink) {
+  void sendFinishEvent(FluxSink<TopicMessageEventDTO> sink, int filterApplyErrors) {
     sink.next(
         new TopicMessageEventDTO()
             .type(TopicMessageEventDTO.TypeEnum.DONE)
-            .consuming(createConsumingStats())
+            .consuming(createConsumingStats(sink, filterApplyErrors))
     );
   }

-  private TopicMessageConsumingDTO createConsumingStats() {
+  private TopicMessageConsumingDTO createConsumingStats(FluxSink<TopicMessageEventDTO> sink,
+                                                        int filterApplyErrors) {
     return new TopicMessageConsumingDTO()
-        .bytesConsumed(bytes)
-        .elapsedMs(elapsed)
-        .isCancelled(false)
+        .bytesConsumed(this.bytes)
+        .elapsedMs(this.elapsed)
+        .isCancelled(sink.isCancelled())
         .filterApplyErrors(filterApplyErrors)
-        .messagesConsumed(records);
+        .messagesConsumed(this.records);
   }
 }
@@ -0,0 +1,28 @@
+package com.provectus.kafka.ui.emitter;
+
+import org.apache.kafka.clients.consumer.ConsumerRecords;
+
+// In some situations it is hard to say whether records range (between two offsets) was fully polled.
+// This happens when we have holes in records sequences that is usual case for compact topics or
+// topics with transactional writes. In such cases if you want to poll all records between offsets X and Y
+// there is no guarantee that you will ever see record with offset Y.
+// To workaround this we can assume that after N consecutive empty polls all target messages were read.
+public class EmptyPollsCounter {
+
+  private final int maxEmptyPolls;
+
+  private int emptyPolls = 0;
+
+  EmptyPollsCounter(int maxEmptyPolls) {
+    this.maxEmptyPolls = maxEmptyPolls;
+  }
+
+  public void count(ConsumerRecords<?, ?> polled) {
+    emptyPolls = polled.isEmpty() ? emptyPolls + 1 : 0;
+  }
+
+  public boolean noDataEmptyPollsReached() {
+    return emptyPolls >= maxEmptyPolls;
+  }
+
+}
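Usage sketch for the counter above: a bounded read loop gives up after N consecutive empty polls, because offset gaps (compaction, transaction markers) mean the target end offset may never appear. The array of poll sizes stands in for real consumer.poll results:

class EmptyPollsUsageSketch {

  public static void main(String[] args) {
    int maxEmptyPolls = 3;
    int emptyPolls = 0;                              // EmptyPollsCounter state
    int[] polledCounts = {5, 0, 2, 0, 0, 0, 7};      // records returned per poll

    for (int count : polledCounts) {
      emptyPolls = count == 0 ? emptyPolls + 1 : 0;  // same rule as count(...)
      if (emptyPolls >= maxEmptyPolls) {             // noDataEmptyPollsReached()
        System.out.println("3 empty polls in a row -> assume range fully read");
        break;
      }
    }
  }
}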
@@ -1,82 +0,0 @@
-package com.provectus.kafka.ui.emitter;
-
-import com.google.common.base.Preconditions;
-import com.google.common.base.Stopwatch;
-import com.provectus.kafka.ui.util.ApplicationMetrics;
-import java.time.Duration;
-import java.util.Collection;
-import java.util.List;
-import java.util.Properties;
-import java.util.Set;
-import java.util.regex.Pattern;
-import java.util.stream.Collectors;
-import lombok.RequiredArgsConstructor;
-import lombok.experimental.Delegate;
-import org.apache.kafka.clients.consumer.Consumer;
-import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;
-import org.apache.kafka.clients.consumer.ConsumerRecords;
-import org.apache.kafka.clients.consumer.KafkaConsumer;
-import org.apache.kafka.common.TopicPartition;
-import org.apache.kafka.common.serialization.BytesDeserializer;
-import org.apache.kafka.common.utils.Bytes;
-
-
-public class EnhancedConsumer extends KafkaConsumer<Bytes, Bytes> {
-
-  private final PollingThrottler throttler;
-  private final ApplicationMetrics metrics;
-  private String pollingTopic;
-
-  public EnhancedConsumer(Properties properties,
-                          PollingThrottler throttler,
-                          ApplicationMetrics metrics) {
-    super(properties, new BytesDeserializer(), new BytesDeserializer());
-    this.throttler = throttler;
-    this.metrics = metrics;
-    metrics.activeConsumers().incrementAndGet();
-  }
-
-  public PolledRecords pollEnhanced(Duration dur) {
-    var stopwatch = Stopwatch.createStarted();
-    ConsumerRecords<Bytes, Bytes> polled = poll(dur);
-    PolledRecords polledEnhanced = PolledRecords.create(polled, stopwatch.elapsed());
-    var throttled = throttler.throttleAfterPoll(polledEnhanced.bytes());
-    metrics.meterPolledRecords(pollingTopic, polledEnhanced, throttled);
-    return polledEnhanced;
-  }
-
-  @Override
-  public void assign(Collection<TopicPartition> partitions) {
-    super.assign(partitions);
-    Set<String> assignedTopics = partitions.stream().map(TopicPartition::topic).collect(Collectors.toSet());
-    Preconditions.checkState(assignedTopics.size() == 1);
-    this.pollingTopic = assignedTopics.iterator().next();
-  }
-
-  @Override
-  public void subscribe(Pattern pattern) {
-    throw new UnsupportedOperationException();
-  }
-
-  @Override
-  public void subscribe(Collection<String> topics) {
-    throw new UnsupportedOperationException();
-  }
-
-  @Override
-  public void subscribe(Pattern pattern, ConsumerRebalanceListener listener) {
-    throw new UnsupportedOperationException();
-  }
-
-  @Override
-  public void subscribe(Collection<String> topics, ConsumerRebalanceListener listener) {
-    throw new UnsupportedOperationException();
-  }
-
-  @Override
-  public void close(Duration timeout) {
-    metrics.activeConsumers().decrementAndGet();
-    super.close(timeout);
-  }
-
-}
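
For context, the heart of the removed pollEnhanced() is a small measure-then-pay pattern. A sketch, assuming the same package as PolledRecords and PollingThrottler (PolledRecords.create is package-private):

import com.google.common.base.Stopwatch;
import java.time.Duration;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.utils.Bytes;

class MeteredPollSketch {
  // Time the poll, wrap the batch with its byte size, then pay for those bytes
  // before the caller polls again.
  static PolledRecords timedPoll(KafkaConsumer<Bytes, Bytes> consumer,
                                 PollingThrottler throttler,
                                 Duration timeout) {
    Stopwatch stopwatch = Stopwatch.createStarted();
    ConsumerRecords<Bytes, Bytes> polled = consumer.poll(timeout);
    PolledRecords result = PolledRecords.create(polled, stopwatch.elapsed());
    throttler.throttleAfterPoll(result.bytes()); // sleeps if the bytes/sec budget is exceeded
    return result;
  }
}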
@@ -1,61 +0,0 @@
-package com.provectus.kafka.ui.emitter;
-
-import com.provectus.kafka.ui.model.ConsumerPosition;
-import com.provectus.kafka.ui.model.TopicMessageDTO;
-import com.provectus.kafka.ui.serdes.ConsumerRecordDeserializer;
-import java.util.Comparator;
-import java.util.Map;
-import java.util.TreeMap;
-import java.util.function.Predicate;
-import java.util.function.Supplier;
-import java.util.stream.Collectors;
-import org.apache.kafka.common.TopicPartition;
-
-public class ForwardEmitter extends RangePollingEmitter {
-
-  public ForwardEmitter(Supplier<EnhancedConsumer> consumerSupplier,
-                        ConsumerPosition consumerPosition,
-                        int messagesPerPage,
-                        ConsumerRecordDeserializer deserializer,
-                        Predicate<TopicMessageDTO> filter,
-                        PollingSettings pollingSettings) {
-    super(
-        consumerSupplier,
-        consumerPosition,
-        messagesPerPage,
-        new MessagesProcessing(
-            deserializer,
-            filter,
-            true,
-            messagesPerPage
-        ),
-        pollingSettings
-    );
-  }
-
-  @Override
-  protected TreeMap<TopicPartition, FromToOffset> nextPollingRange(TreeMap<TopicPartition, FromToOffset> prevRange,
-                                                                   SeekOperations seekOperations) {
-    TreeMap<TopicPartition, Long> readFromOffsets = new TreeMap<>(Comparator.comparingInt(TopicPartition::partition));
-    if (prevRange.isEmpty()) {
-      readFromOffsets.putAll(seekOperations.getOffsetsForSeek());
-    } else {
-      readFromOffsets.putAll(
-          prevRange.entrySet()
-              .stream()
-              .collect(Collectors.toMap(Map.Entry::getKey, e -> e.getValue().to()))
-      );
-    }
-
-    int msgsToPollPerPartition = (int) Math.ceil((double) messagesPerPage / readFromOffsets.size());
-    TreeMap<TopicPartition, FromToOffset> result = new TreeMap<>(Comparator.comparingInt(TopicPartition::partition));
-    readFromOffsets.forEach((tp, fromOffset) -> {
-      long tpEndOffset = seekOperations.getEndOffsets().get(tp);
-      if (fromOffset < tpEndOffset) {
-        result.put(tp, new FromToOffset(fromOffset, Math.min(tpEndOffset, fromOffset + msgsToPollPerPartition)));
-      }
-    });
-    return result;
-  }
-
-}
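
To make the paging arithmetic in nextPollingRange() concrete, a worked example with made-up numbers:

// messagesPerPage = 100, three partitions with next read offsets
// {p0=0, p1=40, p2=990} and end offsets {p0=500, p1=500, p2=1000}:
//   msgsToPollPerPartition = ceil(100 / 3) = 34
//   p0 -> [0, 34)       34 records
//   p1 -> [40, 74)      34 records
//   p2 -> [990, 1000)   10 records (clipped at the partition's end offset)
// A partition whose read offset has reached its end offset is dropped from the
// range; once the returned map is empty, the base class stops polling.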
@@ -0,0 +1,66 @@
+package com.provectus.kafka.ui.emitter;
+
+import com.provectus.kafka.ui.model.ConsumerPosition;
+import com.provectus.kafka.ui.model.TopicMessageEventDTO;
+import java.util.function.Supplier;
+import lombok.extern.slf4j.Slf4j;
+import org.apache.kafka.clients.consumer.ConsumerRecord;
+import org.apache.kafka.clients.consumer.ConsumerRecords;
+import org.apache.kafka.clients.consumer.KafkaConsumer;
+import org.apache.kafka.common.errors.InterruptException;
+import org.apache.kafka.common.utils.Bytes;
+import reactor.core.publisher.FluxSink;
+
+@Slf4j
+public class ForwardRecordEmitter
+    extends AbstractEmitter
+    implements java.util.function.Consumer<FluxSink<TopicMessageEventDTO>> {
+
+  private final Supplier<KafkaConsumer<Bytes, Bytes>> consumerSupplier;
+  private final ConsumerPosition position;
+
+  public ForwardRecordEmitter(
+      Supplier<KafkaConsumer<Bytes, Bytes>> consumerSupplier,
+      ConsumerPosition position,
+      MessagesProcessing messagesProcessing,
+      PollingSettings pollingSettings) {
+    super(messagesProcessing, pollingSettings);
+    this.position = position;
+    this.consumerSupplier = consumerSupplier;
+  }
+
+  @Override
+  public void accept(FluxSink<TopicMessageEventDTO> sink) {
+    log.debug("Starting forward polling for {}", position);
+    try (KafkaConsumer<Bytes, Bytes> consumer = consumerSupplier.get()) {
+      sendPhase(sink, "Assigning partitions");
+      var seekOperations = SeekOperations.create(consumer, position);
+      seekOperations.assignAndSeekNonEmptyPartitions();
+
+      EmptyPollsCounter emptyPolls = pollingSettings.createEmptyPollsCounter();
+      while (!sink.isCancelled()
+          && !sendLimitReached()
+          && !seekOperations.assignedPartitionsFullyPolled()
+          && !emptyPolls.noDataEmptyPollsReached()) {
+
+        sendPhase(sink, "Polling");
+        ConsumerRecords<Bytes, Bytes> records = poll(sink, consumer);
+        emptyPolls.count(records);
+
+        log.debug("{} records polled", records.count());
+
+        for (ConsumerRecord<Bytes, Bytes> msg : records) {
+          sendMessage(sink, msg);
+        }
+      }
+      sendFinishStatsAndCompleteSink(sink);
+      log.debug("Polling finished");
+    } catch (InterruptException kafkaInterruptException) {
+      log.debug("Polling finished due to thread interruption");
+      sink.complete();
+    } catch (Exception e) {
+      log.error("Error occurred while consuming records", e);
+      sink.error(e);
+    }
+  }
+}
@@ -1,75 +1,71 @@
 package com.provectus.kafka.ui.emitter;
 
-import static java.util.stream.Collectors.collectingAndThen;
-import static java.util.stream.Collectors.groupingBy;
-import static java.util.stream.Collectors.toList;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.collect.Iterables;
-import com.google.common.collect.Streams;
 import com.provectus.kafka.ui.model.TopicMessageDTO;
 import com.provectus.kafka.ui.model.TopicMessageEventDTO;
 import com.provectus.kafka.ui.model.TopicMessagePhaseDTO;
 import com.provectus.kafka.ui.serdes.ConsumerRecordDeserializer;
-import java.util.Comparator;
-import java.util.List;
-import java.util.Map;
-import java.util.TreeMap;
 import java.util.function.Predicate;
 import javax.annotation.Nullable;
-import lombok.RequiredArgsConstructor;
 import lombok.extern.slf4j.Slf4j;
 import org.apache.kafka.clients.consumer.ConsumerRecord;
+import org.apache.kafka.clients.consumer.ConsumerRecords;
 import org.apache.kafka.common.utils.Bytes;
 import reactor.core.publisher.FluxSink;
 
 @Slf4j
-@RequiredArgsConstructor
-class MessagesProcessing {
+public class MessagesProcessing {
 
   private final ConsumingStats consumingStats = new ConsumingStats();
   private long sentMessages = 0;
+  private int filterApplyErrors = 0;
 
   private final ConsumerRecordDeserializer deserializer;
   private final Predicate<TopicMessageDTO> filter;
-  private final boolean ascendingSortBeforeSend;
   private final @Nullable Integer limit;
 
+  public MessagesProcessing(ConsumerRecordDeserializer deserializer,
+                            Predicate<TopicMessageDTO> filter,
+                            @Nullable Integer limit) {
+    this.deserializer = deserializer;
+    this.filter = filter;
+    this.limit = limit;
+  }
+
   boolean limitReached() {
     return limit != null && sentMessages >= limit;
   }
 
-  void send(FluxSink<TopicMessageEventDTO> sink, Iterable<ConsumerRecord<Bytes, Bytes>> polled) {
-    sortForSending(polled, ascendingSortBeforeSend)
-        .forEach(rec -> {
-          if (!limitReached() && !sink.isCancelled()) {
-            TopicMessageDTO topicMessage = deserializer.deserialize(rec);
-            try {
-              if (filter.test(topicMessage)) {
-                sink.next(
-                    new TopicMessageEventDTO()
-                        .type(TopicMessageEventDTO.TypeEnum.MESSAGE)
-                        .message(topicMessage)
-                );
-                sentMessages++;
-              }
-            } catch (Exception e) {
-              consumingStats.incFilterApplyError();
-              log.trace("Error applying filter for message {}", topicMessage);
-            }
-          }
-        });
+  void sendMsg(FluxSink<TopicMessageEventDTO> sink, ConsumerRecord<Bytes, Bytes> rec) {
+    if (!sink.isCancelled() && !limitReached()) {
+      TopicMessageDTO topicMessage = deserializer.deserialize(rec);
+      try {
+        if (filter.test(topicMessage)) {
+          sink.next(
+              new TopicMessageEventDTO()
+                  .type(TopicMessageEventDTO.TypeEnum.MESSAGE)
+                  .message(topicMessage)
+          );
+          sentMessages++;
+        }
+      } catch (Exception e) {
+        filterApplyErrors++;
+        log.trace("Error applying filter for message {}", topicMessage);
+      }
+    }
   }
 
-  void sentConsumingInfo(FluxSink<TopicMessageEventDTO> sink, PolledRecords polledRecords) {
+  int sentConsumingInfo(FluxSink<TopicMessageEventDTO> sink,
+                        ConsumerRecords<Bytes, Bytes> polledRecords,
+                        long elapsed) {
     if (!sink.isCancelled()) {
-      consumingStats.sendConsumingEvt(sink, polledRecords);
+      return consumingStats.sendConsumingEvt(sink, polledRecords, elapsed, filterApplyErrors);
     }
+    return 0;
   }
 
   void sendFinishEvent(FluxSink<TopicMessageEventDTO> sink) {
     if (!sink.isCancelled()) {
-      consumingStats.sendFinishEvent(sink);
+      consumingStats.sendFinishEvent(sink, filterApplyErrors);
    }
   }
 
@@ -83,30 +79,4 @@ class MessagesProcessing {
     }
   }
 
-  /*
-   * Sorting by timestamps, BUT requesting that records within same partitions should be ordered by offsets.
-   */
-  @VisibleForTesting
-  static Iterable<ConsumerRecord<Bytes, Bytes>> sortForSending(Iterable<ConsumerRecord<Bytes, Bytes>> records,
-                                                               boolean asc) {
-    Comparator<ConsumerRecord> offsetComparator = asc
-        ? Comparator.comparingLong(ConsumerRecord::offset)
-        : Comparator.<ConsumerRecord>comparingLong(ConsumerRecord::offset).reversed();
-
-    // partition -> sorted by offsets records
-    Map<Integer, List<ConsumerRecord<Bytes, Bytes>>> perPartition = Streams.stream(records)
-        .collect(
-            groupingBy(
-                ConsumerRecord::partition,
-                TreeMap::new,
-                collectingAndThen(toList(), lst -> lst.stream().sorted(offsetComparator).toList())));
-
-    Comparator<ConsumerRecord> tsComparator = asc
-        ? Comparator.comparing(ConsumerRecord::timestamp)
-        : Comparator.<ConsumerRecord>comparingLong(ConsumerRecord::timestamp).reversed();
-
-    // merge-sorting records from partitions one by one using timestamp comparator
-    return Iterables.mergeSorted(perPartition.values(), tsComparator);
-  }
-
 }
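
The removed sortForSending() builds on Guava's Iterables.mergeSorted, which lazily merges already-sorted inputs. A tiny self-contained demo of that primitive, with integers standing in for ConsumerRecords:

import com.google.common.collect.Iterables;
import java.util.Comparator;
import java.util.List;

class MergeSortedDemo {
  public static void main(String[] args) {
    // Each inner list is pre-sorted, like the per-partition, offset-ordered lists above.
    List<List<Integer>> perPartition = List.of(
        List.of(1, 4, 9),   // partition 0
        List.of(2, 3, 10),  // partition 1
        List.of(5, 6, 7));  // partition 2
    Iterable<Integer> merged = Iterables.mergeSorted(perPartition, Comparator.naturalOrder());
    merged.forEach(System.out::println); // prints 1 2 3 4 5 6 7 9 10, one per line
  }
}

Note that mergeSorted only guarantees a fully sorted result when every input is sorted by the same comparator; the real method deliberately keeps offset order within each partition, so the merged timestamp order is best-effort when timestamps are not monotonic inside a partition, exactly as the deleted doc comment says.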
@@ -5,15 +5,15 @@ import java.util.Collection;
 import java.util.HashSet;
 import java.util.Map;
 import java.util.Set;
+import java.util.stream.Collectors;
 import lombok.Getter;
 import lombok.extern.slf4j.Slf4j;
-import org.apache.commons.lang3.mutable.MutableLong;
 import org.apache.kafka.clients.consumer.Consumer;
 import org.apache.kafka.common.TopicPartition;
 
 @Slf4j
 @Getter
-class OffsetsInfo {
+public class OffsetsInfo {
 
   private final Consumer<?, ?> consumer;
 
@@ -23,15 +23,16 @@ class OffsetsInfo {
   private final Set<TopicPartition> nonEmptyPartitions = new HashSet<>();
   private final Set<TopicPartition> emptyPartitions = new HashSet<>();
 
-  OffsetsInfo(Consumer<?, ?> consumer, String topic) {
+  public OffsetsInfo(Consumer<?, ?> consumer, String topic) {
     this(consumer,
         consumer.partitionsFor(topic).stream()
             .map(pi -> new TopicPartition(topic, pi.partition()))
-            .toList()
+            .collect(Collectors.toList())
     );
   }
 
-  OffsetsInfo(Consumer<?, ?> consumer, Collection<TopicPartition> targetPartitions) {
+  public OffsetsInfo(Consumer<?, ?> consumer,
+                     Collection<TopicPartition> targetPartitions) {
     this.consumer = consumer;
     this.beginOffsets = consumer.beginningOffsets(targetPartitions);
     this.endOffsets = consumer.endOffsets(targetPartitions);
@@ -45,8 +46,8 @@ class OffsetsInfo {
     });
   }
 
-  boolean assignedPartitionsFullyPolled() {
-    for (var tp : consumer.assignment()) {
+  public boolean assignedPartitionsFullyPolled() {
+    for (var tp: consumer.assignment()) {
       Preconditions.checkArgument(endOffsets.containsKey(tp));
       if (endOffsets.get(tp) > consumer.position(tp)) {
         return false;
@@ -55,10 +56,4 @@ class OffsetsInfo {
     return true;
   }
 
-  long summaryOffsetsRange() {
-    MutableLong cnt = new MutableLong();
-    nonEmptyPartitions.forEach(tp -> cnt.add(endOffsets.get(tp) - beginOffsets.get(tp)));
-    return cnt.getValue();
-  }
-
 }
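
For reference, what the removed summaryOffsetsRange() computed, as a dependency-free sketch (the MutableLong was just an accumulator):

import java.util.Map;
import org.apache.kafka.common.TopicPartition;

class OffsetsRangeSketch {
  // Total number of offsets between begin and end across the given partitions,
  // e.g. begin = {p0=100, p1=0}, end = {p0=150, p1=40}  ->  50 + 40 = 90.
  static long summaryRange(Map<TopicPartition, Long> begin, Map<TopicPartition, Long> end) {
    return begin.keySet().stream()
        .mapToLong(tp -> end.get(tp) - begin.get(tp))
        .sum();
  }
}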
@@ -1,48 +0,0 @@
-package com.provectus.kafka.ui.emitter;
-
-import java.time.Duration;
-import java.util.Iterator;
-import java.util.List;
-import org.apache.kafka.clients.consumer.ConsumerRecord;
-import org.apache.kafka.clients.consumer.ConsumerRecords;
-import org.apache.kafka.common.TopicPartition;
-import org.apache.kafka.common.header.Header;
-import org.apache.kafka.common.utils.Bytes;
-
-public record PolledRecords(int count,
-                            int bytes,
-                            Duration elapsed,
-                            ConsumerRecords<Bytes, Bytes> records) implements Iterable<ConsumerRecord<Bytes, Bytes>> {
-
-  static PolledRecords create(ConsumerRecords<Bytes, Bytes> polled, Duration pollDuration) {
-    return new PolledRecords(
-        polled.count(),
-        calculatePolledRecSize(polled),
-        pollDuration,
-        polled
-    );
-  }
-
-  public List<ConsumerRecord<Bytes, Bytes>> records(TopicPartition tp) {
-    return records.records(tp);
-  }
-
-  @Override
-  public Iterator<ConsumerRecord<Bytes, Bytes>> iterator() {
-    return records.iterator();
-  }
-
-  private static int calculatePolledRecSize(Iterable<ConsumerRecord<Bytes, Bytes>> recs) {
-    int polledBytes = 0;
-    for (ConsumerRecord<Bytes, Bytes> rec : recs) {
-      for (Header header : rec.headers()) {
-        polledBytes +=
-            (header.key() != null ? header.key().getBytes().length : 0)
-                + (header.value() != null ? header.value().length : 0);
-      }
-      polledBytes += rec.key() == null ? 0 : rec.serializedKeySize();
-      polledBytes += rec.value() == null ? 0 : rec.serializedValueSize();
-    }
-    return polledBytes;
-  }
-}
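
The byte accounting in calculatePolledRecSize() is easy to sanity-check by hand; a worked example with hypothetical sizes:

// One record with a single header ("trace-id" -> 16-byte value):
//   header:           8 (key, "trace-id".getBytes().length) + 16 (value) = 24
//   serialized key:   12
//   serialized value: 300
//   record total:     24 + 12 + 300 = 336 bytes
// The method sums this over every record in the batch; null keys and values count as 0.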
@@ -8,8 +8,13 @@ import java.util.function.Supplier;
 public class PollingSettings {
 
   private static final Duration DEFAULT_POLL_TIMEOUT = Duration.ofMillis(1_000);
+  private static final Duration DEFAULT_PARTITION_POLL_TIMEOUT = Duration.ofMillis(200);
+  private static final int DEFAULT_NO_DATA_EMPTY_POLLS = 3;
 
   private final Duration pollTimeout;
+  private final Duration partitionPollTimeout;
+  private final int notDataEmptyPolls; //see EmptyPollsCounter docs
+
   private final Supplier<PollingThrottler> throttlerSupplier;
 
   public static PollingSettings create(ClustersProperties.Cluster cluster,
@@ -21,8 +26,18 @@
         ? Duration.ofMillis(pollingProps.getPollTimeoutMs())
         : DEFAULT_POLL_TIMEOUT;
 
+    var partitionPollTimeout = pollingProps.getPartitionPollTimeout() != null
+        ? Duration.ofMillis(pollingProps.getPartitionPollTimeout())
+        : Duration.ofMillis(pollTimeout.toMillis() / 5);
+
+    int noDataEmptyPolls = pollingProps.getNoDataEmptyPolls() != null
+        ? pollingProps.getNoDataEmptyPolls()
+        : DEFAULT_NO_DATA_EMPTY_POLLS;
+
     return new PollingSettings(
         pollTimeout,
+        partitionPollTimeout,
+        noDataEmptyPolls,
         PollingThrottler.throttlerSupplier(cluster)
     );
   }
@@ -30,20 +45,34 @@
   public static PollingSettings createDefault() {
     return new PollingSettings(
         DEFAULT_POLL_TIMEOUT,
+        DEFAULT_PARTITION_POLL_TIMEOUT,
+        DEFAULT_NO_DATA_EMPTY_POLLS,
         PollingThrottler::noop
     );
   }
 
   private PollingSettings(Duration pollTimeout,
+                          Duration partitionPollTimeout,
+                          int notDataEmptyPolls,
                           Supplier<PollingThrottler> throttlerSupplier) {
     this.pollTimeout = pollTimeout;
+    this.partitionPollTimeout = partitionPollTimeout;
+    this.notDataEmptyPolls = notDataEmptyPolls;
     this.throttlerSupplier = throttlerSupplier;
   }
 
+  public EmptyPollsCounter createEmptyPollsCounter() {
+    return new EmptyPollsCounter(notDataEmptyPolls);
+  }
+
   public Duration getPollTimeout() {
     return pollTimeout;
   }
 
+  public Duration getPartitionPollTimeout() {
+    return partitionPollTimeout;
+  }
+
   public PollingThrottler getPollingThrottler() {
     return throttlerSupplier.get();
   }
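
The defaulting rules in create() resolve roughly like this (hypothetical config values):

// polling.pollTimeoutMs = 2000, the other two properties unset:
//   pollTimeout          = 2000 ms          (explicit)
//   partitionPollTimeout = 2000 / 5 = 400 ms (falls back to pollTimeout / 5)
//   noDataEmptyPolls     = 3                 (DEFAULT_NO_DATA_EMPTY_POLLS)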
@@ -3,8 +3,11 @@ package com.provectus.kafka.ui.emitter;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.util.concurrent.RateLimiter;
 import com.provectus.kafka.ui.config.ClustersProperties;
+import com.provectus.kafka.ui.util.ConsumerRecordsUtil;
 import java.util.function.Supplier;
 import lombok.extern.slf4j.Slf4j;
+import org.apache.kafka.clients.consumer.ConsumerRecords;
+import org.apache.kafka.common.utils.Bytes;
 
 @Slf4j
 public class PollingThrottler {
@@ -33,17 +36,18 @@
     return new PollingThrottler("noop", RateLimiter.create(Long.MAX_VALUE));
   }
 
-  //returns true if polling was throttled
-  public boolean throttleAfterPoll(int polledBytes) {
+  public void throttleAfterPoll(int polledBytes) {
     if (polledBytes > 0) {
       double sleptSeconds = rateLimiter.acquire(polledBytes);
       if (!throttled && sleptSeconds > 0.0) {
         throttled = true;
         log.debug("Polling throttling enabled for cluster {} at rate {} bytes/sec", clusterName, rateLimiter.getRate());
-        return true;
       }
     }
-    return false;
+  }
+
+  public void throttleAfterPoll(ConsumerRecords<Bytes, Bytes> polled) {
+    throttleAfterPoll(ConsumerRecordsUtil.calculatePolledSize(polled));
   }
 
 }
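
The throttler maps bytes to Guava RateLimiter permits, one permit per byte. A standalone sketch of that idea (the 1 MB/s figure is made up):

import com.google.common.util.concurrent.RateLimiter;

class ByteRateThrottleSketch {
  public static void main(String[] args) {
    RateLimiter limiter = RateLimiter.create(1_000_000); // budget: 1,000,000 permits (bytes) per second
    int polledBytes = 2_000_000;                         // a 2 MB batch just came in
    double slept = limiter.acquire(polledBytes);         // returns seconds spent waiting
    // The first acquire may return ~0; later calls pay off the accumulated burst,
    // keeping the long-run rate at or below the configured bytes/sec.
    System.out.printf("slept %.2f s%n", slept);
  }
}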
@@ -1,98 +0,0 @@
-package com.provectus.kafka.ui.emitter;
-
-import com.provectus.kafka.ui.model.ConsumerPosition;
-import com.provectus.kafka.ui.model.TopicMessageEventDTO;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.TreeMap;
-import java.util.function.Supplier;
-import lombok.extern.slf4j.Slf4j;
-import org.apache.kafka.clients.consumer.ConsumerRecord;
-import org.apache.kafka.common.TopicPartition;
-import org.apache.kafka.common.errors.InterruptException;
-import org.apache.kafka.common.utils.Bytes;
-import reactor.core.publisher.FluxSink;
-
-@Slf4j
-abstract class RangePollingEmitter extends AbstractEmitter {
-
-  private final Supplier<EnhancedConsumer> consumerSupplier;
-  protected final ConsumerPosition consumerPosition;
-  protected final int messagesPerPage;
-
-  protected RangePollingEmitter(Supplier<EnhancedConsumer> consumerSupplier,
-                                ConsumerPosition consumerPosition,
-                                int messagesPerPage,
-                                MessagesProcessing messagesProcessing,
-                                PollingSettings pollingSettings) {
-    super(messagesProcessing, pollingSettings);
-    this.consumerPosition = consumerPosition;
-    this.messagesPerPage = messagesPerPage;
-    this.consumerSupplier = consumerSupplier;
-  }
-
-  protected record FromToOffset(/*inclusive*/ long from, /*exclusive*/ long to) {
-  }
-
-  //should return empty map if polling should be stopped
-  protected abstract TreeMap<TopicPartition, FromToOffset> nextPollingRange(
-      TreeMap<TopicPartition, FromToOffset> prevRange, //empty on start
-      SeekOperations seekOperations
-  );
-
-  @Override
-  public void accept(FluxSink<TopicMessageEventDTO> sink) {
-    log.debug("Starting polling for {}", consumerPosition);
-    try (EnhancedConsumer consumer = consumerSupplier.get()) {
-      sendPhase(sink, "Consumer created");
-      var seekOperations = SeekOperations.create(consumer, consumerPosition);
-      TreeMap<TopicPartition, FromToOffset> pollRange = nextPollingRange(new TreeMap<>(), seekOperations);
-      log.debug("Starting from offsets {}", pollRange);
-
-      while (!sink.isCancelled() && !pollRange.isEmpty() && !sendLimitReached()) {
-        var polled = poll(consumer, sink, pollRange);
-        send(sink, polled);
-        pollRange = nextPollingRange(pollRange, seekOperations);
-      }
-      if (sink.isCancelled()) {
-        log.debug("Polling finished due to sink cancellation");
-      }
-      sendFinishStatsAndCompleteSink(sink);
-      log.debug("Polling finished");
-    } catch (InterruptException kafkaInterruptException) {
-      log.debug("Polling finished due to thread interruption");
-      sink.complete();
-    } catch (Exception e) {
-      log.error("Error occurred while consuming records", e);
-      sink.error(e);
-    }
-  }
-
-  private List<ConsumerRecord<Bytes, Bytes>> poll(EnhancedConsumer consumer,
-                                                  FluxSink<TopicMessageEventDTO> sink,
-                                                  TreeMap<TopicPartition, FromToOffset> range) {
-    log.trace("Polling range {}", range);
-    sendPhase(sink,
-        "Polling partitions: %s".formatted(range.keySet().stream().map(TopicPartition::partition).sorted().toList()));
-
-    consumer.assign(range.keySet());
-    range.forEach((tp, fromTo) -> consumer.seek(tp, fromTo.from));
-
-    List<ConsumerRecord<Bytes, Bytes>> result = new ArrayList<>();
-    while (!sink.isCancelled() && consumer.paused().size() < range.size()) {
-      var polledRecords = poll(sink, consumer);
-      range.forEach((tp, fromTo) -> {
-        polledRecords.records(tp).stream()
-            .filter(r -> r.offset() < fromTo.to)
-            .forEach(result::add);
-
-        //next position is out of target range -> pausing partition
-        if (consumer.position(tp) >= fromTo.to) {
-          consumer.pause(List.of(tp));
-        }
-      });
-    }
-    consumer.resume(consumer.paused());
-    return result;
-  }
-}
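
The pause/resume dance in the private poll(...) above condenses to four steps, summarized here since the control flow is easy to lose in the diff:

// 1. assign() the partitions of the range and seek() each to its `from` offset;
// 2. poll in a loop, keeping only records with offset < `to`;
// 3. once consumer.position(tp) >= fromTo.to, pause(tp) so further polls stop
//    returning data for that partition while the others catch up;
// 4. exit when every partition in the range is paused, then resume() them all
//    so the same consumer can serve the next range.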
@@ -10,18 +10,17 @@ import java.util.stream.Collectors;
 import javax.annotation.Nullable;
 import lombok.AccessLevel;
 import lombok.RequiredArgsConstructor;
-import org.apache.commons.lang3.mutable.MutableLong;
 import org.apache.kafka.clients.consumer.Consumer;
 import org.apache.kafka.common.TopicPartition;
 
 @RequiredArgsConstructor(access = AccessLevel.PACKAGE)
-public class SeekOperations {
+class SeekOperations {
 
   private final Consumer<?, ?> consumer;
   private final OffsetsInfo offsetsInfo;
   private final Map<TopicPartition, Long> offsetsForSeek; //only contains non-empty partitions!
 
-  public static SeekOperations create(Consumer<?, ?> consumer, ConsumerPosition consumerPosition) {
+  static SeekOperations create(Consumer<?, ?> consumer, ConsumerPosition consumerPosition) {
     OffsetsInfo offsetsInfo;
     if (consumerPosition.getSeekTo() == null) {
       offsetsInfo = new OffsetsInfo(consumer, consumerPosition.getTopic());
@@ -35,37 +34,25 @@
     );
   }
 
-  public void assignAndSeekNonEmptyPartitions() {
+  void assignAndSeekNonEmptyPartitions() {
     consumer.assign(offsetsForSeek.keySet());
     offsetsForSeek.forEach(consumer::seek);
   }
 
-  public Map<TopicPartition, Long> getBeginOffsets() {
+  Map<TopicPartition, Long> getBeginOffsets() {
     return offsetsInfo.getBeginOffsets();
   }
 
-  public Map<TopicPartition, Long> getEndOffsets() {
+  Map<TopicPartition, Long> getEndOffsets() {
     return offsetsInfo.getEndOffsets();
   }
 
-  public boolean assignedPartitionsFullyPolled() {
+  boolean assignedPartitionsFullyPolled() {
     return offsetsInfo.assignedPartitionsFullyPolled();
   }
 
-  // sum of (end - start) offsets for all partitions
-  public long summaryOffsetsRange() {
-    return offsetsInfo.summaryOffsetsRange();
-  }
-
-  // sum of differences between initial consumer seek and current consumer position (across all partitions)
-  public long offsetsProcessedFromSeek() {
-    MutableLong count = new MutableLong();
-    offsetsForSeek.forEach((tp, initialOffset) -> count.add(consumer.position(tp) - initialOffset));
-    return count.getValue();
-  }
-
-  // Get offsets to seek to. NOTE: offsets do not contain empty partitions offsets
-  public Map<TopicPartition, Long> getOffsetsForSeek() {
+  Map<TopicPartition, Long> getOffsetsForSeek() {
     return offsetsForSeek;
   }
 
@@ -74,19 +61,19 @@
    */
   @VisibleForTesting
   static Map<TopicPartition, Long> getOffsetsForSeek(Consumer<?, ?> consumer,
-                                                     OffsetsInfo offsetsInfo,
-                                                     SeekTypeDTO seekType,
-                                                     @Nullable Map<TopicPartition, Long> seekTo) {
+                                                      OffsetsInfo offsetsInfo,
+                                                      SeekTypeDTO seekType,
+                                                      @Nullable Map<TopicPartition, Long> seekTo) {
     switch (seekType) {
       case LATEST:
         return consumer.endOffsets(offsetsInfo.getNonEmptyPartitions());
       case BEGINNING:
         return consumer.beginningOffsets(offsetsInfo.getNonEmptyPartitions());
      case OFFSET:
        Preconditions.checkNotNull(seekTo);
        Preconditions.checkNotNull(offsetsInfo);
        return fixOffsets(offsetsInfo, seekTo);
      case TIMESTAMP:
        Preconditions.checkNotNull(seekTo);
        Preconditions.checkNotNull(offsetsInfo);
        return offsetsForTimestamp(consumer, offsetsInfo, seekTo);
      default:
        throw new IllegalStateException();
@@ -113,7 +100,7 @@
   }
 
   private static Map<TopicPartition, Long> offsetsForTimestamp(Consumer<?, ?> consumer, OffsetsInfo offsetsInfo,
-                                                               Map<TopicPartition, Long> timestamps) {
+                                                                Map<TopicPartition, Long> timestamps) {
     timestamps = new HashMap<>(timestamps);
     timestamps.keySet().retainAll(offsetsInfo.getNonEmptyPartitions());
 
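
Putting the seek types side by side with invented offsets; fixOffsets() and offsetsForTimestamp() are not fully shown in this diff, so the OFFSET and TIMESTAMP rows describe their apparent intent, not confirmed behavior:

// Non-empty partitions p0 [begin=100, end=150] and p1 [begin=0, end=40]:
//   BEGINNING -> {p0=100, p1=0}    via consumer.beginningOffsets(...)
//   LATEST    -> {p0=150, p1=40}   via consumer.endOffsets(...)
//   OFFSET    -> the user-supplied map, adjusted by fixOffsets(...)
//   TIMESTAMP -> per-partition offsets resolved from the given timestamps
// Empty partitions are excluded throughout, which is why offsetsForSeek
// "only contains non-empty partitions".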
@@ -1,28 +1,27 @@
 package com.provectus.kafka.ui.emitter;
 
 import com.provectus.kafka.ui.model.ConsumerPosition;
-import com.provectus.kafka.ui.model.TopicMessageDTO;
 import com.provectus.kafka.ui.model.TopicMessageEventDTO;
-import com.provectus.kafka.ui.serdes.ConsumerRecordDeserializer;
 import java.util.HashMap;
-import java.util.function.Predicate;
 import java.util.function.Supplier;
 import lombok.extern.slf4j.Slf4j;
+import org.apache.kafka.clients.consumer.KafkaConsumer;
 import org.apache.kafka.common.errors.InterruptException;
+import org.apache.kafka.common.utils.Bytes;
 import reactor.core.publisher.FluxSink;
 
 @Slf4j
-public class TailingEmitter extends AbstractEmitter {
+public class TailingEmitter extends AbstractEmitter
+    implements java.util.function.Consumer<FluxSink<TopicMessageEventDTO>> {
 
-  private final Supplier<EnhancedConsumer> consumerSupplier;
+  private final Supplier<KafkaConsumer<Bytes, Bytes>> consumerSupplier;
   private final ConsumerPosition consumerPosition;
 
-  public TailingEmitter(Supplier<EnhancedConsumer> consumerSupplier,
+  public TailingEmitter(Supplier<KafkaConsumer<Bytes, Bytes>> consumerSupplier,
                         ConsumerPosition consumerPosition,
-                        ConsumerRecordDeserializer deserializer,
-                        Predicate<TopicMessageDTO> filter,
+                        MessagesProcessing messagesProcessing,
                         PollingSettings pollingSettings) {
-    super(new MessagesProcessing(deserializer, filter, false, null), pollingSettings);
+    super(messagesProcessing, pollingSettings);
     this.consumerSupplier = consumerSupplier;
     this.consumerPosition = consumerPosition;
   }
@@ -30,12 +29,12 @@ public class TailingEmitter extends AbstractEmitter {
   @Override
   public void accept(FluxSink<TopicMessageEventDTO> sink) {
     log.debug("Starting tailing polling for {}", consumerPosition);
-    try (EnhancedConsumer consumer = consumerSupplier.get()) {
+    try (KafkaConsumer<Bytes, Bytes> consumer = consumerSupplier.get()) {
       assignAndSeek(consumer);
       while (!sink.isCancelled()) {
         sendPhase(sink, "Polling");
         var polled = poll(sink, consumer);
-        send(sink, polled);
+        polled.forEach(r -> sendMessage(sink, r));
       }
       sink.complete();
       log.debug("Tailing finished");
@@ -48,7 +47,7 @@ public class TailingEmitter extends AbstractEmitter {
     }
   }
 
-  private void assignAndSeek(EnhancedConsumer consumer) {
+  private void assignAndSeek(KafkaConsumer<Bytes, Bytes> consumer) {
     var seekOperations = SeekOperations.create(consumer, consumerPosition);
     var seekOffsets = new HashMap<>(seekOperations.getEndOffsets()); // defaulting offsets to topic end
     seekOffsets.putAll(seekOperations.getOffsetsForSeek()); // this will only set non-empty partitions
@@ -106,7 +106,7 @@ public class GlobalErrorWebExceptionHandler extends AbstractErrorWebExceptionHandler {
           err.setFieldName(e.getKey());
           err.setRestrictions(List.copyOf(e.getValue()));
           return err;
-        }).toList();
+        }).collect(Collectors.toList());
 
     var message = fieldsErrors.isEmpty()
         ? exception.getMessage()
@@ -1,7 +0,0 @@
-package com.provectus.kafka.ui.exception;
-
-public class JsonAvroConversionException extends ValidationException {
-  public JsonAvroConversionException(String message) {
-    super(message);
-  }
-}
Some files were not shown because too many files have changed in this diff.