Compare commits

..

12 commits

Author | SHA1 | Message | Date
Roman Zabaluev
56fa824510 BE: RBAC: Implement an authorities extractor to support subject-level role matching (#3979)
Co-authored-by: Ilya Kuramshin <iliax@proton.me>
(cherry picked from commit b700ac3991)
2023-06-27 20:42:41 +08:00
Roman Zabaluev
b0c0e06f19 BE: RBAC: Fix viewable topics filter (#3946)
(cherry picked from commit 6fe6165427)
2023-06-27 20:42:34 +08:00
Roman Zabaluev
556ec290eb BE: Make webclients use system proxy (#3881)
Co-authored-by: Ilya Kuramshin <iliax@proton.me>
(cherry picked from commit f19abb2036)
2023-06-27 20:42:12 +08:00
Roman Zabaluev
2b334d5209 FE: Fix react query not initiating requests for local host (#3915)
(cherry picked from commit 2ac8646769)
2023-06-27 20:41:48 +08:00
Roman Zabaluev
59f03200f4 FE: Fix permissions check for cluster edit (#3862)
(cherry picked from commit 71a7a1ec84)
2023-06-27 20:41:31 +08:00
David Bejanyan
aa633f424c FE: Fix latest version is null (#3833)
Co-authored-by: Roman Zabaluev <rzabaluev@provectus.com>
(cherry picked from commit f22c910f5c)
2023-06-27 20:41:20 +08:00
Ilya Kuramshin
f7aaea85f4 BE: Chore: CVEs fixes, May 2023 (#3840)
Co-authored-by: iliax <ikuramshin@provectus.com>
(cherry picked from commit f7d85d86e6)
2023-06-27 20:41:06 +08:00
Roman Zabaluev
4ae1da6ebf RBAC: Fix KC restart permissions (#3891)
Co-authored-by: David Bejanyan <58771979+David-DB88@users.noreply.github.com>
(cherry picked from commit 8a68ba0778)
2023-06-27 20:39:50 +08:00
Roman Zabaluev
3377289517 FE: Ignore expected 404 on topic statistics page (#3964)
(cherry picked from commit b9bbb1a823)
2023-06-27 20:38:23 +08:00
Roman Zabaluev
8727393501 BE: Fix CORS once again (#3957)
(cherry picked from commit 9549f68d7e)
2023-06-27 20:38:03 +08:00
Vikas Rajput
26464ba37d FE: Messages: Reset timestamp value w/ Clear all filters (#3923)
Co-authored-by: Roman Zabaluev <rzabaluev@provectus.com>
(cherry picked from commit d0088490a4)
2023-06-27 20:37:39 +08:00
David Bejanyan
023e8e3b3c FE: Fix version display with a tag (#3827)
(cherry picked from commit ab9d0e2b3f)
2023-06-27 20:37:07 +08:00
322 changed files with 5301 additions and 10202 deletions

.github/CODEOWNERS (vendored): 4 changes

@ -14,5 +14,5 @@
# TESTS
/kafka-ui-e2e-checks/ @provectus/kafka-qa
# INFRA
/.github/workflows/ @provectus/kafka-devops
# HELM CHARTS
/charts/ @provectus/kafka-devops


@ -1,8 +1,5 @@
blank_issues_enabled: false
contact_links:
- name: Report helm issue
url: https://github.com/provectus/kafka-ui-charts
about: Our helm charts are located in another repo. Please raise issues/PRs regarding charts in that repo.
- name: Official documentation
url: https://docs.kafka-ui.provectus.io/
about: Before reaching out for support, please refer to our documentation. Read "FAQ" and "Common problems", also try using search there.

.github/ISSUE_TEMPLATE/helm.yml (vendored, new file): 92 changes

@ -0,0 +1,92 @@
name: "⎈ K8s/Helm problem report"
description: "Report a problem with k8s/helm charts/etc"
labels: ["status/triage", "scope/k8s"]
assignees: []
body:
- type: markdown
attributes:
value: |
Hi, thanks for raising the issue(-s), all contributions really matter!
Please, note that we'll close the issue without further explanation if you don't follow
this template and don't provide the information requested within this template.
- type: checkboxes
id: terms
attributes:
label: Issue submitter TODO list
description: By you checking these checkboxes we can be sure you've done the essential things.
options:
- label: I've looked up my issue in [FAQ](https://docs.kafka-ui.provectus.io/faq/common-problems)
required: true
- label: I've searched for an already existing issues [here](https://github.com/provectus/kafka-ui/issues)
required: true
- label: I've tried running `master`-labeled docker image and the issue still persists there
required: true
- label: I'm running a supported version of the application which is listed [here](https://github.com/provectus/kafka-ui/blob/master/SECURITY.md)
required: true
- type: textarea
attributes:
label: Describe the bug (actual behavior)
description: A clear and concise description of what the bug is. Use a list, if there is more than one problem
validations:
required: true
- type: textarea
attributes:
label: Expected behavior
description: A clear and concise description of what you expected to happen
validations:
required: false
- type: textarea
attributes:
label: Your installation details
description: |
How do you run the app? Please provide as much info as possible:
1. App version (commit hash in the top left corner of the UI)
2. Helm chart version
3. Your application config. Please remove the sensitive info like passwords or API keys.
4. Any IAAC configs
validations:
required: true
- type: textarea
attributes:
label: Steps to reproduce
description: |
Please write down the order of the actions required to reproduce the issue.
For the advanced setups/complicated issue, we might need you to provide
a minimal [reproducible example](https://stackoverflow.com/help/minimal-reproducible-example).
validations:
required: true
- type: textarea
attributes:
label: Screenshots
description: |
If applicable, add screenshots to help explain your problem
validations:
required: false
- type: textarea
attributes:
label: Logs
description: |
If applicable, *upload* screenshots to help explain your problem
validations:
required: false
- type: textarea
attributes:
label: Additional context
description: |
Add any other context about the problem here. E.G.:
1. Are there any alternative scenarios (different data/methods/configuration/setup) you have tried?
Were they successful or the same issue occurred? Please provide steps as well.
2. Related issues (if there are any).
3. Logs (if available)
4. Is there any serious impact or behaviour on the end-user because of this issue, that can be overlooked?
validations:
required: false


@ -1,4 +1,4 @@
name: "Infra: Release: AWS Marketplace Publisher"
name: AWS Marketplace Publisher
on:
workflow_dispatch:
inputs:
@ -24,14 +24,14 @@ jobs:
- name: Clone infra repo
run: |
echo "Cloning repo..."
git clone https://infra-tech:${{ secrets.INFRA_USER_ACCESS_TOKEN }}@github.com/provectus/kafka-ui-infra.git --branch ${{ github.event.inputs.KafkaUIInfraBranch }}
git clone https://kafka-ui-infra:${{ secrets.KAFKA_UI_INFRA_TOKEN }}@gitlab.provectus.com/provectus-internals/kafka-ui-infra.git --branch ${{ github.event.inputs.KafkaUIInfraBranch }}
echo "Cd to packer DIR..."
cd kafka-ui-infra/ami
echo "WORK_DIR=$(pwd)" >> $GITHUB_ENV
echo "Packer will be triggered in this dir $WORK_DIR"
- name: Configure AWS credentials for Kafka-UI account
uses: aws-actions/configure-aws-credentials@v3
uses: aws-actions/configure-aws-credentials@v2
with:
aws-access-key-id: ${{ secrets.AWS_AMI_PUBLISH_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_AMI_PUBLISH_KEY_SECRET }}


@ -1,4 +1,4 @@
name: "Backend: PR/master build & test"
name: Backend build and test
on:
push:
branches:
@ -8,9 +8,6 @@ on:
paths:
- "kafka-ui-api/**"
- "pom.xml"
permissions:
checks: write
pull-requests: write
jobs:
build-and-test:
runs-on: ubuntu-latest
@ -32,7 +29,7 @@ jobs:
key: ${{ runner.os }}-sonar
restore-keys: ${{ runner.os }}-sonar
- name: Build and analyze pull request target
if: ${{ github.event_name == 'pull_request' }}
if: ${{ github.event_name == 'pull_request_target' }}
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
SONAR_TOKEN: ${{ secrets.SONAR_TOKEN_BACKEND }}


@ -1,4 +1,4 @@
name: "Infra: PR block merge"
name: Pull Request Labels
on:
pull_request:
types: [opened, labeled, unlabeled, synchronize]
@ -6,7 +6,7 @@ jobs:
block_merge:
runs-on: ubuntu-latest
steps:
- uses: mheap/github-action-required-labels@v5
- uses: mheap/github-action-required-labels@v4
with:
mode: exactly
count: 0


@ -1,4 +1,4 @@
name: "Infra: Feature Testing: Init env"
name: Feature testing init
on:
workflow_dispatch:
@ -45,7 +45,7 @@ jobs:
restore-keys: |
${{ runner.os }}-buildx-
- name: Configure AWS credentials for Kafka-UI account
uses: aws-actions/configure-aws-credentials@v3
uses: aws-actions/configure-aws-credentials@v2
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
@ -73,33 +73,29 @@ jobs:
steps:
- name: clone
run: |
git clone https://infra-tech:${{ secrets.INFRA_USER_ACCESS_TOKEN }}@github.com/provectus/kafka-ui-infra.git --branch envs
git clone https://kafka-ui-infra:${{ secrets.KAFKA_UI_INFRA_TOKEN }}@gitlab.provectus.com/provectus-internals/kafka-ui-infra.git
- name: create deployment
run: |
cd kafka-ui-infra/aws-infrastructure4eks/argocd/scripts
echo "Branch:${{ needs.build.outputs.tag }}"
./kafka-ui-deployment-from-branch.sh ${{ needs.build.outputs.tag }} ${{ github.event.label.name }} ${{ secrets.FEATURE_TESTING_UI_PASSWORD }}
git config --global user.email "infra-tech@provectus.com"
git config --global user.name "infra-tech"
git config --global user.email "kafka-ui-infra@provectus.com"
git config --global user.name "kafka-ui-infra"
git add ../kafka-ui-from-branch/
git commit -m "added env:${{ needs.build.outputs.deploy }}" && git push || true
- name: update status check for private deployment
- name: make comment with private deployment link
if: ${{ github.event.label.name == 'status/feature_testing' }}
uses: Sibz/github-status-action@v1.1.6
uses: peter-evans/create-or-update-comment@v3
with:
authToken: ${{secrets.GITHUB_TOKEN}}
context: "Click Details button to open custom deployment page"
state: "success"
sha: ${{ github.event.pull_request.head.sha || github.sha }}
target_url: "http://${{ needs.build.outputs.tag }}.internal.kafka-ui.provectus.io"
issue-number: ${{ github.event.pull_request.number }}
body: |
Custom deployment will be available at http://${{ needs.build.outputs.tag }}.internal.kafka-ui.provectus.io
- name: update status check for public deployment
- name: make comment with public deployment link
if: ${{ github.event.label.name == 'status/feature_testing_public' }}
uses: Sibz/github-status-action@v1.1.6
uses: peter-evans/create-or-update-comment@v3
with:
authToken: ${{secrets.GITHUB_TOKEN}}
context: "Click Details button to open custom deployment page"
state: "success"
sha: ${{ github.event.pull_request.head.sha || github.sha }}
target_url: "http://${{ needs.build.outputs.tag }}.internal.kafka-ui.provectus.io"
issue-number: ${{ github.event.pull_request.number }}
body: |
Custom deployment will be available at http://${{ needs.build.outputs.tag }}.kafka-ui.provectus.io in 5 minutes


@ -1,4 +1,4 @@
name: "Infra: Feature Testing: Destroy env"
name: Feature testing destroy
on:
workflow_dispatch:
pull_request:
@ -11,12 +11,18 @@ jobs:
- uses: actions/checkout@v3
- name: clone
run: |
git clone https://infra-tech:${{ secrets.INFRA_USER_ACCESS_TOKEN }}@github.com/provectus/kafka-ui-infra.git --branch envs
git clone https://kafka-ui-infra:${{ secrets.KAFKA_UI_INFRA_TOKEN }}@gitlab.provectus.com/provectus-internals/kafka-ui-infra.git
- name: remove env
run: |
cd kafka-ui-infra/aws-infrastructure4eks/argocd/scripts
./delete-env.sh pr${{ github.event.pull_request.number }} || true
git config --global user.email "infra-tech@provectus.com"
git config --global user.name "infra-tech"
git config --global user.email "kafka-ui-infra@provectus.com"
git config --global user.name "kafka-ui-infra"
git add ../kafka-ui-from-branch/
git commit -m "removed env:${{ needs.build.outputs.deploy }}" && git push || true
- name: make comment with deployment link
uses: peter-evans/create-or-update-comment@v3
with:
issue-number: ${{ github.event.pull_request.number }}
body: |
Custom deployment removed


@ -1,4 +1,4 @@
name: "Infra: Image Testing: Deploy"
name: Build Docker image and push
on:
workflow_dispatch:
pull_request:
@ -42,7 +42,7 @@ jobs:
restore-keys: |
${{ runner.os }}-buildx-
- name: Configure AWS credentials for Kafka-UI account
uses: aws-actions/configure-aws-credentials@v3
uses: aws-actions/configure-aws-credentials@v2
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
@ -70,5 +70,6 @@ jobs:
issue-number: ${{ github.event.pull_request.number }}
body: |
Image published at public.ecr.aws/provectus/kafka-ui-custom-build:${{ steps.extract_branch.outputs.tag }}
outputs:
tag: ${{ steps.extract_branch.outputs.tag }}


@ -0,0 +1,28 @@
name: Prepare helm release
on:
repository_dispatch:
types: [prepare-helm-release]
jobs:
change-app-version:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- run: |
git config user.name github-actions
git config user.email github-actions@github.com
- name: Change versions
run: |
git checkout -b release-${{ github.event.client_payload.appversion}}
version=$(cat charts/kafka-ui/Chart.yaml | grep version | awk '{print $2}')
version=${version%.*}.$((${version##*.}+1))
sed -i "s/version:.*/version: ${version}/" charts/kafka-ui/Chart.yaml
sed -i "s/appVersion:.*/appVersion: ${{ github.event.client_payload.appversion}}/" charts/kafka-ui/Chart.yaml
git add charts/kafka-ui/Chart.yaml
git commit -m "release ${version}"
git push --set-upstream origin release-${{ github.event.client_payload.appversion}}
- name: Slack Notification
uses: rtCamp/action-slack-notify@v2
env:
SLACK_TITLE: "release-${{ github.event.client_payload.appversion}}"
SLACK_MESSAGE: "A new release of the helm chart has been prepared. Branch name: release-${{ github.event.client_payload.appversion}}"
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}


@ -55,7 +55,7 @@ jobs:
cache-to: type=local,dest=/tmp/.buildx-cache
- name: Run CVE checks
uses: aquasecurity/trivy-action@0.12.0
uses: aquasecurity/trivy-action@0.10.0
with:
image-ref: "provectuslabs/kafka-ui:${{ steps.build.outputs.version }}"
format: "table"


@ -1,4 +1,4 @@
name: "Infra: Image Testing: Delete"
name: Delete Public ECR Image
on:
workflow_dispatch:
pull_request:
@ -15,7 +15,7 @@ jobs:
tag='${{ github.event.pull_request.number }}'
echo "tag=${tag}" >> $GITHUB_OUTPUT
- name: Configure AWS credentials for Kafka-UI account
uses: aws-actions/configure-aws-credentials@v3
uses: aws-actions/configure-aws-credentials@v2
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
@ -32,3 +32,9 @@ jobs:
--repository-name kafka-ui-custom-build \
--image-ids imageTag=${{ steps.extract_branch.outputs.tag }} \
--region us-east-1
- name: make comment with private deployment link
uses: peter-evans/create-or-update-comment@v3
with:
issue-number: ${{ github.event.pull_request.number }}
body: |
Image tag public.ecr.aws/provectus/kafka-ui-custom-build:${{ steps.extract_branch.outputs.tag }} has been removed


@ -1,4 +1,4 @@
name: "Infra: Docs: URL linter"
name: Documentation URLs linter
on:
pull_request:
types:


@ -1,4 +1,4 @@
name: "E2E: Automation suite"
name: E2E Automation suite
on:
workflow_dispatch:
inputs:
@ -24,7 +24,7 @@ jobs:
with:
ref: ${{ github.sha }}
- name: Configure AWS credentials
uses: aws-actions/configure-aws-credentials@v3
uses: aws-actions/configure-aws-credentials@v2
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}


@ -1,4 +1,4 @@
name: "E2E: PR healthcheck"
name: E2E PR health check
on:
pull_request_target:
types: [ "opened", "edited", "reopened", "synchronize" ]
@ -8,8 +8,6 @@ on:
- "kafka-ui-react-app/**"
- "kafka-ui-e2e-checks/**"
- "pom.xml"
permissions:
statuses: write
jobs:
build-and-test:
runs-on: ubuntu-latest
@ -18,10 +16,10 @@ jobs:
with:
ref: ${{ github.event.pull_request.head.sha }}
- name: Configure AWS credentials
uses: aws-actions/configure-aws-credentials@v3
uses: aws-actions/configure-aws-credentials@v2
with:
aws-access-key-id: ${{ secrets.S3_AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.S3_AWS_SECRET_ACCESS_KEY }}
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
aws-region: eu-central-1
- name: Set up environment
id: set_env_values
@ -47,7 +45,7 @@ jobs:
# use the following command until #819 will be fixed
run: |
docker-compose -f kafka-ui-e2e-checks/docker/selenoid-git.yaml up -d
docker-compose -f ./documentation/compose/e2e-tests.yaml up -d && until [ "$(docker exec kafka-ui wget --spider --server-response http://localhost:8080/actuator/health 2>&1 | grep -c 'HTTP/1.1 200 OK')" == "1" ]; do echo "Waiting for kafka-ui ..." && sleep 1; done
docker-compose -f ./documentation/compose/e2e-tests.yaml up -d
- name: Run test suite
run: |
./mvnw -B -ntp versions:set -DnewVersion=${{ github.event.pull_request.head.sha }}


@ -1,4 +1,4 @@
name: "E2E: Manual suite"
name: E2E Manual suite
on:
workflow_dispatch:
inputs:


@ -1,4 +1,4 @@
name: "E2E: Weekly suite"
name: E2E Weekly suite
on:
schedule:
- cron: '0 1 * * 1'
@ -11,7 +11,7 @@ jobs:
with:
ref: ${{ github.sha }}
- name: Configure AWS credentials
uses: aws-actions/configure-aws-credentials@v3
uses: aws-actions/configure-aws-credentials@v2
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}


@ -1,4 +1,4 @@
name: "Frontend: PR/master build & test"
name: Frontend build and test
on:
push:
branches:
@ -8,9 +8,6 @@ on:
paths:
- "kafka-ui-contract/**"
- "kafka-ui-react-app/**"
permissions:
checks: write
pull-requests: write
jobs:
build-and-test:
env:
@ -23,13 +20,13 @@ jobs:
# Disabling shallow clone is recommended for improving relevancy of reporting
fetch-depth: 0
ref: ${{ github.event.pull_request.head.sha }}
- uses: pnpm/action-setup@v2.4.0
- uses: pnpm/action-setup@v2.2.4
with:
version: 8.6.12
version: 7.4.0
- name: Install node
uses: actions/setup-node@v3.8.1
uses: actions/setup-node@v3.6.0
with:
node-version: "18.17.1"
node-version: "16.15.0"
cache: "pnpm"
cache-dependency-path: "./kafka-ui-react-app/pnpm-lock.yaml"
- name: Install Node dependencies
@ -49,7 +46,7 @@ jobs:
cd kafka-ui-react-app/
pnpm test:CI
- name: SonarCloud Scan
uses: sonarsource/sonarcloud-github-action@master
uses: workshur/sonarcloud-github-action@improved_basedir
with:
projectBaseDir: ./kafka-ui-react-app
args: -Dsonar.pullrequest.key=${{ github.event.pull_request.number }} -Dsonar.pullrequest.branch=${{ github.head_ref }} -Dsonar.pullrequest.base=${{ github.base_ref }}

.github/workflows/helm.yaml (vendored, new file): 38 changes

@ -0,0 +1,38 @@
name: Helm linter
on:
pull_request:
types: ["opened", "edited", "reopened", "synchronize"]
branches:
- 'master'
paths:
- "charts/**"
jobs:
build-and-test:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- name: Helm tool installer
uses: Azure/setup-helm@v3
- name: Setup Kubeval
uses: lra/setup-kubeval@v1.0.1
#check, was helm version increased in Chart.yaml?
- name: Check version
shell: bash
run: |
helm_version_new=$(cat charts/kafka-ui/Chart.yaml | grep version | awk '{print $2}')
helm_version_old=$(curl -s https://raw.githubusercontent.com/provectus/kafka-ui/master/charts/kafka-ui/Chart.yaml | grep version | awk '{print $2}' )
echo $helm_version_old
echo $helm_version_new
if [[ "$helm_version_new" > "$helm_version_old" ]]; then exit 0 ; else exit 1 ; fi
- name: Run kubeval
shell: bash
run: |
sed -i "s@enabled: false@enabled: true@g" charts/kafka-ui/values.yaml
K8S_VERSIONS=$(git ls-remote --refs --tags https://github.com/kubernetes/kubernetes.git | cut -d/ -f3 | grep -e '^v1\.[0-9]\{2\}\.[0]\{1,2\}$' | grep -v -e '^v1\.1[0-7]\{1\}' | cut -c2-)
echo "NEXT K8S VERSIONS ARE GOING TO BE TESTED: $K8S_VERSIONS"
echo ""
for version in $K8S_VERSIONS
do
echo $version;
helm template --kube-version $version --set ingress.enabled=true charts/kafka-ui -f charts/kafka-ui/values.yaml | kubeval --additional-schema-locations https://raw.githubusercontent.com/yannh/kubernetes-json-schema/master --strict -v $version;
done


@ -1,4 +1,4 @@
name: "Master: Build & deploy"
name: Master branch build & deploy
on:
workflow_dispatch:
push:
@ -58,7 +58,6 @@ jobs:
builder: ${{ steps.buildx.outputs.name }}
context: kafka-ui-api
platforms: linux/amd64,linux/arm64
provenance: false
push: true
tags: |
provectuslabs/kafka-ui:${{ steps.build.outputs.version }}
@ -74,11 +73,11 @@ jobs:
#################################
- name: update-master-deployment
run: |
git clone https://infra-tech:${{ secrets.INFRA_USER_ACCESS_TOKEN }}@github.com/provectus/kafka-ui-infra.git --branch master
git clone https://kafka-ui-infra:${{ secrets.KAFKA_UI_INFRA_TOKEN }}@gitlab.provectus.com/provectus-internals/kafka-ui-infra.git --branch master
cd kafka-ui-infra/aws-infrastructure4eks/argocd/scripts
echo "Image digest is:${{ steps.docker_build_and_push.outputs.digest }}"
./kafka-ui-update-master-digest.sh ${{ steps.docker_build_and_push.outputs.digest }}
git config --global user.email "infra-tech@provectus.com"
git config --global user.name "infra-tech"
git config --global user.email "kafka-ui-infra@provectus.com"
git config --global user.name "kafka-ui-infra"
git add ../kafka-ui/*
git commit -m "updated master image digest: ${{ steps.docker_build_and_push.outputs.digest }}" && git push


@ -1,14 +1,13 @@
name: "PR: Checklist linter"
name: "PR Checklist checked"
on:
pull_request_target:
types: [opened, edited, synchronize, reopened]
permissions:
checks: write
jobs:
task-check:
runs-on: ubuntu-latest
steps:
- uses: kentaro-m/task-completed-checker-action@v0.1.2
- uses: kentaro-m/task-completed-checker-action@v0.1.1
with:
repo-token: "${{ secrets.GITHUB_TOKEN }}"
- uses: dekinderfiets/pr-description-enforcer@0.0.1

.github/workflows/release-helm.yaml (vendored, new file): 39 changes

@ -0,0 +1,39 @@
name: Release helm
on:
push:
branches:
- master
paths:
- "charts/**"
jobs:
release-helm:
runs-on:
ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 1
- run: |
git config user.name github-actions
git config user.email github-actions@github.com
- uses: azure/setup-helm@v3
- name: add chart #realse helm with new version
run: |
VERSION=$(cat charts/kafka-ui/Chart.yaml | grep version | awk '{print $2}')
echo "HELM_VERSION=$(echo ${VERSION})" >> $GITHUB_ENV
MSG=$(helm package charts/kafka-ui)
git fetch origin
git stash
git checkout -b gh-pages origin/gh-pages
git pull
helm repo index .
git add -f ${MSG##*/} index.yaml
git commit -m "release ${VERSION}"
git push
- uses: rickstaa/action-create-tag@v1 #create new tag
with:
tag: "charts/kafka-ui-${{ env.HELM_VERSION }}"


@ -1,4 +1,4 @@
name: "Infra: Release: Serde API"
name: Release serde api
on: workflow_dispatch
jobs:


@ -1,4 +1,4 @@
name: "Infra: Release"
name: Release
on:
release:
types: [published]
@ -34,7 +34,7 @@ jobs:
echo "version=${VERSION}" >> $GITHUB_OUTPUT
- name: Upload files to a GitHub release
uses: svenstaro/upload-release-action@2.7.0
uses: svenstaro/upload-release-action@2.5.0
with:
repo_token: ${{ secrets.GITHUB_TOKEN }}
file: kafka-ui-api/target/kafka-ui-api-${{ steps.build.outputs.version }}.jar
@ -77,7 +77,6 @@ jobs:
builder: ${{ steps.buildx.outputs.name }}
context: kafka-ui-api
platforms: linux/amd64,linux/arm64
provenance: false
push: true
tags: |
provectuslabs/kafka-ui:${{ steps.build.outputs.version }}
@ -89,12 +88,14 @@ jobs:
charts:
runs-on: ubuntu-latest
permissions:
contents: write
needs: release
steps:
- name: Repository Dispatch
uses: peter-evans/repository-dispatch@v2
with:
token: ${{ secrets.CHARTS_ACTIONS_TOKEN }}
repository: provectus/kafka-ui-charts
token: ${{ secrets.GITHUB_TOKEN }}
repository: provectus/kafka-ui
event-type: prepare-helm-release
client-payload: '{"appversion": "${{ needs.release.outputs.version }}"}'


@ -1,4 +1,4 @@
name: "Infra: Release Drafter run"
name: Release Drafter
on:
push:


@ -1,4 +1,4 @@
name: "Infra: Feature Testing Public: Init env"
name: Separate environment create
on:
workflow_dispatch:
inputs:
@ -47,7 +47,7 @@ jobs:
restore-keys: |
${{ runner.os }}-buildx-
- name: Configure AWS credentials for Kafka-UI account
uses: aws-actions/configure-aws-credentials@v3
uses: aws-actions/configure-aws-credentials@v2
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
@ -76,14 +76,14 @@ jobs:
steps:
- name: clone
run: |
git clone https://infra-tech:${{ secrets.INFRA_USER_ACCESS_TOKEN }}@github.com/provectus/kafka-ui-infra.git --branch envs
git clone https://kafka-ui-infra:${{ secrets.KAFKA_UI_INFRA_TOKEN }}@gitlab.provectus.com/provectus-internals/kafka-ui-infra.git
- name: separate env create
run: |
cd kafka-ui-infra/aws-infrastructure4eks/argocd/scripts
bash separate_env_create.sh ${{ github.event.inputs.ENV_NAME }} ${{ secrets.FEATURE_TESTING_UI_PASSWORD }} ${{ needs.build.outputs.tag }}
git config --global user.email "infra-tech@provectus.com"
git config --global user.name "infra-tech"
git config --global user.email "kafka-ui-infra@provectus.com"
git config --global user.name "kafka-ui-infra"
git add -A
git commit -m "separate env added: ${{ github.event.inputs.ENV_NAME }}" && git push || true


@ -1,4 +1,4 @@
name: "Infra: Feature Testing Public: Destroy env"
name: Separate environment remove
on:
workflow_dispatch:
inputs:
@ -13,12 +13,12 @@ jobs:
steps:
- name: clone
run: |
git clone https://infra-tech:${{ secrets.INFRA_USER_ACCESS_TOKEN }}@github.com/provectus/kafka-ui-infra.git --branch envs
git clone https://kafka-ui-infra:${{ secrets.KAFKA_UI_INFRA_TOKEN }}@gitlab.provectus.com/provectus-internals/kafka-ui-infra.git
- name: separate environment remove
run: |
cd kafka-ui-infra/aws-infrastructure4eks/argocd/scripts
bash separate_env_remove.sh ${{ github.event.inputs.ENV_NAME }}
git config --global user.email "infra-tech@provectus.com"
git config --global user.name "infra-tech"
git config --global user.email "kafka-ui-infra@provectus.com"
git config --global user.name "kafka-ui-infra"
git add -A
git commit -m "separate env removed: ${{ github.event.inputs.ENV_NAME }}" && git push || true


@ -1,4 +1,4 @@
name: 'Infra: Close stale issues'
name: 'Close stale issues'
on:
schedule:
- cron: '30 1 * * *'


@ -1,4 +1,4 @@
name: "Infra: Terraform deploy"
name: Terraform deploy
on:
workflow_dispatch:
inputs:
@ -26,7 +26,7 @@ jobs:
echo "Terraform will be triggered in this dir $TF_DIR"
- name: Configure AWS credentials for Kafka-UI account
uses: aws-actions/configure-aws-credentials@v3
uses: aws-actions/configure-aws-credentials@v2
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}


@ -1,4 +1,4 @@
name: "Infra: Triage: Apply triage label for issues"
name: Add triage label to new issues
on:
issues:
types:


@ -1,4 +1,4 @@
name: "Infra: Triage: Apply triage label for PRs"
name: Add triage label to new PRs
on:
pull_request:
types:


@ -7,9 +7,7 @@ on:
issues:
types:
- opened
permissions:
issues: write
pull-requests: write
jobs:
welcome:
runs-on: ubuntu-latest


@ -1,4 +1,4 @@
name: "Infra: Workflow linter"
name: "Workflow linter"
on:
pull_request:
types:

.gitignore (vendored): 3 changes

@ -31,9 +31,6 @@ build/
.vscode/
/kafka-ui-api/app/node
### SDKMAN ###
.sdkmanrc
.DS_Store
*.code-workspace


@ -18,10 +18,6 @@
<a href="https://www.producthunt.com/products/ui-for-apache-kafka/reviews/new">ProductHunt</a>
</p>
<p align="center">
<img src="https://repobeats.axiom.co/api/embed/2e8a7c2d711af9daddd34f9791143e7554c35d0f.svg" />
</p>
#### UI for Apache Kafka is a free, open-source web UI to monitor and manage Apache Kafka clusters.
UI for Apache Kafka is a simple tool that makes your data flows observable, helps find and troubleshoot issues faster and deliver optimal performance. Its lightweight dashboard makes it easy to track key metrics of your Kafka clusters - Brokers, Topics, Partitions, Production, and Consumption.
@ -91,7 +87,7 @@ docker run -it -p 8080:8080 -e DYNAMIC_CONFIG_ENABLED=true provectuslabs/kafka-u
Then access the web UI at [http://localhost:8080](http://localhost:8080)
The command is sufficient to try things out. When you're done trying things out, you can proceed with a [persistent installation](https://docs.kafka-ui.provectus.io/quick-start/persistent-start)
The command is sufficient to try things out. When you're done trying things out, you can proceed with a [persistent installation](https://docs.kafka-ui.provectus.io/configuration/quick-start#persistent-start)
## Persistent installation
@ -103,7 +99,7 @@ services:
ports:
- 8080:8080
environment:
DYNAMIC_CONFIG_ENABLED: 'true'
DYNAMIC_CONFIG_ENABLED: true
volumes:
- ~/kui/config.yml:/etc/kafkaui/dynamic_config.yaml
```


@ -6,8 +6,7 @@ Following versions of the project are currently being supported with security up
| Version | Supported |
| ------- | ------------------ |
| 0.7.x | :white_check_mark: |
| 0.6.x | :x: |
| 0.6.x | :white_check_mark: |
| 0.5.x | :x: |
| 0.4.x | :x: |
| 0.3.x | :x: |


@ -0,0 +1,25 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/
example/
README.md


@ -0,0 +1,7 @@
apiVersion: v2
name: kafka-ui
description: A Helm chart for kafka-UI
type: application
version: 0.6.2
appVersion: v0.6.2
icon: https://github.com/provectus/kafka-ui/raw/master/documentation/images/kafka-ui-logo.png


@ -0,0 +1 @@
Please refer to our [documentation](https://docs.kafka-ui.provectus.io/configuration/helm-charts) to get some info on our helm charts.


@ -0,0 +1,3 @@
apiVersion: v1
entries: {}
generated: "2021-11-11T12:26:08.479581+03:00"


@ -0,0 +1,21 @@
1. Get the application URL by running these commands:
{{- if .Values.ingress.enabled }}
{{- range $host := .Values.ingress.hosts }}
{{- range .paths }}
http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ . }}
{{- end }}
{{- end }}
{{- else if contains "NodePort" .Values.service.type }}
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "kafka-ui.fullname" . }})
export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.service.type }}
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
You can watch the status of by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "kafka-ui.fullname" . }}'
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "kafka-ui.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
echo http://$SERVICE_IP:{{ .Values.service.port }}
{{- else if contains "ClusterIP" .Values.service.type }}
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "kafka-ui.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
echo "Visit http://127.0.0.1:8080 to use your application"
kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:8080
{{- end }}


@ -0,0 +1,84 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "kafka-ui.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "kafka-ui.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "kafka-ui.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Common labels
*/}}
{{- define "kafka-ui.labels" -}}
helm.sh/chart: {{ include "kafka-ui.chart" . }}
{{ include "kafka-ui.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}
{{/*
Selector labels
*/}}
{{- define "kafka-ui.selectorLabels" -}}
app.kubernetes.io/name: {{ include "kafka-ui.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
{{/*
Create the name of the service account to use
*/}}
{{- define "kafka-ui.serviceAccountName" -}}
{{- if .Values.serviceAccount.create }}
{{- default (include "kafka-ui.fullname" .) .Values.serviceAccount.name }}
{{- else }}
{{- default "default" .Values.serviceAccount.name }}
{{- end }}
{{- end }}
{{/*
This allows us to check if the registry of the image is specified or not.
*/}}
{{- define "kafka-ui.imageName" -}}
{{- $registryName := .Values.image.registry -}}
{{- if .Values.global }}
{{- if .Values.global.imageRegistry }}
{{- $registryName = .Values.global.imageRegistry -}}
{{- end -}}
{{- end -}}
{{- $repository := .Values.image.repository -}}
{{- $tag := .Values.image.tag | default .Chart.AppVersion -}}
{{- if $registryName }}
{{- printf "%s/%s:%s" $registryName $repository $tag -}}
{{- else }}
{{- printf "%s:%s" $repository $tag -}}
{{- end }}
{{- end -}}


@ -0,0 +1,10 @@
{{- if .Values.envs.config -}}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ include "kafka-ui.fullname" . }}
labels:
{{- include "kafka-ui.labels" . | nindent 4 }}
data:
{{- toYaml .Values.envs.config | nindent 2 }}
{{- end -}}


@ -0,0 +1,11 @@
{{- if .Values.yamlApplicationConfig -}}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ include "kafka-ui.fullname" . }}-fromvalues
labels:
{{- include "kafka-ui.labels" . | nindent 4 }}
data:
config.yml: |-
{{- toYaml .Values.yamlApplicationConfig | nindent 4}}
{{ end }}


@ -0,0 +1,150 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "kafka-ui.fullname" . }}
labels:
{{- include "kafka-ui.labels" . | nindent 4 }}
{{- with .Values.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
{{- if not .Values.autoscaling.enabled }}
replicas: {{ .Values.replicaCount }}
{{- end }}
selector:
matchLabels:
{{- include "kafka-ui.selectorLabels" . | nindent 6 }}
template:
metadata:
annotations:
{{- with .Values.podAnnotations }}
{{- toYaml . | nindent 8 }}
{{- end }}
checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }}
checksum/configFromValues: {{ include (print $.Template.BasePath "/configmap_fromValues.yaml") . | sha256sum }}
checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }}
labels:
{{- include "kafka-ui.selectorLabels" . | nindent 8 }}
{{- if .Values.podLabels }}
{{- toYaml .Values.podLabels | nindent 8 }}
{{- end }}
spec:
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.initContainers }}
initContainers:
{{- toYaml . | nindent 8 }}
{{- end }}
serviceAccountName: {{ include "kafka-ui.serviceAccountName" . }}
securityContext:
{{- toYaml .Values.podSecurityContext | nindent 8 }}
containers:
- name: {{ .Chart.Name }}
securityContext:
{{- toYaml .Values.securityContext | nindent 12 }}
image: {{ include "kafka-ui.imageName" . }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
{{- if or .Values.env .Values.yamlApplicationConfig .Values.yamlApplicationConfigConfigMap}}
env:
{{- with .Values.env }}
{{- toYaml . | nindent 12 }}
{{- end }}
{{- if or .Values.yamlApplicationConfig .Values.yamlApplicationConfigConfigMap}}
- name: SPRING_CONFIG_ADDITIONAL-LOCATION
{{- if .Values.yamlApplicationConfig }}
value: /kafka-ui/config.yml
{{- else if .Values.yamlApplicationConfigConfigMap }}
value: /kafka-ui/{{ .Values.yamlApplicationConfigConfigMap.keyName | default "config.yml" }}
{{- end }}
{{- end }}
{{- end }}
envFrom:
{{- if .Values.existingConfigMap }}
- configMapRef:
name: {{ .Values.existingConfigMap }}
{{- end }}
{{- if .Values.envs.config }}
- configMapRef:
name: {{ include "kafka-ui.fullname" . }}
{{- end }}
{{- if .Values.existingSecret }}
- secretRef:
name: {{ .Values.existingSecret }}
{{- end }}
{{- if .Values.envs.secret}}
- secretRef:
name: {{ include "kafka-ui.fullname" . }}
{{- end}}
ports:
- name: http
containerPort: 8080
protocol: TCP
livenessProbe:
httpGet:
{{- $contextPath := .Values.envs.config.SERVER_SERVLET_CONTEXT_PATH | default "" | printf "%s/actuator/health" | urlParse }}
path: {{ get $contextPath "path" }}
port: http
{{- if .Values.probes.useHttpsScheme }}
scheme: HTTPS
{{- end }}
initialDelaySeconds: 60
periodSeconds: 30
timeoutSeconds: 10
readinessProbe:
httpGet:
{{- $contextPath := .Values.envs.config.SERVER_SERVLET_CONTEXT_PATH | default "" | printf "%s/actuator/health" | urlParse }}
path: {{ get $contextPath "path" }}
port: http
{{- if .Values.probes.useHttpsScheme }}
scheme: HTTPS
{{- end }}
initialDelaySeconds: 60
periodSeconds: 30
timeoutSeconds: 10
resources:
{{- toYaml .Values.resources | nindent 12 }}
{{- if or .Values.yamlApplicationConfig .Values.volumeMounts .Values.yamlApplicationConfigConfigMap}}
volumeMounts:
{{- with .Values.volumeMounts }}
{{- toYaml . | nindent 12 }}
{{- end }}
{{- if .Values.yamlApplicationConfig }}
- name: kafka-ui-yaml-conf
mountPath: /kafka-ui/
{{- end }}
{{- if .Values.yamlApplicationConfigConfigMap}}
- name: kafka-ui-yaml-conf-configmap
mountPath: /kafka-ui/
{{- end }}
{{- end }}
{{- if or .Values.yamlApplicationConfig .Values.volumes .Values.yamlApplicationConfigConfigMap}}
volumes:
{{- with .Values.volumes }}
{{- toYaml . | nindent 8 }}
{{- end }}
{{- if .Values.yamlApplicationConfig }}
- name: kafka-ui-yaml-conf
configMap:
name: {{ include "kafka-ui.fullname" . }}-fromvalues
{{- end }}
{{- if .Values.yamlApplicationConfigConfigMap}}
- name: kafka-ui-yaml-conf-configmap
configMap:
name: {{ .Values.yamlApplicationConfigConfigMap.name }}
{{- end }}
{{- end }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}


@ -0,0 +1,46 @@
{{- if .Values.autoscaling.enabled }}
{{- $kubeCapabilityVersion := semver .Capabilities.KubeVersion.Version -}}
{{- $isHigher1p25 := ge (semver "1.25" | $kubeCapabilityVersion.Compare) 0 -}}
{{- if and ($.Capabilities.APIVersions.Has "autoscaling/v2") $isHigher1p25 -}}
apiVersion: autoscaling/v2
{{- else }}
apiVersion: autoscaling/v2beta1
{{- end }}
kind: HorizontalPodAutoscaler
metadata:
name: {{ include "kafka-ui.fullname" . }}
labels:
{{- include "kafka-ui.labels" . | nindent 4 }}
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: {{ include "kafka-ui.fullname" . }}
minReplicas: {{ .Values.autoscaling.minReplicas }}
maxReplicas: {{ .Values.autoscaling.maxReplicas }}
metrics:
{{- if .Values.autoscaling.targetCPUUtilizationPercentage }}
- type: Resource
resource:
name: cpu
{{- if $isHigher1p25 }}
target:
type: Utilization
averageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }}
{{- else }}
targetAverageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }}
{{- end }}
{{- end }}
{{- if .Values.autoscaling.targetMemoryUtilizationPercentage }}
- type: Resource
resource:
name: memory
{{- if $isHigher1p25 }}
target:
type: Utilization
averageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }}
{{- else }}
targetAverageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }}
{{- end }}
{{- end }}
{{- end }}


@ -0,0 +1,89 @@
{{- if .Values.ingress.enabled -}}
{{- $fullName := include "kafka-ui.fullname" . -}}
{{- $svcPort := .Values.service.port -}}
{{- $kubeCapabilityVersion := semver .Capabilities.KubeVersion.Version -}}
{{- $isHigher1p19 := ge (semver "1.19" | $kubeCapabilityVersion.Compare) 0 -}}
{{- if and ($.Capabilities.APIVersions.Has "networking.k8s.io/v1") $isHigher1p19 -}}
apiVersion: networking.k8s.io/v1
{{- else if $.Capabilities.APIVersions.Has "networking.k8s.io/v1beta1" }}
apiVersion: networking.k8s.io/v1beta1
{{- else }}
apiVersion: extensions/v1beta1
{{- end }}
kind: Ingress
metadata:
name: {{ $fullName }}
labels:
{{- include "kafka-ui.labels" . | nindent 4 }}
{{- with .Values.ingress.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
{{- if .Values.ingress.tls.enabled }}
tls:
- hosts:
- {{ tpl .Values.ingress.host . }}
secretName: {{ .Values.ingress.tls.secretName }}
{{- end }}
{{- if .Values.ingress.ingressClassName }}
ingressClassName: {{ .Values.ingress.ingressClassName }}
{{- end }}
rules:
- http:
paths:
{{- if and ($.Capabilities.APIVersions.Has "networking.k8s.io/v1") $isHigher1p19 -}}
{{- range .Values.ingress.precedingPaths }}
- path: {{ .path }}
pathType: {{ .Values.ingress.pathType }}
backend:
service:
name: {{ .serviceName }}
port:
number: {{ .servicePort }}
{{- end }}
- backend:
service:
name: {{ $fullName }}
port:
number: {{ $svcPort }}
pathType: {{ .Values.ingress.pathType }}
{{- if .Values.ingress.path }}
path: {{ .Values.ingress.path }}
{{- end }}
{{- range .Values.ingress.succeedingPaths }}
- path: {{ .path }}
pathType: {{ .Values.ingress.pathType }}
backend:
service:
name: {{ .serviceName }}
port:
number: {{ .servicePort }}
{{- end }}
{{- if tpl .Values.ingress.host . }}
host: {{tpl .Values.ingress.host . }}
{{- end }}
{{- else -}}
{{- range .Values.ingress.precedingPaths }}
- path: {{ .path }}
backend:
serviceName: {{ .serviceName }}
servicePort: {{ .servicePort }}
{{- end }}
- backend:
serviceName: {{ $fullName }}
servicePort: {{ $svcPort }}
{{- if .Values.ingress.path }}
path: {{ .Values.ingress.path }}
{{- end }}
{{- range .Values.ingress.succeedingPaths }}
- path: {{ .path }}
backend:
serviceName: {{ .serviceName }}
servicePort: {{ .servicePort }}
{{- end }}
{{- if tpl .Values.ingress.host . }}
host: {{ tpl .Values.ingress.host . }}
{{- end }}
{{- end }}
{{- end }}


@ -0,0 +1,18 @@
{{- if and .Values.networkPolicy.enabled .Values.networkPolicy.egressRules.customRules }}
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: {{ printf "%s-egress" (include "kafka-ui.fullname" .) }}
labels:
{{- include "kafka-ui.labels" . | nindent 4 }}
spec:
podSelector:
matchLabels:
{{- include "kafka-ui.selectorLabels" . | nindent 6 }}
policyTypes:
- Egress
egress:
{{- if .Values.networkPolicy.egressRules.customRules }}
{{- toYaml .Values.networkPolicy.egressRules.customRules | nindent 4 }}
{{- end }}
{{- end }}


@ -0,0 +1,18 @@
{{- if and .Values.networkPolicy.enabled .Values.networkPolicy.ingressRules.customRules }}
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: {{ printf "%s-ingress" (include "kafka-ui.fullname" .) }}
labels:
{{- include "kafka-ui.labels" . | nindent 4 }}
spec:
podSelector:
matchLabels:
{{- include "kafka-ui.selectorLabels" . | nindent 6 }}
policyTypes:
- Ingress
ingress:
{{- if .Values.networkPolicy.ingressRules.customRules }}
{{- toYaml .Values.networkPolicy.ingressRules.customRules | nindent 4 }}
{{- end }}
{{- end }}


@ -0,0 +1,13 @@
{{- if .Values.envs.secret -}}
apiVersion: v1
kind: Secret
metadata:
name: {{ include "kafka-ui.fullname" . }}
labels:
{{- include "kafka-ui.labels" . | nindent 4 }}
type: Opaque
data:
{{- range $key, $val := .Values.envs.secret }}
{{ $key }}: {{ $val | b64enc | quote }}
{{- end -}}
{{- end}}


@ -0,0 +1,22 @@
apiVersion: v1
kind: Service
metadata:
name: {{ include "kafka-ui.fullname" . }}
labels:
{{- include "kafka-ui.labels" . | nindent 4 }}
{{- if .Values.service.annotations }}
annotations:
{{ toYaml .Values.service.annotations | nindent 4 }}
{{- end }}
spec:
type: {{ .Values.service.type }}
ports:
- port: {{ .Values.service.port }}
targetPort: http
protocol: TCP
name: http
{{- if (and (eq .Values.service.type "NodePort") .Values.service.nodePort) }}
nodePort: {{ .Values.service.nodePort }}
{{- end }}
selector:
{{- include "kafka-ui.selectorLabels" . | nindent 4 }}


@ -0,0 +1,12 @@
{{- if .Values.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "kafka-ui.serviceAccountName" . }}
labels:
{{- include "kafka-ui.labels" . | nindent 4 }}
{{- with .Values.serviceAccount.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- end }}

charts/kafka-ui/values.yaml (new file): 161 changes

@ -0,0 +1,161 @@
replicaCount: 1
image:
registry: docker.io
repository: provectuslabs/kafka-ui
pullPolicy: IfNotPresent
# Overrides the image tag whose default is the chart appVersion.
tag: ""
imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""
serviceAccount:
# Specifies whether a service account should be created
create: true
# Annotations to add to the service account
annotations: {}
# The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template
name: ""
existingConfigMap: ""
yamlApplicationConfig:
{}
# kafka:
# clusters:
# - name: yaml
# bootstrapServers: kafka-service:9092
# spring:
# security:
# oauth2:
# auth:
# type: disabled
# management:
# health:
# ldap:
# enabled: false
yamlApplicationConfigConfigMap:
{}
# keyName: config.yml
# name: configMapName
existingSecret: ""
envs:
secret: {}
config: {}
networkPolicy:
enabled: false
egressRules:
## Additional custom egress rules
## e.g:
## customRules:
## - to:
## - namespaceSelector:
## matchLabels:
## label: example
customRules: []
ingressRules:
## Additional custom ingress rules
## e.g:
## customRules:
## - from:
## - namespaceSelector:
## matchLabels:
## label: example
customRules: []
podAnnotations: {}
podLabels: {}
## Annotations to be added to kafka-ui Deployment
##
annotations: {}
## Set field schema as HTTPS for readines and liveness probe
##
probes:
useHttpsScheme: false
podSecurityContext:
{}
# fsGroup: 2000
securityContext:
{}
# capabilities:
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# runAsUser: 1000
service:
type: ClusterIP
port: 80
# if you want to force a specific nodePort. Must be use with service.type=NodePort
# nodePort:
# Ingress configuration
ingress:
# Enable ingress resource
enabled: false
# Annotations for the Ingress
annotations: {}
# ingressClassName for the Ingress
ingressClassName: ""
# The path for the Ingress
path: "/"
# The path type for the Ingress
pathType: "Prefix"
# The hostname for the Ingress
host: ""
# configs for Ingress TLS
tls:
# Enable TLS termination for the Ingress
enabled: false
# the name of a pre-created Secret containing a TLS private key and certificate
secretName: ""
# HTTP paths to add to the Ingress before the default path
precedingPaths: []
# Http paths to add to the Ingress after the default path
succeedingPaths: []
resources:
{}
# limits:
# cpu: 200m
# memory: 512Mi
# requests:
# cpu: 200m
# memory: 256Mi
autoscaling:
enabled: false
minReplicas: 1
maxReplicas: 100
targetCPUUtilizationPercentage: 80
# targetMemoryUtilizationPercentage: 80
nodeSelector: {}
tolerations: []
affinity: {}
env: {}
initContainers: {}
volumeMounts: {}
volumes: {}


@ -8,9 +8,9 @@
6. [kafka-ui-auth-context.yaml](./kafka-ui-auth-context.yaml) - Basic (username/password) authentication with custom path (URL) (issue 861).
7. [e2e-tests.yaml](./e2e-tests.yaml) - Configuration with different connectors (github-source, s3, sink-activities, source-activities) and Ksql functionality.
8. [kafka-ui-jmx-secured.yml](./kafka-ui-jmx-secured.yml) - Kafkas JMX with SSL and authentication.
9. [kafka-ui-reverse-proxy.yaml](./nginx-proxy.yaml) - An example for using the app behind a proxy (like nginx).
9. [kafka-ui-reverse-proxy.yaml](./kafka-ui-reverse-proxy.yaml) - An example for using the app behind a proxy (like nginx).
10. [kafka-ui-sasl.yaml](./kafka-ui-sasl.yaml) - SASL auth for Kafka.
11. [kafka-ui-traefik-proxy.yaml](./traefik-proxy.yaml) - Traefik specific proxy configuration.
11. [kafka-ui-traefik-proxy.yaml](./kafka-ui-traefik-proxy.yaml) - Traefik specific proxy configuration.
12. [oauth-cognito.yaml](./oauth-cognito.yaml) - OAuth2 with Cognito
13. [kafka-ui-with-jmx-exporter.yaml](./kafka-ui-with-jmx-exporter.yaml) - A configuration with 2 kafka clusters with enabled prometheus jmx exporters instead of jmx.
14. [kafka-with-zookeeper.yaml](./kafka-with-zookeeper.yaml) - An example for using kafka with zookeeper


@ -15,23 +15,26 @@ services:
KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka0:29092
KAFKA_CLUSTERS_0_METRICS_PORT: 9997
KAFKA_CLUSTERS_0_SCHEMAREGISTRY: http://schemaregistry0:8085
AUTH_TYPE: "LDAP"
SPRING_LDAP_URLS: "ldap://ldap:10389"
SPRING_LDAP_BASE: "cn={0},ou=people,dc=planetexpress,dc=com"
SPRING_LDAP_ADMIN_USER: "cn=admin,dc=planetexpress,dc=com"
SPRING_LDAP_ADMIN_PASSWORD: "GoodNewsEveryone"
SPRING_LDAP_USER_FILTER_SEARCH_BASE: "dc=planetexpress,dc=com"
SPRING_LDAP_USER_FILTER_SEARCH_FILTER: "(&(uid={0})(objectClass=inetOrgPerson))"
SPRING_LDAP_GROUP_FILTER_SEARCH_BASE: "ou=people,dc=planetexpress,dc=com"
# OAUTH2.LDAP.ACTIVEDIRECTORY: true
# OAUTH2.LDAP.AСTIVEDIRECTORY.DOMAIN: "memelord.lol"
SPRING_LDAP_DN_PATTERN: "cn={0},ou=people,dc=planetexpress,dc=com"
# ===== USER SEARCH FILTER INSTEAD OF DN =====
# SPRING_LDAP_USERFILTER_SEARCHBASE: "dc=planetexpress,dc=com"
# SPRING_LDAP_USERFILTER_SEARCHFILTER: "(&(uid={0})(objectClass=inetOrgPerson))"
# LDAP ADMIN USER
# SPRING_LDAP_ADMINUSER: "cn=admin,dc=planetexpress,dc=com"
# SPRING_LDAP_ADMINPASSWORD: "GoodNewsEveryone"
# ===== ACTIVE DIRECTORY =====
# OAUTH2.LDAP.ACTIVEDIRECTORY: true
# OAUTH2.LDAP.AСTIVEDIRECTORY.DOMAIN: "memelord.lol"
ldap:
image: rroemhild/test-openldap:latest
hostname: "ldap"
ports:
- 10389:10389
kafka0:
image: confluentinc/cp-kafka:7.2.1
@ -76,4 +79,4 @@ services:
SCHEMA_REGISTRY_SCHEMA_REGISTRY_INTER_INSTANCE_PROTOCOL: "http"
SCHEMA_REGISTRY_LOG4J_ROOT_LOGLEVEL: INFO
SCHEMA_REGISTRY_KAFKASTORE_TOPIC: _schemas
SCHEMA_REGISTRY_KAFKASTORE_TOPIC: _schemas


@ -124,7 +124,7 @@ services:
kafka-init-topics:
image: confluentinc/cp-kafka:7.2.1
volumes:
- ./data/message.json:/data/message.json
- ./message.json:/data/message.json
depends_on:
kafka0:
condition: service_healthy
@ -187,4 +187,4 @@ services:
KSQL_KSQL_SCHEMA_REGISTRY_URL: http://schemaregistry0:8085
KSQL_KSQL_SERVICE_ID: my_ksql_1
KSQL_KSQL_HIDDEN_TOPICS: '^_.*'
KSQL_CACHE_MAX_BYTES_BUFFERING: 0
KSQL_CACHE_MAX_BYTES_BUFFERING: 0


@ -1,2 +1,2 @@
rules:
- pattern: ".*"
- pattern: ".*"


@ -57,7 +57,7 @@ services:
kafka-init-topics:
image: confluentinc/cp-kafka:7.2.1
volumes:
- ./data/message.json:/data/message.json
- ./message.json:/data/message.json
depends_on:
- kafka1
command: "bash -c 'echo Waiting for Kafka to be ready... && \
@ -80,4 +80,4 @@ services:
KAFKA_CLUSTERS_0_METRICS_PORT: 9997
KAFKA_CLUSTERS_0_SCHEMAREGISTRY: http://schemaregistry1:8085
KAFKA_CLUSTERS_0_SCHEMAREGISTRYAUTH_USERNAME: admin
KAFKA_CLUSTERS_0_SCHEMAREGISTRYAUTH_PASSWORD: letmein
KAFKA_CLUSTERS_0_SCHEMAREGISTRYAUTH_PASSWORD: letmein


@ -0,0 +1,84 @@
---
version: "2"
services:
kafka0:
image: confluentinc/cp-kafka:7.2.1
hostname: kafka0
container_name: kafka0
ports:
- "9092:9092"
- "9997:9997"
environment:
KAFKA_BROKER_ID: 1
KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: "CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT"
KAFKA_ADVERTISED_LISTENERS: "PLAINTEXT://kafka0:29092,PLAINTEXT_HOST://localhost:9092"
KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0
KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1
KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1
KAFKA_JMX_PORT: 9997
KAFKA_JMX_HOSTNAME: localhost
KAFKA_PROCESS_ROLES: "broker,controller"
KAFKA_NODE_ID: 1
KAFKA_CONTROLLER_QUORUM_VOTERS: "1@kafka0:29093"
KAFKA_LISTENERS: "PLAINTEXT://kafka0:29092,CONTROLLER://kafka0:29093,PLAINTEXT_HOST://0.0.0.0:9092"
KAFKA_INTER_BROKER_LISTENER_NAME: "PLAINTEXT"
KAFKA_CONTROLLER_LISTENER_NAMES: "CONTROLLER"
KAFKA_LOG_DIRS: "/tmp/kraft-combined-logs"
volumes:
- ./scripts/update_run_cluster.sh:/tmp/update_run.sh
- ./scripts/clusterID:/tmp/clusterID
command: 'bash -c ''if [ ! -f /tmp/update_run.sh ]; then echo "ERROR: Did you forget the update_run.sh file that came with this docker-compose.yml file?" && exit 1 ; else /tmp/update_run.sh && /etc/confluent/docker/run ; fi'''
schemaregistry0:
image: confluentinc/cp-schema-registry:7.2.1
depends_on:
- kafka0
environment:
SCHEMA_REGISTRY_KAFKASTORE_BOOTSTRAP_SERVERS: PLAINTEXT://kafka0:29092
SCHEMA_REGISTRY_KAFKASTORE_SECURITY_PROTOCOL: PLAINTEXT
SCHEMA_REGISTRY_HOST_NAME: schemaregistry0
SCHEMA_REGISTRY_LISTENERS: http://schemaregistry0:8085
SCHEMA_REGISTRY_SCHEMA_REGISTRY_INTER_INSTANCE_PROTOCOL: "http"
SCHEMA_REGISTRY_LOG4J_ROOT_LOGLEVEL: INFO
SCHEMA_REGISTRY_KAFKASTORE_TOPIC: _schemas
ports:
- 8085:8085
kafka-connect0:
image: confluentinc/cp-kafka-connect:7.2.1
ports:
- 8083:8083
depends_on:
- kafka0
- schemaregistry0
environment:
CONNECT_BOOTSTRAP_SERVERS: kafka0:29092
CONNECT_GROUP_ID: compose-connect-group
CONNECT_CONFIG_STORAGE_TOPIC: _connect_configs
CONNECT_CONFIG_STORAGE_REPLICATION_FACTOR: 1
CONNECT_OFFSET_STORAGE_TOPIC: _connect_offset
CONNECT_OFFSET_STORAGE_REPLICATION_FACTOR: 1
CONNECT_STATUS_STORAGE_TOPIC: _connect_status
CONNECT_STATUS_STORAGE_REPLICATION_FACTOR: 1
CONNECT_KEY_CONVERTER: org.apache.kafka.connect.storage.StringConverter
CONNECT_KEY_CONVERTER_SCHEMA_REGISTRY_URL: http://schemaregistry0:8085
CONNECT_VALUE_CONVERTER: org.apache.kafka.connect.storage.StringConverter
CONNECT_VALUE_CONVERTER_SCHEMA_REGISTRY_URL: http://schemaregistry0:8085
CONNECT_INTERNAL_KEY_CONVERTER: org.apache.kafka.connect.json.JsonConverter
CONNECT_INTERNAL_VALUE_CONVERTER: org.apache.kafka.connect.json.JsonConverter
CONNECT_REST_ADVERTISED_HOST_NAME: kafka-connect0
CONNECT_PLUGIN_PATH: "/usr/share/java,/usr/share/confluent-hub-components"
kafka-init-topics:
image: confluentinc/cp-kafka:7.2.1
volumes:
- ./message.json:/data/message.json
depends_on:
- kafka0
command: "bash -c 'echo Waiting for Kafka to be ready... && \
cub kafka-ready -b kafka0:29092 1 30 && \
kafka-topics --create --topic users --partitions 3 --replication-factor 1 --if-not-exists --bootstrap-server kafka0:29092 && \
kafka-topics --create --topic messages --partitions 2 --replication-factor 1 --if-not-exists --bootstrap-server kafka0:29092 && \
kafka-console-producer --bootstrap-server kafka0:29092 --topic users < /data/message.json'"


@ -20,8 +20,6 @@ services:
KAFKA_CLUSTERS_0_KAFKACONNECT_0_NAME: first
KAFKA_CLUSTERS_0_KAFKACONNECT_0_ADDRESS: http://kafka-connect0:8083
DYNAMIC_CONFIG_ENABLED: 'true' # not necessary, added for tests
KAFKA_CLUSTERS_0_AUDIT_TOPICAUDITENABLED: 'true'
KAFKA_CLUSTERS_0_AUDIT_CONSOLEAUDITENABLED: 'true'
kafka0:
image: confluentinc/cp-kafka:7.2.1.arm64
@ -95,7 +93,7 @@ services:
kafka-init-topics:
image: confluentinc/cp-kafka:7.2.1.arm64
volumes:
- ./data/message.json:/data/message.json
- ./message.json:/data/message.json
depends_on:
- kafka0
command: "bash -c 'echo Waiting for Kafka to be ready... && \


@ -69,7 +69,7 @@ services:
build:
context: ./kafka-connect
args:
image: confluentinc/cp-kafka-connect:7.2.1
image: confluentinc/cp-kafka-connect:6.0.1
ports:
- 8083:8083
depends_on:
@ -104,7 +104,7 @@ services:
kafka-init-topics:
image: confluentinc/cp-kafka:7.2.1
volumes:
- ./data/message.json:/data/message.json
- ./message.json:/data/message.json
depends_on:
- kafka0
command: "bash -c 'echo Waiting for Kafka to be ready... && \


@ -4,7 +4,7 @@ services:
nginx:
image: nginx:latest
volumes:
- ./data/proxy.conf:/etc/nginx/conf.d/default.conf
- ./proxy.conf:/etc/nginx/conf.d/default.conf
ports:
- 8080:80


@ -115,7 +115,7 @@ services:
SCHEMA_REGISTRY_KAFKASTORE_TOPIC: _schemas
kafka-connect0:
image: confluentinc/cp-kafka-connect:7.2.1
image: confluentinc/cp-kafka-connect:6.0.1
ports:
- 8083:8083
depends_on:
@ -142,7 +142,7 @@ services:
kafka-init-topics:
image: confluentinc/cp-kafka:7.2.1
volumes:
- ./data/message.json:/data/message.json
- ./message.json:/data/message.json
depends_on:
- kafka1
command: "bash -c 'echo Waiting for Kafka to be ready... && \


@ -38,7 +38,7 @@ services:
kafka-init-topics:
image: confluentinc/cp-kafka:7.2.1
volumes:
- ./data/message.json:/data/message.json
- ./message.json:/data/message.json
depends_on:
- kafka
command: "bash -c 'echo Waiting for Kafka to be ready... && \

View file

@ -0,0 +1,22 @@
---
version: '3.4'
services:
kafka-ui:
container_name: kafka-ui
image: provectuslabs/kafka-ui:local
ports:
- 8080:8080
depends_on:
- kafka0 # OMITTED, TAKE UP AN EXAMPLE FROM OTHER COMPOSE FILES
environment:
KAFKA_CLUSTERS_0_NAME: local
KAFKA_CLUSTERS_0_PROPERTIES_SECURITY_PROTOCOL: SSL
KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka0:29092
AUTH_TYPE: OAUTH2_COGNITO
AUTH_COGNITO_ISSUER_URI: "https://cognito-idp.eu-central-1.amazonaws.com/eu-central-xxxxxx"
AUTH_COGNITO_CLIENT_ID: ""
AUTH_COGNITO_CLIENT_SECRET: ""
AUTH_COGNITO_SCOPE: "openid"
AUTH_COGNITO_USER_NAME_ATTRIBUTE: "username"
AUTH_COGNITO_LOGOUT_URI: "https://<domain>.auth.eu-central-1.amazoncognito.com/logout"
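The AUTH_* variables above are Spring configuration properties in environment-variable form; via Spring Boot's relaxed binding they correspond to an application config block along these lines (property names are inferred mechanically from the variables shown, not taken from project documentation):

  auth:
    type: OAUTH2_COGNITO
    cognito:
      issuer-uri: "https://cognito-idp.eu-central-1.amazonaws.com/eu-central-xxxxxx"
      client-id: ""
      client-secret: ""
      scope: "openid"
      user-name-attribute: "username"
      logout-uri: "https://<domain>.auth.eu-central-1.amazoncognito.com/logout"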

View file

@ -1,11 +1,7 @@
#FROM azul/zulu-openjdk-alpine:17-jre-headless
FROM azul/zulu-openjdk-alpine@sha256:a36679ac0d28cb835e2a8c00e1e0d95509c6c51c5081c7782b85edb1f37a771a
RUN apk add --no-cache \
# snappy codec
gcompat \
# configuring timezones
tzdata
RUN apk add --no-cache gcompat # need to make snappy codec work
RUN addgroup -S kafkaui && adduser -S kafkaui -G kafkaui
# creating folder for dynamic config usage (certificates uploads, etc)
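The longer variant of this layer installs tzdata alongside gcompat, which lets the Alpine-based container honor a TZ environment variable. With that package present, the timezone could be set from a compose file roughly like this (a sketch; the zone value is a placeholder):

  kafka-ui:
    image: provectuslabs/kafka-ui:local
    environment:
      TZ: Europe/Berlin   # any IANA zone name, resolved via the installed tzdata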

View file

@ -81,12 +81,6 @@
<groupId>io.confluent</groupId>
<artifactId>kafka-json-schema-serializer</artifactId>
<version>${confluent.version}</version>
<exclusions>
<exclusion>
<groupId>commons-collections</groupId>
<artifactId>commons-collections</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>io.confluent</groupId>
@ -97,7 +91,7 @@
<dependency>
<groupId>software.amazon.msk</groupId>
<artifactId>aws-msk-iam-auth</artifactId>
<version>1.1.7</version>
<version>1.1.6</version>
</dependency>
<dependency>
@ -120,11 +114,6 @@
<artifactId>json</artifactId>
<version>${org.json.version}</version>
</dependency>
<dependency>
<groupId>io.micrometer</groupId>
<artifactId>micrometer-registry-prometheus</artifactId>
<scope>runtime</scope>
</dependency>
<dependency>
<groupId>org.springframework.boot</groupId>
@ -141,11 +130,6 @@
<artifactId>commons-pool2</artifactId>
<version>${apache.commons.version}</version>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-collections4</artifactId>
<version>4.4</version>
</dependency>
<dependency>
<groupId>org.testcontainers</groupId>
<artifactId>testcontainers</artifactId>
@ -249,6 +233,8 @@
<groupId>org.springframework.security</groupId>
<artifactId>spring-security-ldap</artifactId>
</dependency>
<dependency>
<groupId>org.codehaus.groovy</groupId>
<artifactId>groovy-jsr223</artifactId>
@ -325,7 +311,7 @@
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-checkstyle-plugin</artifactId>
<version>3.3.0</version>
<version>3.1.2</version>
<dependencies>
<dependency>
<groupId>com.puppycrawl.tools</groupId>
@ -403,7 +389,7 @@
<plugin>
<groupId>pl.project13.maven</groupId>
<artifactId>git-commit-id-plugin</artifactId>
<version>4.9.10</version>
<version>4.0.0</version>
<executions>
<execution>
<id>get-the-git-infos</id>

View file

@ -51,12 +51,13 @@ public class ClustersProperties {
List<Masking> masking;
Long pollingThrottleRate;
TruststoreConfig ssl;
AuditProperties audit;
}
@Data
public static class PollingProperties {
Integer pollTimeoutMs;
Integer partitionPollTimeout;
Integer noDataEmptyPolls;
Integer maxPageSize;
Integer defaultPageSize;
}
@ -142,23 +143,6 @@ public class ClustersProperties {
}
}
@Data
@NoArgsConstructor
@AllArgsConstructor
public static class AuditProperties {
String topic;
Integer auditTopicsPartitions;
Boolean topicAuditEnabled;
Boolean consoleAuditEnabled;
LogLevel level;
Map<String, String> auditTopicProperties;
public enum LogLevel {
ALL,
ALTER_ONLY //default
}
}
@PostConstruct
public void validateAndSetDefaults() {
if (clusters != null) {
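The AuditProperties block shown here corresponds to the KAFKA_CLUSTERS_0_AUDIT_* variables in the compose diff earlier on this page. Expressed as application config, the same settings would look roughly like this — a sketch built from the field names above (ALTER_ONLY is the default level per the enum comment):

  kafka:
    clusters:
      - name: local
        audit:
          topic-audit-enabled: true
          console-audit-enabled: true
          level: ALL   # ALTER_ONLY is the default
          # topic, audit-topics-partitions and audit-topic-properties can also be set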

View file

@ -7,6 +7,8 @@ import org.springframework.http.HttpMethod;
import org.springframework.http.HttpStatus;
import org.springframework.http.server.reactive.ServerHttpRequest;
import org.springframework.http.server.reactive.ServerHttpResponse;
import org.springframework.web.reactive.config.CorsRegistry;
import org.springframework.web.reactive.config.WebFluxConfigurer;
import org.springframework.web.server.ServerWebExchange;
import org.springframework.web.server.WebFilter;
import org.springframework.web.server.WebFilterChain;

View file

@ -1,6 +1,7 @@
package com.provectus.kafka.ui.config;
import com.provectus.kafka.ui.exception.ValidationException;
import java.beans.Transient;
import javax.annotation.PostConstruct;
import lombok.Data;
import org.springframework.boot.context.properties.ConfigurationProperties;

View file

@ -13,7 +13,6 @@ abstract class AbstractAuthSecurityConfig {
"/resources/**",
"/actuator/health/**",
"/actuator/info",
"/actuator/prometheus",
"/auth",
"/login",
"/logout",

View file

@ -1,6 +1,7 @@
package com.provectus.kafka.ui.config.auth;
import java.util.Collection;
import lombok.Value;
public record AuthenticatedUser(String principal, Collection<String> groups) {

View file

@ -6,13 +6,13 @@ import lombok.extern.slf4j.Slf4j;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.http.HttpMethod;
import org.springframework.security.config.annotation.web.reactive.EnableWebFluxSecurity;
import org.springframework.security.config.web.server.SecurityWebFiltersOrder;
import org.springframework.security.config.web.server.ServerHttpSecurity;
import org.springframework.security.web.server.SecurityWebFilterChain;
import org.springframework.security.web.server.authentication.RedirectServerAuthenticationSuccessHandler;
import org.springframework.security.web.server.authentication.logout.RedirectServerLogoutSuccessHandler;
import org.springframework.security.web.server.util.matcher.ServerWebExchangeMatchers;
import org.springframework.security.web.server.ui.LogoutPageGeneratingWebFilter;
@Configuration
@EnableWebFluxSecurity
@ -33,19 +33,15 @@ public class BasicAuthSecurityConfig extends AbstractAuthSecurityConfig {
final var logoutSuccessHandler = new RedirectServerLogoutSuccessHandler();
logoutSuccessHandler.setLogoutSuccessUrl(URI.create(LOGOUT_URL));
return http.authorizeExchange(spec -> spec
.pathMatchers(AUTH_WHITELIST)
.permitAll()
.anyExchange()
.authenticated()
)
.formLogin(spec -> spec.loginPage(LOGIN_URL).authenticationSuccessHandler(authHandler))
.logout(spec -> spec
.logoutSuccessHandler(logoutSuccessHandler)
.requiresLogout(ServerWebExchangeMatchers.pathMatchers(HttpMethod.GET, "/logout")))
.csrf(ServerHttpSecurity.CsrfSpec::disable)
.build();
return http
.addFilterAfter(new LogoutPageGeneratingWebFilter(), SecurityWebFiltersOrder.REACTOR_CONTEXT)
.csrf().disable()
.authorizeExchange()
.pathMatchers(AUTH_WHITELIST).permitAll()
.anyExchange().authenticated()
.and().formLogin().loginPage(LOGIN_URL).authenticationSuccessHandler(authHandler)
.and().logout().logoutSuccessHandler(logoutSuccessHandler)
.and().build();
}
}

View file

@ -27,12 +27,10 @@ public class DisabledAuthSecurityConfig extends AbstractAuthSecurityConfig {
System.exit(1);
}
log.warn("Authentication is disabled. Access will be unrestricted.");
return http.authorizeExchange(spec -> spec
.anyExchange()
.permitAll()
)
.csrf(ServerHttpSecurity.CsrfSpec::disable)
return http.authorizeExchange()
.anyExchange().permitAll()
.and()
.csrf().disable()
.build();
}

View file

@ -24,7 +24,6 @@ import org.springframework.security.authentication.AuthenticationManager;
import org.springframework.security.authentication.ProviderManager;
import org.springframework.security.authentication.ReactiveAuthenticationManager;
import org.springframework.security.authentication.ReactiveAuthenticationManagerAdapter;
import org.springframework.security.config.Customizer;
import org.springframework.security.config.annotation.web.reactive.EnableWebFluxSecurity;
import org.springframework.security.config.web.server.ServerHttpSecurity;
import org.springframework.security.core.GrantedAuthority;
@ -127,15 +126,21 @@ public class LdapSecurityConfig {
log.info("Active Directory support for LDAP has been enabled.");
}
return http.authorizeExchange(spec -> spec
.pathMatchers(AUTH_WHITELIST)
.permitAll()
.anyExchange()
.authenticated()
)
.formLogin(Customizer.withDefaults())
.logout(Customizer.withDefaults())
.csrf(ServerHttpSecurity.CsrfSpec::disable)
return http
.authorizeExchange()
.pathMatchers(AUTH_WHITELIST)
.permitAll()
.anyExchange()
.authenticated()
.and()
.formLogin()
.and()
.logout()
.and()
.csrf().disable()
.build();
}

View file

@ -12,11 +12,10 @@ import lombok.extern.log4j.Log4j2;
import org.jetbrains.annotations.Nullable;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.boot.autoconfigure.security.oauth2.client.OAuth2ClientProperties;
import org.springframework.boot.autoconfigure.security.oauth2.client.OAuth2ClientPropertiesMapper;
import org.springframework.boot.autoconfigure.security.oauth2.client.OAuth2ClientPropertiesRegistrationAdapter;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.security.config.Customizer;
import org.springframework.security.config.annotation.method.configuration.EnableReactiveMethodSecurity;
import org.springframework.security.config.annotation.web.reactive.EnableWebFluxSecurity;
import org.springframework.security.config.web.server.ServerHttpSecurity;
@ -50,15 +49,21 @@ public class OAuthSecurityConfig extends AbstractAuthSecurityConfig {
public SecurityWebFilterChain configure(ServerHttpSecurity http, OAuthLogoutSuccessHandler logoutHandler) {
log.info("Configuring OAUTH2 authentication.");
return http.authorizeExchange(spec -> spec
.pathMatchers(AUTH_WHITELIST)
.permitAll()
.anyExchange()
.authenticated()
)
.oauth2Login(Customizer.withDefaults())
.logout(spec -> spec.logoutSuccessHandler(logoutHandler))
.csrf(ServerHttpSecurity.CsrfSpec::disable)
return http.authorizeExchange()
.pathMatchers(AUTH_WHITELIST)
.permitAll()
.anyExchange()
.authenticated()
.and()
.oauth2Login()
.and()
.logout()
.logoutSuccessHandler(logoutHandler)
.and()
.csrf().disable()
.build();
}
@ -98,10 +103,7 @@ public class OAuthSecurityConfig extends AbstractAuthSecurityConfig {
public InMemoryReactiveClientRegistrationRepository clientRegistrationRepository() {
final OAuth2ClientProperties props = OAuthPropertiesConverter.convertProperties(properties);
final List<ClientRegistration> registrations =
new ArrayList<>(new OAuth2ClientPropertiesMapper(props).asClientRegistrations().values());
if (registrations.isEmpty()) {
throw new IllegalArgumentException("OAuth2 authentication is enabled but no providers specified.");
}
new ArrayList<>(OAuth2ClientPropertiesRegistrationAdapter.getClientRegistrations(props).values());
return new InMemoryReactiveClientRegistrationRepository(registrations);
}

View file

@ -2,6 +2,7 @@ package com.provectus.kafka.ui.config.auth;
import java.util.Collection;
import java.util.Map;
import lombok.Value;
import org.springframework.security.core.GrantedAuthority;
import org.springframework.security.oauth2.core.user.OAuth2User;

View file

@ -2,6 +2,7 @@ package com.provectus.kafka.ui.config.auth;
import java.util.Collection;
import java.util.Map;
import lombok.Value;
import org.springframework.security.core.GrantedAuthority;
import org.springframework.security.oauth2.core.oidc.OidcIdToken;
import org.springframework.security.oauth2.core.oidc.OidcUserInfo;

View file

@ -1,14 +1,13 @@
package com.provectus.kafka.ui.config.auth.condition;
import com.provectus.kafka.ui.service.rbac.AbstractProviderCondition;
import org.jetbrains.annotations.NotNull;
import org.springframework.context.annotation.Condition;
import org.springframework.context.annotation.ConditionContext;
import org.springframework.core.type.AnnotatedTypeMetadata;
public class CognitoCondition extends AbstractProviderCondition implements Condition {
@Override
public boolean matches(final ConditionContext context, final @NotNull AnnotatedTypeMetadata metadata) {
public boolean matches(final ConditionContext context, final AnnotatedTypeMetadata metadata) {
return getRegisteredProvidersTypes(context.getEnvironment()).stream().anyMatch(a -> a.equalsIgnoreCase("cognito"));
}
}
}

View file

@ -2,19 +2,12 @@ package com.provectus.kafka.ui.controller;
import com.provectus.kafka.ui.exception.ClusterNotFoundException;
import com.provectus.kafka.ui.model.KafkaCluster;
import com.provectus.kafka.ui.model.rbac.AccessContext;
import com.provectus.kafka.ui.service.ClustersStorage;
import com.provectus.kafka.ui.service.audit.AuditService;
import com.provectus.kafka.ui.service.rbac.AccessControlService;
import org.springframework.beans.factory.annotation.Autowired;
import reactor.core.publisher.Mono;
import reactor.core.publisher.Signal;
public abstract class AbstractController {
protected ClustersStorage clustersStorage;
protected AccessControlService accessControlService;
protected AuditService auditService;
private ClustersStorage clustersStorage;
protected KafkaCluster getCluster(String name) {
return clustersStorage.getClusterByName(name)
@ -22,26 +15,8 @@ public abstract class AbstractController {
String.format("Cluster with name '%s' not found", name)));
}
protected Mono<Void> validateAccess(AccessContext context) {
return accessControlService.validateAccess(context);
}
protected void audit(AccessContext acxt, Signal<?> sig) {
auditService.audit(acxt, sig);
}
@Autowired
public void setClustersStorage(ClustersStorage clustersStorage) {
this.clustersStorage = clustersStorage;
}
@Autowired
public void setAccessControlService(AccessControlService accessControlService) {
this.accessControlService = accessControlService;
}
@Autowired
public void setAuditService(AuditService auditService) {
this.auditService = auditService;
}
}

View file

@ -13,6 +13,7 @@ import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Objects;
import java.util.stream.Collectors;
import javax.annotation.Nullable;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
@ -37,7 +38,7 @@ public class AccessController implements AuthorizationApi {
.filter(role -> user.groups().contains(role.getName()))
.map(role -> mapPermissions(role.getPermissions(), role.getClusters()))
.flatMap(Collection::stream)
.toList()
.collect(Collectors.toList())
)
.switchIfEmpty(Mono.just(Collections.emptyList()));
@ -69,10 +70,10 @@ public class AccessController implements AuthorizationApi {
.map(String::toUpperCase)
.map(this::mapAction)
.filter(Objects::nonNull)
.toList());
.collect(Collectors.toList()));
return dto;
})
.toList();
.collect(Collectors.toList());
}
@Nullable

View file

@ -2,15 +2,13 @@ package com.provectus.kafka.ui.controller;
import com.provectus.kafka.ui.api.AclsApi;
import com.provectus.kafka.ui.mapper.ClusterMapper;
import com.provectus.kafka.ui.model.CreateConsumerAclDTO;
import com.provectus.kafka.ui.model.CreateProducerAclDTO;
import com.provectus.kafka.ui.model.CreateStreamAppAclDTO;
import com.provectus.kafka.ui.model.KafkaAclDTO;
import com.provectus.kafka.ui.model.KafkaAclNamePatternTypeDTO;
import com.provectus.kafka.ui.model.KafkaAclResourceTypeDTO;
import com.provectus.kafka.ui.model.rbac.AccessContext;
import com.provectus.kafka.ui.model.rbac.permission.AclAction;
import com.provectus.kafka.ui.service.acl.AclsService;
import com.provectus.kafka.ui.service.rbac.AccessControlService;
import java.util.Optional;
import lombok.RequiredArgsConstructor;
import org.apache.kafka.common.resource.PatternType;
@ -27,6 +25,7 @@ import reactor.core.publisher.Mono;
public class AclsController extends AbstractController implements AclsApi {
private final AclsService aclsService;
private final AccessControlService accessControlService;
@Override
public Mono<ResponseEntity<Void>> createAcl(String clusterName, Mono<KafkaAclDTO> kafkaAclDto,
@ -34,14 +33,12 @@ public class AclsController extends AbstractController implements AclsApi {
AccessContext context = AccessContext.builder()
.cluster(clusterName)
.aclActions(AclAction.EDIT)
.operationName("createAcl")
.build();
return validateAccess(context)
return accessControlService.validateAccess(context)
.then(kafkaAclDto)
.map(ClusterMapper::toAclBinding)
.flatMap(binding -> aclsService.createAcl(getCluster(clusterName), binding))
.doOnEach(sig -> audit(context, sig))
.thenReturn(ResponseEntity.ok().build());
}
@ -51,14 +48,12 @@ public class AclsController extends AbstractController implements AclsApi {
AccessContext context = AccessContext.builder()
.cluster(clusterName)
.aclActions(AclAction.EDIT)
.operationName("deleteAcl")
.build();
return validateAccess(context)
return accessControlService.validateAccess(context)
.then(kafkaAclDto)
.map(ClusterMapper::toAclBinding)
.flatMap(binding -> aclsService.deleteAcl(getCluster(clusterName), binding))
.doOnEach(sig -> audit(context, sig))
.thenReturn(ResponseEntity.ok().build());
}
@ -71,7 +66,6 @@ public class AclsController extends AbstractController implements AclsApi {
AccessContext context = AccessContext.builder()
.cluster(clusterName)
.aclActions(AclAction.VIEW)
.operationName("listAcls")
.build();
var resourceType = Optional.ofNullable(resourceTypeDto)
@ -84,12 +78,12 @@ public class AclsController extends AbstractController implements AclsApi {
var filter = new ResourcePatternFilter(resourceType, resourceName, namePatternType);
return validateAccess(context).then(
return accessControlService.validateAccess(context).then(
Mono.just(
ResponseEntity.ok(
aclsService.listAcls(getCluster(clusterName), filter)
.map(ClusterMapper::toKafkaAclDto)))
).doOnEach(sig -> audit(context, sig));
);
}
@Override
@ -97,14 +91,12 @@ public class AclsController extends AbstractController implements AclsApi {
AccessContext context = AccessContext.builder()
.cluster(clusterName)
.aclActions(AclAction.VIEW)
.operationName("getAclAsCsv")
.build();
return validateAccess(context).then(
return accessControlService.validateAccess(context).then(
aclsService.getAclAsCsvString(getCluster(clusterName))
.map(ResponseEntity::ok)
.flatMap(Mono::just)
.doOnEach(sig -> audit(context, sig))
);
}
@ -113,64 +105,11 @@ public class AclsController extends AbstractController implements AclsApi {
AccessContext context = AccessContext.builder()
.cluster(clusterName)
.aclActions(AclAction.EDIT)
.operationName("syncAclsCsv")
.build();
return validateAccess(context)
return accessControlService.validateAccess(context)
.then(csvMono)
.flatMap(csv -> aclsService.syncAclWithAclCsv(getCluster(clusterName), csv))
.doOnEach(sig -> audit(context, sig))
.thenReturn(ResponseEntity.ok().build());
}
@Override
public Mono<ResponseEntity<Void>> createConsumerAcl(String clusterName,
Mono<CreateConsumerAclDTO> createConsumerAclDto,
ServerWebExchange exchange) {
AccessContext context = AccessContext.builder()
.cluster(clusterName)
.aclActions(AclAction.EDIT)
.operationName("createConsumerAcl")
.build();
return validateAccess(context)
.then(createConsumerAclDto)
.flatMap(req -> aclsService.createConsumerAcl(getCluster(clusterName), req))
.doOnEach(sig -> audit(context, sig))
.thenReturn(ResponseEntity.ok().build());
}
@Override
public Mono<ResponseEntity<Void>> createProducerAcl(String clusterName,
Mono<CreateProducerAclDTO> createProducerAclDto,
ServerWebExchange exchange) {
AccessContext context = AccessContext.builder()
.cluster(clusterName)
.aclActions(AclAction.EDIT)
.operationName("createProducerAcl")
.build();
return validateAccess(context)
.then(createProducerAclDto)
.flatMap(req -> aclsService.createProducerAcl(getCluster(clusterName), req))
.doOnEach(sig -> audit(context, sig))
.thenReturn(ResponseEntity.ok().build());
}
@Override
public Mono<ResponseEntity<Void>> createStreamAppAcl(String clusterName,
Mono<CreateStreamAppAclDTO> createStreamAppAclDto,
ServerWebExchange exchange) {
AccessContext context = AccessContext.builder()
.cluster(clusterName)
.aclActions(AclAction.EDIT)
.operationName("createStreamAppAcl")
.build();
return validateAccess(context)
.then(createStreamAppAclDto)
.flatMap(req -> aclsService.createStreamAppAcl(getCluster(clusterName), req))
.doOnEach(sig -> audit(context, sig))
.thenReturn(ResponseEntity.ok().build());
}
}

View file

@ -15,6 +15,7 @@ import com.provectus.kafka.ui.model.UploadedFileInfoDTO;
import com.provectus.kafka.ui.model.rbac.AccessContext;
import com.provectus.kafka.ui.service.ApplicationInfoService;
import com.provectus.kafka.ui.service.KafkaClusterFactory;
import com.provectus.kafka.ui.service.rbac.AccessControlService;
import com.provectus.kafka.ui.util.ApplicationRestarter;
import com.provectus.kafka.ui.util.DynamicConfigOperations;
import com.provectus.kafka.ui.util.DynamicConfigOperations.PropertiesStructure;
@ -37,7 +38,7 @@ import reactor.util.function.Tuples;
@Slf4j
@RestController
@RequiredArgsConstructor
public class ApplicationConfigController extends AbstractController implements ApplicationConfigApi {
public class ApplicationConfigController implements ApplicationConfigApi {
private static final PropertiesMapper MAPPER = Mappers.getMapper(PropertiesMapper.class);
@ -49,6 +50,7 @@ public class ApplicationConfigController extends AbstractController implements A
ApplicationConfigPropertiesDTO toDto(PropertiesStructure propertiesStructure);
}
private final AccessControlService accessControlService;
private final DynamicConfigOperations dynamicConfigOperations;
private final ApplicationRestarter restarter;
private final KafkaClusterFactory kafkaClusterFactory;
@ -61,69 +63,62 @@ public class ApplicationConfigController extends AbstractController implements A
@Override
public Mono<ResponseEntity<ApplicationConfigDTO>> getCurrentConfig(ServerWebExchange exchange) {
var context = AccessContext.builder()
.applicationConfigActions(VIEW)
.operationName("getCurrentConfig")
.build();
return validateAccess(context)
return accessControlService
.validateAccess(
AccessContext.builder()
.applicationConfigActions(VIEW)
.build()
)
.then(Mono.fromSupplier(() -> ResponseEntity.ok(
new ApplicationConfigDTO()
.properties(MAPPER.toDto(dynamicConfigOperations.getCurrentProperties()))
)))
.doOnEach(sig -> audit(context, sig));
)));
}
@Override
public Mono<ResponseEntity<Void>> restartWithConfig(Mono<RestartRequestDTO> restartRequestDto,
ServerWebExchange exchange) {
var context = AccessContext.builder()
.applicationConfigActions(EDIT)
.operationName("restartWithConfig")
.build();
return validateAccess(context)
return accessControlService
.validateAccess(
AccessContext.builder()
.applicationConfigActions(EDIT)
.build()
)
.then(restartRequestDto)
.doOnNext(restartDto -> {
var newConfig = MAPPER.fromDto(restartDto.getConfig().getProperties());
dynamicConfigOperations.persist(newConfig);
})
.doOnEach(sig -> audit(context, sig))
.doOnSuccess(dto -> restarter.requestRestart())
.map(dto -> ResponseEntity.ok().build());
.map(dto -> {
dynamicConfigOperations.persist(MAPPER.fromDto(dto.getConfig().getProperties()));
restarter.requestRestart();
return ResponseEntity.ok().build();
});
}
@Override
public Mono<ResponseEntity<UploadedFileInfoDTO>> uploadConfigRelatedFile(Flux<Part> fileFlux,
ServerWebExchange exchange) {
var context = AccessContext.builder()
.applicationConfigActions(EDIT)
.operationName("uploadConfigRelatedFile")
.build();
return validateAccess(context)
return accessControlService
.validateAccess(
AccessContext.builder()
.applicationConfigActions(EDIT)
.build()
)
.then(fileFlux.single())
.flatMap(file ->
dynamicConfigOperations.uploadConfigRelatedFile((FilePart) file)
.map(path -> new UploadedFileInfoDTO().location(path.toString()))
.map(ResponseEntity::ok))
.doOnEach(sig -> audit(context, sig));
.map(ResponseEntity::ok));
}
@Override
public Mono<ResponseEntity<ApplicationConfigValidationDTO>> validateConfig(Mono<ApplicationConfigDTO> configDto,
ServerWebExchange exchange) {
var context = AccessContext.builder()
.applicationConfigActions(EDIT)
.operationName("validateConfig")
.build();
return validateAccess(context)
.then(configDto)
return configDto
.flatMap(config -> {
PropertiesStructure newConfig = MAPPER.fromDto(config.getProperties());
ClustersProperties clustersProperties = newConfig.getKafka();
PropertiesStructure propertiesStructure = MAPPER.fromDto(config.getProperties());
ClustersProperties clustersProperties = propertiesStructure.getKafka();
return validateClustersConfig(clustersProperties)
.map(validations -> new ApplicationConfigValidationDTO().clusters(validations));
})
.map(ResponseEntity::ok)
.doOnEach(sig -> audit(context, sig));
.map(ResponseEntity::ok);
}
private Mono<Map<String, ClusterConfigValidationDTO>> validateClustersConfig(

View file

@ -36,10 +36,10 @@ public class AuthController {
+ " <meta name=\"description\" content=\"\">\n"
+ " <meta name=\"author\" content=\"\">\n"
+ " <title>Please sign in</title>\n"
+ " <link href=\"" + contextPath + "/static/css/bootstrap.min.css\" rel=\"stylesheet\" "
+ " <link href=\"/static/css/bootstrap.min.css\" rel=\"stylesheet\" "
+ "integrity=\"sha384-/Y6pD6FV/Vv2HJnA6t+vslU6fwYXjCFtcEpHbNJ0lyAFsXTsjBbfaDjzALeQsN6M\" "
+ "crossorigin=\"anonymous\">\n"
+ " <link href=\"" + contextPath + "/static/css/signin.css\" "
+ " <link href=\"/static/css/signin.css\" "
+ "rel=\"stylesheet\" crossorigin=\"anonymous\"/>\n"
+ " </head>\n"
+ " <body>\n"

View file

@ -11,9 +11,8 @@ import com.provectus.kafka.ui.model.BrokersLogdirsDTO;
import com.provectus.kafka.ui.model.rbac.AccessContext;
import com.provectus.kafka.ui.model.rbac.permission.ClusterConfigAction;
import com.provectus.kafka.ui.service.BrokerService;
import com.provectus.kafka.ui.service.rbac.AccessControlService;
import java.util.List;
import java.util.Map;
import javax.annotation.Nullable;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.springframework.http.ResponseEntity;
@ -26,79 +25,63 @@ import reactor.core.publisher.Mono;
@RequiredArgsConstructor
@Slf4j
public class BrokersController extends AbstractController implements BrokersApi {
private static final String BROKER_ID = "brokerId";
private final BrokerService brokerService;
private final ClusterMapper clusterMapper;
private final AccessControlService accessControlService;
@Override
public Mono<ResponseEntity<Flux<BrokerDTO>>> getBrokers(String clusterName,
ServerWebExchange exchange) {
var context = AccessContext.builder()
Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
.cluster(clusterName)
.operationName("getBrokers")
.build();
.build());
var job = brokerService.getBrokers(getCluster(clusterName)).map(clusterMapper::toBrokerDto);
return validateAccess(context)
.thenReturn(ResponseEntity.ok(job))
.doOnEach(sig -> audit(context, sig));
return validateAccess.thenReturn(ResponseEntity.ok(job));
}
@Override
public Mono<ResponseEntity<BrokerMetricsDTO>> getBrokersMetrics(String clusterName, Integer id,
ServerWebExchange exchange) {
var context = AccessContext.builder()
Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
.cluster(clusterName)
.operationName("getBrokersMetrics")
.operationParams(Map.of("id", id))
.build();
.build());
return validateAccess(context)
.then(
brokerService.getBrokerMetrics(getCluster(clusterName), id)
.map(clusterMapper::toBrokerMetrics)
.map(ResponseEntity::ok)
.onErrorReturn(ResponseEntity.notFound().build())
)
.doOnEach(sig -> audit(context, sig));
return validateAccess.then(
brokerService.getBrokerMetrics(getCluster(clusterName), id)
.map(clusterMapper::toBrokerMetrics)
.map(ResponseEntity::ok)
.onErrorReturn(ResponseEntity.notFound().build())
);
}
@Override
public Mono<ResponseEntity<Flux<BrokersLogdirsDTO>>> getAllBrokersLogdirs(String clusterName,
@Nullable List<Integer> brokers,
List<Integer> brokers,
ServerWebExchange exchange) {
List<Integer> brokerIds = brokers == null ? List.of() : brokers;
var context = AccessContext.builder()
Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
.cluster(clusterName)
.operationName("getAllBrokersLogdirs")
.operationParams(Map.of("brokerIds", brokerIds))
.build();
.build());
return validateAccess(context)
.thenReturn(ResponseEntity.ok(
brokerService.getAllBrokersLogdirs(getCluster(clusterName), brokerIds)))
.doOnEach(sig -> audit(context, sig));
return validateAccess.thenReturn(ResponseEntity.ok(
brokerService.getAllBrokersLogdirs(getCluster(clusterName), brokers)));
}
@Override
public Mono<ResponseEntity<Flux<BrokerConfigDTO>>> getBrokerConfig(String clusterName,
Integer id,
ServerWebExchange exchange) {
var context = AccessContext.builder()
Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
.cluster(clusterName)
.clusterConfigActions(ClusterConfigAction.VIEW)
.operationName("getBrokerConfig")
.operationParams(Map.of(BROKER_ID, id))
.build();
.build());
return validateAccess(context).thenReturn(
return validateAccess.thenReturn(
ResponseEntity.ok(
brokerService.getBrokerConfig(getCluster(clusterName), id)
.map(clusterMapper::toBrokerConfig))
).doOnEach(sig -> audit(context, sig));
);
}
@Override
@ -106,18 +89,16 @@ public class BrokersController extends AbstractController implements BrokersApi
Integer id,
Mono<BrokerLogdirUpdateDTO> brokerLogdir,
ServerWebExchange exchange) {
var context = AccessContext.builder()
Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
.cluster(clusterName)
.clusterConfigActions(ClusterConfigAction.VIEW, ClusterConfigAction.EDIT)
.operationName("updateBrokerTopicPartitionLogDir")
.operationParams(Map.of(BROKER_ID, id))
.build();
.build());
return validateAccess(context).then(
return validateAccess.then(
brokerLogdir
.flatMap(bld -> brokerService.updateBrokerLogDir(getCluster(clusterName), id, bld))
.map(ResponseEntity::ok)
).doOnEach(sig -> audit(context, sig));
);
}
@Override
@ -126,18 +107,16 @@ public class BrokersController extends AbstractController implements BrokersApi
String name,
Mono<BrokerConfigItemDTO> brokerConfig,
ServerWebExchange exchange) {
var context = AccessContext.builder()
Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
.cluster(clusterName)
.clusterConfigActions(ClusterConfigAction.VIEW, ClusterConfigAction.EDIT)
.operationName("updateBrokerConfigByName")
.operationParams(Map.of(BROKER_ID, id))
.build();
.build());
return validateAccess(context).then(
return validateAccess.then(
brokerConfig
.flatMap(bci -> brokerService.updateBrokerConfigByName(
getCluster(clusterName), id, name, bci.getValue()))
.map(ResponseEntity::ok)
).doOnEach(sig -> audit(context, sig));
);
}
}

View file

@ -6,6 +6,7 @@ import com.provectus.kafka.ui.model.ClusterMetricsDTO;
import com.provectus.kafka.ui.model.ClusterStatsDTO;
import com.provectus.kafka.ui.model.rbac.AccessContext;
import com.provectus.kafka.ui.service.ClusterService;
import com.provectus.kafka.ui.service.rbac.AccessControlService;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.springframework.http.ResponseEntity;
@ -19,6 +20,7 @@ import reactor.core.publisher.Mono;
@Slf4j
public class ClustersController extends AbstractController implements ClustersApi {
private final ClusterService clusterService;
private final AccessControlService accessControlService;
@Override
public Mono<ResponseEntity<Flux<ClusterDTO>>> getClusters(ServerWebExchange exchange) {
@ -33,16 +35,14 @@ public class ClustersController extends AbstractController implements ClustersAp
ServerWebExchange exchange) {
AccessContext context = AccessContext.builder()
.cluster(clusterName)
.operationName("getClusterMetrics")
.build();
return validateAccess(context)
return accessControlService.validateAccess(context)
.then(
clusterService.getClusterMetrics(getCluster(clusterName))
.map(ResponseEntity::ok)
.onErrorReturn(ResponseEntity.notFound().build())
)
.doOnEach(sig -> audit(context, sig));
);
}
@Override
@ -50,16 +50,14 @@ public class ClustersController extends AbstractController implements ClustersAp
ServerWebExchange exchange) {
AccessContext context = AccessContext.builder()
.cluster(clusterName)
.operationName("getClusterStats")
.build();
return validateAccess(context)
return accessControlService.validateAccess(context)
.then(
clusterService.getClusterStats(getCluster(clusterName))
.map(ResponseEntity::ok)
.onErrorReturn(ResponseEntity.notFound().build())
)
.doOnEach(sig -> audit(context, sig));
);
}
@Override
@ -68,11 +66,11 @@ public class ClustersController extends AbstractController implements ClustersAp
AccessContext context = AccessContext.builder()
.cluster(clusterName)
.operationName("updateClusterInfo")
.build();
return validateAccess(context)
.then(clusterService.updateCluster(getCluster(clusterName)).map(ResponseEntity::ok))
.doOnEach(sig -> audit(context, sig));
return accessControlService.validateAccess(context)
.then(
clusterService.updateCluster(getCluster(clusterName)).map(ResponseEntity::ok)
);
}
}

View file

@ -19,9 +19,11 @@ import com.provectus.kafka.ui.model.rbac.AccessContext;
import com.provectus.kafka.ui.model.rbac.permission.TopicAction;
import com.provectus.kafka.ui.service.ConsumerGroupService;
import com.provectus.kafka.ui.service.OffsetsResetService;
import com.provectus.kafka.ui.service.rbac.AccessControlService;
import java.util.Map;
import java.util.Optional;
import java.util.function.Supplier;
import java.util.stream.Collectors;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Value;
@ -39,6 +41,7 @@ public class ConsumerGroupsController extends AbstractController implements Cons
private final ConsumerGroupService consumerGroupService;
private final OffsetsResetService offsetsResetService;
private final AccessControlService accessControlService;
@Value("${consumer.groups.page.size:25}")
private int defaultConsumerGroupsPageSize;
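The @Value placeholder above makes the default consumer-groups page size configurable; overriding it is a one-line property (the value 50 is just an example), or the equivalent CONSUMER_GROUPS_PAGE_SIZE environment variable via relaxed binding:

  consumer:
    groups:
      page:
        size: 50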
@ -47,47 +50,44 @@ public class ConsumerGroupsController extends AbstractController implements Cons
public Mono<ResponseEntity<Void>> deleteConsumerGroup(String clusterName,
String id,
ServerWebExchange exchange) {
var context = AccessContext.builder()
Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
.cluster(clusterName)
.consumerGroup(id)
.consumerGroupActions(DELETE)
.operationName("deleteConsumerGroup")
.build();
.build());
return validateAccess(context)
.then(consumerGroupService.deleteConsumerGroupById(getCluster(clusterName), id))
.doOnEach(sig -> audit(context, sig))
.thenReturn(ResponseEntity.ok().build());
return validateAccess.then(
consumerGroupService.deleteConsumerGroupById(getCluster(clusterName), id)
.thenReturn(ResponseEntity.ok().build())
);
}
@Override
public Mono<ResponseEntity<ConsumerGroupDetailsDTO>> getConsumerGroup(String clusterName,
String consumerGroupId,
ServerWebExchange exchange) {
var context = AccessContext.builder()
Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
.cluster(clusterName)
.consumerGroup(consumerGroupId)
.consumerGroupActions(VIEW)
.operationName("getConsumerGroup")
.build();
.build());
return validateAccess(context)
.then(consumerGroupService.getConsumerGroupDetail(getCluster(clusterName), consumerGroupId)
return validateAccess.then(
consumerGroupService.getConsumerGroupDetail(getCluster(clusterName), consumerGroupId)
.map(ConsumerGroupMapper::toDetailsDto)
.map(ResponseEntity::ok))
.doOnEach(sig -> audit(context, sig));
.map(ResponseEntity::ok)
);
}
@Override
public Mono<ResponseEntity<Flux<ConsumerGroupDTO>>> getTopicConsumerGroups(String clusterName,
String topicName,
ServerWebExchange exchange) {
var context = AccessContext.builder()
Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
.cluster(clusterName)
.topic(topicName)
.topicActions(TopicAction.VIEW)
.operationName("getTopicConsumerGroups")
.build();
.build());
Mono<ResponseEntity<Flux<ConsumerGroupDTO>>> job =
consumerGroupService.getConsumerGroupsForTopic(getCluster(clusterName), topicName)
@ -99,9 +99,7 @@ public class ConsumerGroupsController extends AbstractController implements Cons
.map(ResponseEntity::ok)
.switchIfEmpty(Mono.just(ResponseEntity.notFound().build()));
return validateAccess(context)
.then(job)
.doOnEach(sig -> audit(context, sig));
return validateAccess.then(job);
}
@Override
@ -114,13 +112,12 @@ public class ConsumerGroupsController extends AbstractController implements Cons
SortOrderDTO sortOrderDto,
ServerWebExchange exchange) {
var context = AccessContext.builder()
Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
.cluster(clusterName)
// consumer group access validation is within the service
.operationName("getConsumerGroupsPage")
.build();
.build());
return validateAccess(context).then(
return validateAccess.then(
consumerGroupService.getConsumerGroupsPage(
getCluster(clusterName),
Optional.ofNullable(page).filter(i -> i > 0).orElse(1),
@ -131,7 +128,7 @@ public class ConsumerGroupsController extends AbstractController implements Cons
)
.map(this::convertPage)
.map(ResponseEntity::ok)
).doOnEach(sig -> audit(context, sig));
);
}
@Override
@ -140,13 +137,12 @@ public class ConsumerGroupsController extends AbstractController implements Cons
Mono<ConsumerGroupOffsetsResetDTO> resetDto,
ServerWebExchange exchange) {
return resetDto.flatMap(reset -> {
var context = AccessContext.builder()
Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
.cluster(clusterName)
.topic(reset.getTopic())
.topicActions(TopicAction.VIEW)
.consumerGroupActions(RESET_OFFSETS)
.operationName("resetConsumerGroupOffsets")
.build();
.build());
Supplier<Mono<Void>> mono = () -> {
var cluster = getCluster(clusterName);
@ -186,9 +182,7 @@ public class ConsumerGroupsController extends AbstractController implements Cons
}
};
return validateAccess(context)
.then(mono.get())
.doOnEach(sig -> audit(context, sig));
return validateAccess.then(mono.get());
}).thenReturn(ResponseEntity.ok().build());
}
@ -199,7 +193,7 @@ public class ConsumerGroupsController extends AbstractController implements Cons
.consumerGroups(consumerGroupConsumerGroupsPage.consumerGroups()
.stream()
.map(ConsumerGroupMapper::toDto)
.toList());
.collect(Collectors.toList()));
}
}

View file

@ -18,6 +18,7 @@ import com.provectus.kafka.ui.model.TaskDTO;
import com.provectus.kafka.ui.model.rbac.AccessContext;
import com.provectus.kafka.ui.model.rbac.permission.ConnectAction;
import com.provectus.kafka.ui.service.KafkaConnectService;
import com.provectus.kafka.ui.service.rbac.AccessControlService;
import java.util.Comparator;
import java.util.Map;
import java.util.Set;
@ -36,9 +37,8 @@ import reactor.core.publisher.Mono;
public class KafkaConnectController extends AbstractController implements KafkaConnectApi {
private static final Set<ConnectorActionDTO> RESTART_ACTIONS
= Set.of(RESTART, RESTART_FAILED_TASKS, RESTART_ALL_TASKS);
private static final String CONNECTOR_NAME = "connectorName";
private final KafkaConnectService kafkaConnectService;
private final AccessControlService accessControlService;
@Override
public Mono<ResponseEntity<Flux<ConnectDTO>>> getConnects(String clusterName,
@ -54,16 +54,15 @@ public class KafkaConnectController extends AbstractController implements KafkaC
public Mono<ResponseEntity<Flux<String>>> getConnectors(String clusterName, String connectName,
ServerWebExchange exchange) {
var context = AccessContext.builder()
Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
.cluster(clusterName)
.connect(connectName)
.connectActions(ConnectAction.VIEW)
.operationName("getConnectors")
.build();
.build());
return validateAccess(context)
.thenReturn(ResponseEntity.ok(kafkaConnectService.getConnectorNames(getCluster(clusterName), connectName)))
.doOnEach(sig -> audit(context, sig));
return validateAccess.thenReturn(
ResponseEntity.ok(kafkaConnectService.getConnectorNames(getCluster(clusterName), connectName))
);
}
@Override
@ -71,17 +70,16 @@ public class KafkaConnectController extends AbstractController implements KafkaC
@Valid Mono<NewConnectorDTO> connector,
ServerWebExchange exchange) {
var context = AccessContext.builder()
Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
.cluster(clusterName)
.connect(connectName)
.connectActions(ConnectAction.VIEW, ConnectAction.CREATE)
.operationName("createConnector")
.build();
.build());
return validateAccess(context).then(
return validateAccess.then(
kafkaConnectService.createConnector(getCluster(clusterName), connectName, connector)
.map(ResponseEntity::ok)
).doOnEach(sig -> audit(context, sig));
);
}
@Override
@ -89,18 +87,17 @@ public class KafkaConnectController extends AbstractController implements KafkaC
String connectorName,
ServerWebExchange exchange) {
var context = AccessContext.builder()
Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
.cluster(clusterName)
.connect(connectName)
.connectActions(ConnectAction.VIEW)
.connector(connectorName)
.operationName("getConnector")
.build();
.build());
return validateAccess(context).then(
return validateAccess.then(
kafkaConnectService.getConnector(getCluster(clusterName), connectName, connectorName)
.map(ResponseEntity::ok)
).doOnEach(sig -> audit(context, sig));
);
}
@Override
@ -108,18 +105,16 @@ public class KafkaConnectController extends AbstractController implements KafkaC
String connectorName,
ServerWebExchange exchange) {
var context = AccessContext.builder()
Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
.cluster(clusterName)
.connect(connectName)
.connectActions(ConnectAction.VIEW, ConnectAction.EDIT)
.operationName("deleteConnector")
.operationParams(Map.of(CONNECTOR_NAME, connectName))
.build();
.build());
return validateAccess(context).then(
return validateAccess.then(
kafkaConnectService.deleteConnector(getCluster(clusterName), connectName, connectorName)
.map(ResponseEntity::ok)
).doOnEach(sig -> audit(context, sig));
);
}
@ -131,23 +126,14 @@ public class KafkaConnectController extends AbstractController implements KafkaC
SortOrderDTO sortOrder,
ServerWebExchange exchange
) {
var context = AccessContext.builder()
.cluster(clusterName)
.connectActions(ConnectAction.VIEW, ConnectAction.EDIT)
.operationName("getAllConnectors")
.build();
var comparator = sortOrder == null || sortOrder.equals(SortOrderDTO.ASC)
? getConnectorsComparator(orderBy)
: getConnectorsComparator(orderBy).reversed();
Flux<FullConnectorInfoDTO> job = kafkaConnectService.getAllConnectors(getCluster(clusterName), search)
.filterWhen(dto -> accessControlService.isConnectAccessible(dto.getConnect(), clusterName))
.filterWhen(dto -> accessControlService.isConnectorAccessible(dto.getConnect(), dto.getName(), clusterName))
.sort(comparator);
.filterWhen(dto -> accessControlService.isConnectorAccessible(dto.getConnect(), dto.getName(), clusterName));
return Mono.just(ResponseEntity.ok(job))
.doOnEach(sig -> audit(context, sig));
return Mono.just(ResponseEntity.ok(job.sort(comparator)));
}
@Override
@ -156,18 +142,17 @@ public class KafkaConnectController extends AbstractController implements KafkaC
String connectorName,
ServerWebExchange exchange) {
var context = AccessContext.builder()
Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
.cluster(clusterName)
.connect(connectName)
.connectActions(ConnectAction.VIEW)
.operationName("getConnectorConfig")
.build();
.build());
return validateAccess(context).then(
return validateAccess.then(
kafkaConnectService
.getConnectorConfig(getCluster(clusterName), connectName, connectorName)
.map(ResponseEntity::ok)
).doOnEach(sig -> audit(context, sig));
);
}
@Override
@ -176,19 +161,16 @@ public class KafkaConnectController extends AbstractController implements KafkaC
Mono<Map<String, Object>> requestBody,
ServerWebExchange exchange) {
var context = AccessContext.builder()
Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
.cluster(clusterName)
.connect(connectName)
.connectActions(ConnectAction.VIEW, ConnectAction.EDIT)
.operationName("setConnectorConfig")
.operationParams(Map.of(CONNECTOR_NAME, connectorName))
.build();
.build());
return validateAccess(context).then(
kafkaConnectService
.setConnectorConfig(getCluster(clusterName), connectName, connectorName, requestBody)
.map(ResponseEntity::ok))
.doOnEach(sig -> audit(context, sig));
return validateAccess.then(
kafkaConnectService
.setConnectorConfig(getCluster(clusterName), connectName, connectorName, requestBody)
.map(ResponseEntity::ok));
}
@Override
@ -196,6 +178,7 @@ public class KafkaConnectController extends AbstractController implements KafkaC
String connectorName,
ConnectorActionDTO action,
ServerWebExchange exchange) {
ConnectAction[] connectActions;
if (RESTART_ACTIONS.contains(action)) {
connectActions = new ConnectAction[] {ConnectAction.VIEW, ConnectAction.RESTART};
@ -203,19 +186,17 @@ public class KafkaConnectController extends AbstractController implements KafkaC
connectActions = new ConnectAction[] {ConnectAction.VIEW, ConnectAction.EDIT};
}
var context = AccessContext.builder()
Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
.cluster(clusterName)
.connect(connectName)
.connectActions(connectActions)
.operationName("updateConnectorState")
.operationParams(Map.of(CONNECTOR_NAME, connectorName))
.build();
.build());
return validateAccess(context).then(
return validateAccess.then(
kafkaConnectService
.updateConnectorState(getCluster(clusterName), connectName, connectorName, action)
.map(ResponseEntity::ok)
).doOnEach(sig -> audit(context, sig));
);
}
@Override
@ -223,19 +204,17 @@ public class KafkaConnectController extends AbstractController implements KafkaC
String connectName,
String connectorName,
ServerWebExchange exchange) {
var context = AccessContext.builder()
Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
.cluster(clusterName)
.connect(connectName)
.connectActions(ConnectAction.VIEW)
.operationName("getConnectorTasks")
.operationParams(Map.of(CONNECTOR_NAME, connectorName))
.build();
.build());
return validateAccess(context).thenReturn(
return validateAccess.thenReturn(
ResponseEntity
.ok(kafkaConnectService
.getConnectorTasks(getCluster(clusterName), connectName, connectorName))
).doOnEach(sig -> audit(context, sig));
);
}
@Override
@ -243,37 +222,34 @@ public class KafkaConnectController extends AbstractController implements KafkaC
String connectorName, Integer taskId,
ServerWebExchange exchange) {
var context = AccessContext.builder()
Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
.cluster(clusterName)
.connect(connectName)
.connectActions(ConnectAction.VIEW, ConnectAction.RESTART)
.operationName("restartConnectorTask")
.operationParams(Map.of(CONNECTOR_NAME, connectorName))
.build();
.build());
return validateAccess(context).then(
return validateAccess.then(
kafkaConnectService
.restartConnectorTask(getCluster(clusterName), connectName, connectorName, taskId)
.map(ResponseEntity::ok)
).doOnEach(sig -> audit(context, sig));
);
}
@Override
public Mono<ResponseEntity<Flux<ConnectorPluginDTO>>> getConnectorPlugins(
String clusterName, String connectName, ServerWebExchange exchange) {
var context = AccessContext.builder()
Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
.cluster(clusterName)
.connect(connectName)
.connectActions(ConnectAction.VIEW)
.operationName("getConnectorPlugins")
.build();
.build());
return validateAccess(context).then(
return validateAccess.then(
Mono.just(
ResponseEntity.ok(
kafkaConnectService.getConnectorPlugins(getCluster(clusterName), connectName)))
).doOnEach(sig -> audit(context, sig));
);
}
@Override

View file

@ -10,6 +10,7 @@ import com.provectus.kafka.ui.model.KsqlTableResponseDTO;
import com.provectus.kafka.ui.model.rbac.AccessContext;
import com.provectus.kafka.ui.model.rbac.permission.KsqlAction;
import com.provectus.kafka.ui.service.ksql.KsqlServiceV2;
import com.provectus.kafka.ui.service.rbac.AccessControlService;
import java.util.List;
import java.util.Map;
import java.util.Optional;
@ -27,42 +28,39 @@ import reactor.core.publisher.Mono;
public class KsqlController extends AbstractController implements KsqlApi {
private final KsqlServiceV2 ksqlServiceV2;
private final AccessControlService accessControlService;
@Override
public Mono<ResponseEntity<KsqlCommandV2ResponseDTO>> executeKsql(String clusterName,
Mono<KsqlCommandV2DTO> ksqlCmdDo,
Mono<KsqlCommandV2DTO>
ksqlCommand2Dto,
ServerWebExchange exchange) {
return ksqlCmdDo.flatMap(
command -> {
var context = AccessContext.builder()
.cluster(clusterName)
.ksqlActions(KsqlAction.EXECUTE)
.operationName("executeKsql")
.operationParams(command)
.build();
return validateAccess(context).thenReturn(
new KsqlCommandV2ResponseDTO().pipeId(
ksqlServiceV2.registerCommand(
getCluster(clusterName),
command.getKsql(),
Optional.ofNullable(command.getStreamsProperties()).orElse(Map.of()))))
.doOnEach(sig -> audit(context, sig));
}
)
.map(ResponseEntity::ok);
Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
.cluster(clusterName)
.ksqlActions(KsqlAction.EXECUTE)
.build());
return validateAccess.then(
ksqlCommand2Dto.map(dto -> {
var id = ksqlServiceV2.registerCommand(
getCluster(clusterName),
dto.getKsql(),
Optional.ofNullable(dto.getStreamsProperties()).orElse(Map.of()));
return new KsqlCommandV2ResponseDTO().pipeId(id);
}).map(ResponseEntity::ok)
);
}
@Override
public Mono<ResponseEntity<Flux<KsqlResponseDTO>>> openKsqlResponsePipe(String clusterName,
String pipeId,
ServerWebExchange exchange) {
var context = AccessContext.builder()
Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
.cluster(clusterName)
.ksqlActions(KsqlAction.EXECUTE)
.operationName("openKsqlResponsePipe")
.build();
.build());
return validateAccess(context).thenReturn(
return validateAccess.thenReturn(
ResponseEntity.ok(ksqlServiceV2.execute(pipeId)
.map(table -> new KsqlResponseDTO()
.table(
@ -76,28 +74,22 @@ public class KsqlController extends AbstractController implements KsqlApi {
@Override
public Mono<ResponseEntity<Flux<KsqlStreamDescriptionDTO>>> listStreams(String clusterName,
ServerWebExchange exchange) {
var context = AccessContext.builder()
Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
.cluster(clusterName)
.ksqlActions(KsqlAction.EXECUTE)
.operationName("listStreams")
.build();
.build());
return validateAccess(context)
.thenReturn(ResponseEntity.ok(ksqlServiceV2.listStreams(getCluster(clusterName))))
.doOnEach(sig -> audit(context, sig));
return validateAccess.thenReturn(ResponseEntity.ok(ksqlServiceV2.listStreams(getCluster(clusterName))));
}
@Override
public Mono<ResponseEntity<Flux<KsqlTableDescriptionDTO>>> listTables(String clusterName,
ServerWebExchange exchange) {
var context = AccessContext.builder()
Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
.cluster(clusterName)
.ksqlActions(KsqlAction.EXECUTE)
.operationName("listTables")
.build();
.build());
return validateAccess(context)
.thenReturn(ResponseEntity.ok(ksqlServiceV2.listTables(getCluster(clusterName))))
.doOnEach(sig -> audit(context, sig));
return validateAccess.thenReturn(ResponseEntity.ok(ksqlServiceV2.listTables(getCluster(clusterName))));
}
}

View file

@ -15,16 +15,13 @@ import com.provectus.kafka.ui.model.MessageFilterTypeDTO;
import com.provectus.kafka.ui.model.SeekDirectionDTO;
import com.provectus.kafka.ui.model.SeekTypeDTO;
import com.provectus.kafka.ui.model.SerdeUsageDTO;
import com.provectus.kafka.ui.model.SmartFilterTestExecutionDTO;
import com.provectus.kafka.ui.model.SmartFilterTestExecutionResultDTO;
import com.provectus.kafka.ui.model.TopicMessageEventDTO;
import com.provectus.kafka.ui.model.TopicSerdeSuggestionDTO;
import com.provectus.kafka.ui.model.rbac.AccessContext;
import com.provectus.kafka.ui.model.rbac.permission.AuditAction;
import com.provectus.kafka.ui.model.rbac.permission.TopicAction;
import com.provectus.kafka.ui.service.DeserializationService;
import com.provectus.kafka.ui.service.MessagesService;
import com.provectus.kafka.ui.util.DynamicConfigOperations;
import com.provectus.kafka.ui.service.rbac.AccessControlService;
import java.util.List;
import java.util.Map;
import java.util.Optional;
@ -32,7 +29,6 @@ import javax.annotation.Nullable;
import javax.validation.Valid;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.tuple.Pair;
import org.apache.kafka.common.TopicPartition;
import org.springframework.http.ResponseEntity;
@ -49,34 +45,26 @@ public class MessagesController extends AbstractController implements MessagesAp
private final MessagesService messagesService;
private final DeserializationService deserializationService;
private final DynamicConfigOperations dynamicConfigOperations;
private final AccessControlService accessControlService;
@Override
public Mono<ResponseEntity<Void>> deleteTopicMessages(
String clusterName, String topicName, @Valid List<Integer> partitions,
ServerWebExchange exchange) {
var context = AccessContext.builder()
Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
.cluster(clusterName)
.topic(topicName)
.topicActions(MESSAGES_DELETE)
.build();
.build());
return validateAccess(context).<ResponseEntity<Void>>then(
return validateAccess.then(
messagesService.deleteTopicMessages(
getCluster(clusterName),
topicName,
Optional.ofNullable(partitions).orElse(List.of())
).thenReturn(ResponseEntity.ok().build())
).doOnEach(sig -> audit(context, sig));
}
@Override
public Mono<ResponseEntity<SmartFilterTestExecutionResultDTO>> executeSmartFilterTest(
Mono<SmartFilterTestExecutionDTO> smartFilterTestExecutionDto, ServerWebExchange exchange) {
return smartFilterTestExecutionDto
.map(MessagesService::execSmartFilterTest)
.map(ResponseEntity::ok);
);
}
@Override
@ -91,19 +79,11 @@ public class MessagesController extends AbstractController implements MessagesAp
String keySerde,
String valueSerde,
ServerWebExchange exchange) {
var contextBuilder = AccessContext.builder()
final Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
.cluster(clusterName)
.topic(topicName)
.topicActions(MESSAGES_READ)
.operationName("getTopicMessages");
if (StringUtils.isNoneEmpty(q) && MessageFilterTypeDTO.GROOVY_SCRIPT == filterQueryType) {
dynamicConfigOperations.checkIfFilteringGroovyEnabled();
}
if (auditService.isAuditTopic(getCluster(clusterName), topicName)) {
contextBuilder.auditActions(AuditAction.VIEW);
}
.build());
seekType = seekType != null ? seekType : SeekTypeDTO.BEGINNING;
seekDirection = seekDirection != null ? seekDirection : SeekDirectionDTO.FORWARD;
@ -122,10 +102,7 @@ public class MessagesController extends AbstractController implements MessagesAp
)
);
var context = contextBuilder.build();
return validateAccess(context)
.then(job)
.doOnEach(sig -> audit(context, sig));
return validateAccess.then(job);
}
@Override
@ -133,18 +110,17 @@ public class MessagesController extends AbstractController implements MessagesAp
String clusterName, String topicName, @Valid Mono<CreateTopicMessageDTO> createTopicMessage,
ServerWebExchange exchange) {
var context = AccessContext.builder()
Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
.cluster(clusterName)
.topic(topicName)
.topicActions(MESSAGES_PRODUCE)
.operationName("sendTopicMessages")
.build();
.build());
return validateAccess(context).then(
return validateAccess.then(
createTopicMessage.flatMap(msg ->
messagesService.sendMessage(getCluster(clusterName), topicName, msg).then()
).map(ResponseEntity::ok)
).doOnEach(sig -> audit(context, sig));
);
}
/**
@ -180,12 +156,12 @@ public class MessagesController extends AbstractController implements MessagesAp
String topicName,
SerdeUsageDTO use,
ServerWebExchange exchange) {
var context = AccessContext.builder()
Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
.cluster(clusterName)
.topic(topicName)
.topicActions(TopicAction.VIEW)
.operationName("getSerdes")
.build();
.build());
TopicSerdeSuggestionDTO dto = new TopicSerdeSuggestionDTO()
.key(use == SerdeUsageDTO.SERIALIZE
@@ -195,14 +171,10 @@ public class MessagesController extends AbstractController implements MessagesAp
? deserializationService.getSerdesForSerialize(getCluster(clusterName), topicName, VALUE)
: deserializationService.getSerdesForDeserialize(getCluster(clusterName), topicName, VALUE));
return validateAccess(context).then(
return validateAccess.then(
Mono.just(dto)
.subscribeOn(Schedulers.boundedElastic())
.map(ResponseEntity::ok)
);
}
}
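The same two shapes alternate throughout the controller hunks in this compare: one side builds an AccessContext, runs it through the validateAccess(context) helper and mirrors every reactive signal into the audit log via doOnEach(sig -> audit(context, sig)); the other side simply chains accessControlService.validateAccess(...) in front of the actual work. A minimal reactor-only sketch of the two pipelines, for orientation (Mono.empty() stands in for the access check, println for the real audit call; the class and variable names below are illustrative and not taken from the repo):

import reactor.core.publisher.Mono;

public class AccessPatternSketch {
  public static void main(String[] args) {
    Mono<Void> validateAccess = Mono.empty();          // stands in for accessControlService.validateAccess(...)
    Mono<String> work = Mono.just("messages deleted"); // stands in for e.g. messagesService.deleteTopicMessages(...)

    // Plain variant: validate first, then run the work.
    validateAccess.then(work)
        .subscribe(result -> System.out.println("plain: " + result));

    // Audit-aware variant: the same chain, but every signal (next/complete/error)
    // is additionally mirrored to an audit sink via doOnEach.
    validateAccess.then(work)
        .doOnEach(signal -> System.out.println("audit: " + signal))
        .subscribe(result -> System.out.println("audited: " + result));
  }
}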


@@ -13,8 +13,9 @@ import com.provectus.kafka.ui.model.SchemaSubjectsResponseDTO;
import com.provectus.kafka.ui.model.rbac.AccessContext;
import com.provectus.kafka.ui.model.rbac.permission.SchemaAction;
import com.provectus.kafka.ui.service.SchemaRegistryService;
import com.provectus.kafka.ui.service.rbac.AccessControlService;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import javax.validation.Valid;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
@@ -35,6 +36,7 @@ public class SchemasController extends AbstractController implements SchemasApi
private final KafkaSrMapper kafkaSrMapper = new KafkaSrMapperImpl();
private final SchemaRegistryService schemaRegistryService;
private final AccessControlService accessControlService;
@Override
protected KafkaCluster getCluster(String clusterName) {
@@ -49,14 +51,13 @@ public class SchemasController extends AbstractController implements SchemasApi
public Mono<ResponseEntity<CompatibilityCheckResponseDTO>> checkSchemaCompatibility(
String clusterName, String subject, @Valid Mono<NewSchemaSubjectDTO> newSchemaSubjectMono,
ServerWebExchange exchange) {
var context = AccessContext.builder()
Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
.cluster(clusterName)
.schema(subject)
.schemaActions(SchemaAction.VIEW)
.operationName("checkSchemaCompatibility")
.build();
.build());
return validateAccess(context).then(
return validateAccess.then(
newSchemaSubjectMono.flatMap(subjectDTO ->
schemaRegistryService.checksSchemaCompatibility(
getCluster(clusterName),
@@ -65,20 +66,19 @@ public class SchemasController extends AbstractController implements SchemasApi
))
.map(kafkaSrMapper::toDto)
.map(ResponseEntity::ok)
).doOnEach(sig -> audit(context, sig));
);
}
@Override
public Mono<ResponseEntity<SchemaSubjectDTO>> createNewSchema(
String clusterName, @Valid Mono<NewSchemaSubjectDTO> newSchemaSubjectMono,
ServerWebExchange exchange) {
var context = AccessContext.builder()
Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
.cluster(clusterName)
.schemaActions(SchemaAction.CREATE)
.operationName("createNewSchema")
.build();
.build());
return validateAccess(context).then(
return validateAccess.then(
newSchemaSubjectMono.flatMap(newSubject ->
schemaRegistryService.registerNewSchema(
getCluster(clusterName),
@@ -87,22 +87,20 @@ public class SchemasController extends AbstractController implements SchemasApi
)
).map(kafkaSrMapper::toDto)
.map(ResponseEntity::ok)
).doOnEach(sig -> audit(context, sig));
);
}
@Override
public Mono<ResponseEntity<Void>> deleteLatestSchema(
String clusterName, String subject, ServerWebExchange exchange) {
var context = AccessContext.builder()
Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
.cluster(clusterName)
.schema(subject)
.schemaActions(SchemaAction.DELETE)
.operationName("deleteLatestSchema")
.build();
.build());
return validateAccess(context).then(
return validateAccess.then(
schemaRegistryService.deleteLatestSchemaSubject(getCluster(clusterName), subject)
.doOnEach(sig -> audit(context, sig))
.thenReturn(ResponseEntity.ok().build())
);
}
@@ -110,16 +108,14 @@ public class SchemasController extends AbstractController implements SchemasApi
@Override
public Mono<ResponseEntity<Void>> deleteSchema(
String clusterName, String subject, ServerWebExchange exchange) {
var context = AccessContext.builder()
Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
.cluster(clusterName)
.schema(subject)
.schemaActions(SchemaAction.DELETE)
.operationName("deleteSchema")
.build();
.build());
return validateAccess(context).then(
return validateAccess.then(
schemaRegistryService.deleteSchemaSubjectEntirely(getCluster(clusterName), subject)
.doOnEach(sig -> audit(context, sig))
.thenReturn(ResponseEntity.ok().build())
);
}
@@ -127,16 +123,14 @@ public class SchemasController extends AbstractController implements SchemasApi
@Override
public Mono<ResponseEntity<Void>> deleteSchemaByVersion(
String clusterName, String subjectName, Integer version, ServerWebExchange exchange) {
var context = AccessContext.builder()
Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
.cluster(clusterName)
.schema(subjectName)
.schemaActions(SchemaAction.DELETE)
.operationName("deleteSchemaByVersion")
.build();
.build());
return validateAccess(context).then(
return validateAccess.then(
schemaRegistryService.deleteSchemaSubjectByVersion(getCluster(clusterName), subjectName, version)
.doOnEach(sig -> audit(context, sig))
.thenReturn(ResponseEntity.ok().build())
);
}
@@ -144,20 +138,16 @@ public class SchemasController extends AbstractController implements SchemasApi
@Override
public Mono<ResponseEntity<Flux<SchemaSubjectDTO>>> getAllVersionsBySubject(
String clusterName, String subjectName, ServerWebExchange exchange) {
var context = AccessContext.builder()
Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
.cluster(clusterName)
.schema(subjectName)
.schemaActions(SchemaAction.VIEW)
.operationName("getAllVersionsBySubject")
.build();
.build());
Flux<SchemaSubjectDTO> schemas =
schemaRegistryService.getAllVersionsBySubject(getCluster(clusterName), subjectName)
.map(kafkaSrMapper::toDto);
return validateAccess(context)
.thenReturn(ResponseEntity.ok(schemas))
.doOnEach(sig -> audit(context, sig));
return validateAccess.thenReturn(ResponseEntity.ok(schemas));
}
@Override
@@ -173,37 +163,34 @@ public class SchemasController extends AbstractController implements SchemasApi
public Mono<ResponseEntity<SchemaSubjectDTO>> getLatestSchema(String clusterName,
String subject,
ServerWebExchange exchange) {
var context = AccessContext.builder()
Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
.cluster(clusterName)
.schema(subject)
.schemaActions(SchemaAction.VIEW)
.operationName("getLatestSchema")
.build();
.build());
return validateAccess(context).then(
return validateAccess.then(
schemaRegistryService.getLatestSchemaVersionBySubject(getCluster(clusterName), subject)
.map(kafkaSrMapper::toDto)
.map(ResponseEntity::ok)
).doOnEach(sig -> audit(context, sig));
);
}
@Override
public Mono<ResponseEntity<SchemaSubjectDTO>> getSchemaByVersion(
String clusterName, String subject, Integer version, ServerWebExchange exchange) {
var context = AccessContext.builder()
Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
.cluster(clusterName)
.schema(subject)
.schemaActions(SchemaAction.VIEW)
.operationName("getSchemaByVersion")
.operationParams(Map.of("subject", subject, "version", version))
.build();
.build());
return validateAccess(context).then(
return validateAccess.then(
schemaRegistryService.getSchemaSubjectByVersion(
getCluster(clusterName), subject, version)
.map(kafkaSrMapper::toDto)
.map(ResponseEntity::ok)
).doOnEach(sig -> audit(context, sig));
);
}
@Override
@@ -212,11 +199,6 @@ public class SchemasController extends AbstractController implements SchemasApi
@Valid Integer perPage,
@Valid String search,
ServerWebExchange serverWebExchange) {
var context = AccessContext.builder()
.cluster(clusterName)
.operationName("getSchemas")
.build();
return schemaRegistryService
.getAllSubjectNames(getCluster(clusterName))
.flatMapIterable(l -> l)
@@ -234,32 +216,29 @@ public class SchemasController extends AbstractController implements SchemasApi
List<String> subjectsToRender = filteredSubjects.stream()
.skip(subjectToSkip)
.limit(pageSize)
.toList();
.collect(Collectors.toList());
return schemaRegistryService.getAllLatestVersionSchemas(getCluster(clusterName), subjectsToRender)
.map(subjs -> subjs.stream().map(kafkaSrMapper::toDto).toList())
.map(subjs -> new SchemaSubjectsResponseDTO().pageCount(totalPages).schemas(subjs));
}).map(ResponseEntity::ok)
.doOnEach(sig -> audit(context, sig));
}).map(ResponseEntity::ok);
}
@Override
public Mono<ResponseEntity<Void>> updateGlobalSchemaCompatibilityLevel(
String clusterName, @Valid Mono<CompatibilityLevelDTO> compatibilityLevelMono,
ServerWebExchange exchange) {
var context = AccessContext.builder()
Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
.cluster(clusterName)
.schemaActions(SchemaAction.MODIFY_GLOBAL_COMPATIBILITY)
.operationName("updateGlobalSchemaCompatibilityLevel")
.build();
.build());
return validateAccess(context).then(
return validateAccess.then(
compatibilityLevelMono
.flatMap(compatibilityLevelDTO ->
schemaRegistryService.updateGlobalSchemaCompatibility(
getCluster(clusterName),
kafkaSrMapper.fromDto(compatibilityLevelDTO.getCompatibility())
))
.doOnEach(sig -> audit(context, sig))
.thenReturn(ResponseEntity.ok().build())
);
}
@@ -268,14 +247,12 @@ public class SchemasController extends AbstractController implements SchemasApi
public Mono<ResponseEntity<Void>> updateSchemaCompatibilityLevel(
String clusterName, String subject, @Valid Mono<CompatibilityLevelDTO> compatibilityLevelMono,
ServerWebExchange exchange) {
var context = AccessContext.builder()
Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
.cluster(clusterName)
.schemaActions(SchemaAction.EDIT)
.operationName("updateSchemaCompatibilityLevel")
.operationParams(Map.of("subject", subject))
.build();
.build());
return validateAccess(context).then(
return validateAccess.then(
compatibilityLevelMono
.flatMap(compatibilityLevelDTO ->
schemaRegistryService.updateSchemaCompatibility(
@@ -283,7 +260,6 @@ public class SchemasController extends AbstractController implements SchemasApi
subject,
kafkaSrMapper.fromDto(compatibilityLevelDTO.getCompatibility())
))
.doOnEach(sig -> audit(context, sig))
.thenReturn(ResponseEntity.ok().build())
);
}


@@ -22,15 +22,14 @@ import com.provectus.kafka.ui.model.TopicConfigDTO;
import com.provectus.kafka.ui.model.TopicCreationDTO;
import com.provectus.kafka.ui.model.TopicDTO;
import com.provectus.kafka.ui.model.TopicDetailsDTO;
import com.provectus.kafka.ui.model.TopicProducerStateDTO;
import com.provectus.kafka.ui.model.TopicUpdateDTO;
import com.provectus.kafka.ui.model.TopicsResponseDTO;
import com.provectus.kafka.ui.model.rbac.AccessContext;
import com.provectus.kafka.ui.service.TopicsService;
import com.provectus.kafka.ui.service.analyze.TopicAnalysisService;
import com.provectus.kafka.ui.service.rbac.AccessControlService;
import java.util.Comparator;
import java.util.List;
import java.util.Map;
import javax.validation.Valid;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
@@ -52,79 +51,70 @@ public class TopicsController extends AbstractController implements TopicsApi {
private final TopicsService topicsService;
private final TopicAnalysisService topicAnalysisService;
private final ClusterMapper clusterMapper;
private final AccessControlService accessControlService;
@Override
public Mono<ResponseEntity<TopicDTO>> createTopic(
String clusterName, @Valid Mono<TopicCreationDTO> topicCreationMono, ServerWebExchange exchange) {
return topicCreationMono.flatMap(topicCreation -> {
var context = AccessContext.builder()
.cluster(clusterName)
.topicActions(CREATE)
.operationName("createTopic")
.operationParams(topicCreation)
.build();
String clusterName, @Valid Mono<TopicCreationDTO> topicCreation, ServerWebExchange exchange) {
return validateAccess(context)
.then(topicsService.createTopic(getCluster(clusterName), topicCreation))
.map(clusterMapper::toTopic)
.map(s -> new ResponseEntity<>(s, HttpStatus.OK))
.switchIfEmpty(Mono.just(ResponseEntity.notFound().build()))
.doOnEach(sig -> audit(context, sig));
});
Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
.cluster(clusterName)
.topicActions(CREATE)
.build());
return validateAccess.then(
topicsService.createTopic(getCluster(clusterName), topicCreation)
.map(clusterMapper::toTopic)
.map(s -> new ResponseEntity<>(s, HttpStatus.OK))
.switchIfEmpty(Mono.just(ResponseEntity.notFound().build()))
);
}
@Override
public Mono<ResponseEntity<TopicDTO>> recreateTopic(String clusterName,
String topicName, ServerWebExchange exchange) {
var context = AccessContext.builder()
Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
.cluster(clusterName)
.topic(topicName)
.topicActions(VIEW, CREATE, DELETE)
.operationName("recreateTopic")
.build();
.build());
return validateAccess(context).then(
return validateAccess.then(
topicsService.recreateTopic(getCluster(clusterName), topicName)
.map(clusterMapper::toTopic)
.map(s -> new ResponseEntity<>(s, HttpStatus.CREATED))
).doOnEach(sig -> audit(context, sig));
);
}
@Override
public Mono<ResponseEntity<TopicDTO>> cloneTopic(
String clusterName, String topicName, String newTopicName, ServerWebExchange exchange) {
var context = AccessContext.builder()
Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
.cluster(clusterName)
.topic(topicName)
.topicActions(VIEW, CREATE)
.operationName("cloneTopic")
.operationParams(Map.of("newTopicName", newTopicName))
.build();
.build());
return validateAccess(context)
.then(topicsService.cloneTopic(getCluster(clusterName), topicName, newTopicName)
.map(clusterMapper::toTopic)
.map(s -> new ResponseEntity<>(s, HttpStatus.CREATED))
).doOnEach(sig -> audit(context, sig));
return validateAccess.then(topicsService.cloneTopic(getCluster(clusterName), topicName, newTopicName)
.map(clusterMapper::toTopic)
.map(s -> new ResponseEntity<>(s, HttpStatus.CREATED))
);
}
@Override
public Mono<ResponseEntity<Void>> deleteTopic(
String clusterName, String topicName, ServerWebExchange exchange) {
var context = AccessContext.builder()
Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
.cluster(clusterName)
.topic(topicName)
.topicActions(DELETE)
.operationName("deleteTopic")
.build();
.build());
return validateAccess(context)
.then(
topicsService.deleteTopic(getCluster(clusterName), topicName)
.thenReturn(ResponseEntity.ok().<Void>build())
).doOnEach(sig -> audit(context, sig));
return validateAccess.then(
topicsService.deleteTopic(getCluster(clusterName), topicName).map(ResponseEntity::ok)
);
}
@@ -132,40 +122,38 @@ public class TopicsController extends AbstractController implements TopicsApi {
public Mono<ResponseEntity<Flux<TopicConfigDTO>>> getTopicConfigs(
String clusterName, String topicName, ServerWebExchange exchange) {
var context = AccessContext.builder()
Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
.cluster(clusterName)
.topic(topicName)
.topicActions(VIEW)
.operationName("getTopicConfigs")
.build();
.build());
return validateAccess(context).then(
return validateAccess.then(
topicsService.getTopicConfigs(getCluster(clusterName), topicName)
.map(lst -> lst.stream()
.map(InternalTopicConfig::from)
.map(clusterMapper::toTopicConfig)
.toList())
.collect(toList()))
.map(Flux::fromIterable)
.map(ResponseEntity::ok)
).doOnEach(sig -> audit(context, sig));
);
}
@Override
public Mono<ResponseEntity<TopicDetailsDTO>> getTopicDetails(
String clusterName, String topicName, ServerWebExchange exchange) {
var context = AccessContext.builder()
Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
.cluster(clusterName)
.topic(topicName)
.topicActions(VIEW)
.operationName("getTopicDetails")
.build();
.build());
return validateAccess(context).then(
return validateAccess.then(
topicsService.getTopicDetails(getCluster(clusterName), topicName)
.map(clusterMapper::toTopicDetails)
.map(ResponseEntity::ok)
).doOnEach(sig -> audit(context, sig));
);
}
@Override
@@ -178,11 +166,6 @@ public class TopicsController extends AbstractController implements TopicsApi {
@Valid SortOrderDTO sortOrder,
ServerWebExchange exchange) {
AccessContext context = AccessContext.builder()
.cluster(clusterName)
.operationName("getTopics")
.build();
return topicsService.getTopicsForPagination(getCluster(clusterName))
.flatMap(topics -> accessControlService.filterViewableTopics(topics, clusterName))
.flatMap(topics -> {
@@ -206,13 +189,14 @@ public class TopicsController extends AbstractController implements TopicsApi {
.collect(toList());
return topicsService.loadTopics(getCluster(clusterName), topicsPage)
.flatMapMany(Flux::fromIterable)
.collectList()
.map(topicsToRender ->
new TopicsResponseDTO()
.topics(topicsToRender.stream().map(clusterMapper::toTopic).toList())
.topics(topicsToRender.stream().map(clusterMapper::toTopic).collect(toList()))
.pageCount(totalPages));
})
.map(ResponseEntity::ok)
.doOnEach(sig -> audit(context, sig));
.map(ResponseEntity::ok);
}
@Override
@@ -220,19 +204,18 @@ public class TopicsController extends AbstractController implements TopicsApi {
String clusterName, String topicName, @Valid Mono<TopicUpdateDTO> topicUpdate,
ServerWebExchange exchange) {
var context = AccessContext.builder()
Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
.cluster(clusterName)
.topic(topicName)
.topicActions(VIEW, EDIT)
.operationName("updateTopic")
.build();
.build());
return validateAccess(context).then(
return validateAccess.then(
topicsService
.updateTopic(getCluster(clusterName), topicName, topicUpdate)
.map(clusterMapper::toTopic)
.map(ResponseEntity::ok)
).doOnEach(sig -> audit(context, sig));
);
}
@Override
@@ -241,17 +224,17 @@ public class TopicsController extends AbstractController implements TopicsApi {
Mono<PartitionsIncreaseDTO> partitionsIncrease,
ServerWebExchange exchange) {
var context = AccessContext.builder()
Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
.cluster(clusterName)
.topic(topicName)
.topicActions(VIEW, EDIT)
.build();
.build());
return validateAccess(context).then(
return validateAccess.then(
partitionsIncrease.flatMap(partitions ->
topicsService.increaseTopicPartitions(getCluster(clusterName), topicName, partitions)
).map(ResponseEntity::ok)
).doOnEach(sig -> audit(context, sig));
);
}
@Override
@@ -260,34 +243,31 @@ public class TopicsController extends AbstractController implements TopicsApi {
Mono<ReplicationFactorChangeDTO> replicationFactorChange,
ServerWebExchange exchange) {
var context = AccessContext.builder()
Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
.cluster(clusterName)
.topic(topicName)
.topicActions(VIEW, EDIT)
.operationName("changeReplicationFactor")
.build();
.build());
return validateAccess(context).then(
return validateAccess.then(
replicationFactorChange
.flatMap(rfc ->
topicsService.changeReplicationFactor(getCluster(clusterName), topicName, rfc))
.map(ResponseEntity::ok)
).doOnEach(sig -> audit(context, sig));
);
}
@Override
public Mono<ResponseEntity<Void>> analyzeTopic(String clusterName, String topicName, ServerWebExchange exchange) {
var context = AccessContext.builder()
Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
.cluster(clusterName)
.topic(topicName)
.topicActions(MESSAGES_READ)
.operationName("analyzeTopic")
.build();
.build());
return validateAccess(context).then(
return validateAccess.then(
topicAnalysisService.analyze(getCluster(clusterName), topicName)
.doOnEach(sig -> audit(context, sig))
.thenReturn(ResponseEntity.ok().build())
);
}
@@ -295,17 +275,15 @@ public class TopicsController extends AbstractController implements TopicsApi {
@Override
public Mono<ResponseEntity<Void>> cancelTopicAnalysis(String clusterName, String topicName,
ServerWebExchange exchange) {
var context = AccessContext.builder()
Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
.cluster(clusterName)
.topic(topicName)
.topicActions(MESSAGES_READ)
.operationName("cancelTopicAnalysis")
.build();
.build());
return validateAccess(context)
.then(Mono.fromRunnable(() -> topicAnalysisService.cancelAnalysis(getCluster(clusterName), topicName)))
.doOnEach(sig -> audit(context, sig))
.thenReturn(ResponseEntity.ok().build());
topicAnalysisService.cancelAnalysis(getCluster(clusterName), topicName);
return validateAccess.thenReturn(ResponseEntity.ok().build());
}
@@ -314,46 +292,15 @@ public class TopicsController extends AbstractController implements TopicsApi {
String topicName,
ServerWebExchange exchange) {
var context = AccessContext.builder()
Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
.cluster(clusterName)
.topic(topicName)
.topicActions(MESSAGES_READ)
.operationName("getTopicAnalysis")
.build();
.build());
return validateAccess(context)
.thenReturn(topicAnalysisService.getTopicAnalysis(getCluster(clusterName), topicName)
.map(ResponseEntity::ok)
.orElseGet(() -> ResponseEntity.notFound().build()))
.doOnEach(sig -> audit(context, sig));
}
@Override
public Mono<ResponseEntity<Flux<TopicProducerStateDTO>>> getActiveProducerStates(String clusterName,
String topicName,
ServerWebExchange exchange) {
var context = AccessContext.builder()
.cluster(clusterName)
.topic(topicName)
.topicActions(VIEW)
.operationName("getActiveProducerStates")
.build();
Comparator<TopicProducerStateDTO> ordering =
Comparator.comparingInt(TopicProducerStateDTO::getPartition)
.thenComparing(Comparator.comparing(TopicProducerStateDTO::getProducerId).reversed());
Flux<TopicProducerStateDTO> states = topicsService.getActiveProducersState(getCluster(clusterName), topicName)
.flatMapMany(statesMap ->
Flux.fromStream(
statesMap.entrySet().stream()
.flatMap(e -> e.getValue().stream().map(p -> clusterMapper.map(e.getKey().partition(), p)))
.sorted(ordering)));
return validateAccess(context)
.thenReturn(states)
return validateAccess.thenReturn(topicAnalysisService.getTopicAnalysis(getCluster(clusterName), topicName)
.map(ResponseEntity::ok)
.doOnEach(sig -> audit(context, sig));
.orElseGet(() -> ResponseEntity.notFound().build()));
}
private Comparator<InternalTopic> getComparatorForTopic(


@@ -1,23 +1,38 @@
package com.provectus.kafka.ui.emitter;
import com.provectus.kafka.ui.model.TopicMessageEventDTO;
import java.time.Duration;
import java.time.Instant;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.common.utils.Bytes;
import reactor.core.publisher.FluxSink;
abstract class AbstractEmitter implements java.util.function.Consumer<FluxSink<TopicMessageEventDTO>> {
public abstract class AbstractEmitter {
private final MessagesProcessing messagesProcessing;
private final PollingSettings pollingSettings;
private final PollingThrottler throttler;
protected final PollingSettings pollingSettings;
protected AbstractEmitter(MessagesProcessing messagesProcessing, PollingSettings pollingSettings) {
this.messagesProcessing = messagesProcessing;
this.pollingSettings = pollingSettings;
this.throttler = pollingSettings.getPollingThrottler();
}
protected PolledRecords poll(FluxSink<TopicMessageEventDTO> sink, EnhancedConsumer consumer) {
var records = consumer.pollEnhanced(pollingSettings.getPollTimeout());
sendConsuming(sink, records);
protected ConsumerRecords<Bytes, Bytes> poll(
FluxSink<TopicMessageEventDTO> sink, Consumer<Bytes, Bytes> consumer) {
return poll(sink, consumer, pollingSettings.getPollTimeout());
}
protected ConsumerRecords<Bytes, Bytes> poll(
FluxSink<TopicMessageEventDTO> sink, Consumer<Bytes, Bytes> consumer, Duration timeout) {
Instant start = Instant.now();
ConsumerRecords<Bytes, Bytes> records = consumer.poll(timeout);
Instant finish = Instant.now();
int polledBytes = sendConsuming(sink, records, Duration.between(start, finish).toMillis());
throttler.throttleAfterPoll(polledBytes);
return records;
}
@@ -25,16 +40,19 @@ abstract class AbstractEmitter implements java.util.function.Consumer<FluxSink<T
return messagesProcessing.limitReached();
}
protected void send(FluxSink<TopicMessageEventDTO> sink, Iterable<ConsumerRecord<Bytes, Bytes>> records) {
messagesProcessing.send(sink, records);
protected void sendMessage(FluxSink<TopicMessageEventDTO> sink,
ConsumerRecord<Bytes, Bytes> msg) {
messagesProcessing.sendMsg(sink, msg);
}
protected void sendPhase(FluxSink<TopicMessageEventDTO> sink, String name) {
messagesProcessing.sendPhase(sink, name);
}
protected void sendConsuming(FluxSink<TopicMessageEventDTO> sink, PolledRecords records) {
messagesProcessing.sentConsumingInfo(sink, records);
protected int sendConsuming(FluxSink<TopicMessageEventDTO> sink,
ConsumerRecords<Bytes, Bytes> records,
long elapsed) {
return messagesProcessing.sentConsumingInfo(sink, records, elapsed);
}
protected void sendFinishStatsAndCompleteSink(FluxSink<TopicMessageEventDTO> sink) {


@@ -1,60 +0,0 @@
package com.provectus.kafka.ui.emitter;
import com.provectus.kafka.ui.model.ConsumerPosition;
import com.provectus.kafka.ui.model.TopicMessageDTO;
import com.provectus.kafka.ui.serdes.ConsumerRecordDeserializer;
import java.util.Comparator;
import java.util.Map;
import java.util.TreeMap;
import java.util.function.Predicate;
import java.util.function.Supplier;
import java.util.stream.Collectors;
import org.apache.kafka.common.TopicPartition;
public class BackwardEmitter extends RangePollingEmitter {
public BackwardEmitter(Supplier<EnhancedConsumer> consumerSupplier,
ConsumerPosition consumerPosition,
int messagesPerPage,
ConsumerRecordDeserializer deserializer,
Predicate<TopicMessageDTO> filter,
PollingSettings pollingSettings) {
super(
consumerSupplier,
consumerPosition,
messagesPerPage,
new MessagesProcessing(
deserializer,
filter,
false,
messagesPerPage
),
pollingSettings
);
}
@Override
protected TreeMap<TopicPartition, FromToOffset> nextPollingRange(TreeMap<TopicPartition, FromToOffset> prevRange,
SeekOperations seekOperations) {
TreeMap<TopicPartition, Long> readToOffsets = new TreeMap<>(Comparator.comparingInt(TopicPartition::partition));
if (prevRange.isEmpty()) {
readToOffsets.putAll(seekOperations.getOffsetsForSeek());
} else {
readToOffsets.putAll(
prevRange.entrySet()
.stream()
.collect(Collectors.toMap(Map.Entry::getKey, e -> e.getValue().from()))
);
}
int msgsToPollPerPartition = (int) Math.ceil((double) messagesPerPage / readToOffsets.size());
TreeMap<TopicPartition, FromToOffset> result = new TreeMap<>(Comparator.comparingInt(TopicPartition::partition));
readToOffsets.forEach((tp, toOffset) -> {
long tpStartOffset = seekOperations.getBeginOffsets().get(tp);
if (toOffset > tpStartOffset) {
result.put(tp, new FromToOffset(Math.max(tpStartOffset, toOffset - msgsToPollPerPartition), toOffset));
}
});
return result;
}
}


@@ -0,0 +1,128 @@
package com.provectus.kafka.ui.emitter;
import com.provectus.kafka.ui.model.ConsumerPosition;
import com.provectus.kafka.ui.model.TopicMessageEventDTO;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;
import java.util.TreeMap;
import java.util.function.Supplier;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.errors.InterruptException;
import org.apache.kafka.common.utils.Bytes;
import reactor.core.publisher.FluxSink;
@Slf4j
public class BackwardRecordEmitter
extends AbstractEmitter
implements java.util.function.Consumer<FluxSink<TopicMessageEventDTO>> {
private final Supplier<KafkaConsumer<Bytes, Bytes>> consumerSupplier;
private final ConsumerPosition consumerPosition;
private final int messagesPerPage;
public BackwardRecordEmitter(
Supplier<KafkaConsumer<Bytes, Bytes>> consumerSupplier,
ConsumerPosition consumerPosition,
int messagesPerPage,
MessagesProcessing messagesProcessing,
PollingSettings pollingSettings) {
super(messagesProcessing, pollingSettings);
this.consumerPosition = consumerPosition;
this.messagesPerPage = messagesPerPage;
this.consumerSupplier = consumerSupplier;
}
@Override
public void accept(FluxSink<TopicMessageEventDTO> sink) {
log.debug("Starting backward polling for {}", consumerPosition);
try (KafkaConsumer<Bytes, Bytes> consumer = consumerSupplier.get()) {
sendPhase(sink, "Created consumer");
var seekOperations = SeekOperations.create(consumer, consumerPosition);
var readUntilOffsets = new TreeMap<TopicPartition, Long>(Comparator.comparingInt(TopicPartition::partition));
readUntilOffsets.putAll(seekOperations.getOffsetsForSeek());
int msgsToPollPerPartition = (int) Math.ceil((double) messagesPerPage / readUntilOffsets.size());
log.debug("'Until' offsets for polling: {}", readUntilOffsets);
while (!sink.isCancelled() && !readUntilOffsets.isEmpty() && !sendLimitReached()) {
new TreeMap<>(readUntilOffsets).forEach((tp, readToOffset) -> {
if (sink.isCancelled()) {
return; //fast return in case of sink cancellation
}
long beginOffset = seekOperations.getBeginOffsets().get(tp);
long readFromOffset = Math.max(beginOffset, readToOffset - msgsToPollPerPartition);
partitionPollIteration(tp, readFromOffset, readToOffset, consumer, sink)
.forEach(r -> sendMessage(sink, r));
if (beginOffset == readFromOffset) {
// we fully read this partition -> removing it from polling iterations
readUntilOffsets.remove(tp);
} else {
// updating 'to' offset for next polling iteration
readUntilOffsets.put(tp, readFromOffset);
}
});
if (readUntilOffsets.isEmpty()) {
log.debug("begin reached after partitions poll iteration");
} else if (sink.isCancelled()) {
log.debug("sink is cancelled after partitions poll iteration");
}
}
sendFinishStatsAndCompleteSink(sink);
log.debug("Polling finished");
} catch (InterruptException kafkaInterruptException) {
log.debug("Polling finished due to thread interruption");
sink.complete();
} catch (Exception e) {
log.error("Error occurred while consuming records", e);
sink.error(e);
}
}
private List<ConsumerRecord<Bytes, Bytes>> partitionPollIteration(
TopicPartition tp,
long fromOffset,
long toOffset,
Consumer<Bytes, Bytes> consumer,
FluxSink<TopicMessageEventDTO> sink
) {
consumer.assign(Collections.singleton(tp));
consumer.seek(tp, fromOffset);
sendPhase(sink, String.format("Polling partition: %s from offset %s", tp, fromOffset));
int desiredMsgsToPoll = (int) (toOffset - fromOffset);
var recordsToSend = new ArrayList<ConsumerRecord<Bytes, Bytes>>();
EmptyPollsCounter emptyPolls = pollingSettings.createEmptyPollsCounter();
while (!sink.isCancelled()
&& !sendLimitReached()
&& recordsToSend.size() < desiredMsgsToPoll
&& !emptyPolls.noDataEmptyPollsReached()) {
var polledRecords = poll(sink, consumer, pollingSettings.getPartitionPollTimeout());
emptyPolls.count(polledRecords);
log.debug("{} records polled from {}", polledRecords.count(), tp);
var filteredRecords = polledRecords.records(tp).stream()
.filter(r -> r.offset() < toOffset)
.toList();
if (!polledRecords.isEmpty() && filteredRecords.isEmpty()) {
// we already read all messages in target offsets interval
break;
}
recordsToSend.addAll(filteredRecords);
}
log.debug("{} records to send", recordsToSend.size());
Collections.reverse(recordsToSend);
return recordsToSend;
}
}
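The backward paging arithmetic in accept() and partitionPollIteration() above is easier to see with concrete numbers. A small self-contained sketch of a single partition's iterations, reusing the same formulas (the offsets and page size are made up for illustration):

public class BackwardPagingSketch {
  public static void main(String[] args) {
    int messagesPerPage = 100;
    int polledPartitions = 4;
    // as in accept(): spread the requested page across the partitions still being read
    int msgsToPollPerPartition = (int) Math.ceil((double) messagesPerPage / polledPartitions); // 25

    long beginOffset = 0L;      // earliest available offset of this partition
    long readToOffset = 1000L;  // current 'until' offset for this partition

    while (readToOffset > beginOffset) {
      // as in the per-partition iteration: step one page back, but never past the beginning
      long readFromOffset = Math.max(beginOffset, readToOffset - msgsToPollPerPartition);
      System.out.printf("poll offsets [%d, %d)%n", readFromOffset, readToOffset);
      readToOffset = readFromOffset; // the next iteration reads the previous page
    }
    // prints [975, 1000), [950, 975), ... , [0, 25); once beginOffset is reached the
    // partition would be dropped from readUntilOffsets and polling of it stops
  }
}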


@@ -2,6 +2,9 @@ package com.provectus.kafka.ui.emitter;
import com.provectus.kafka.ui.model.TopicMessageConsumingDTO;
import com.provectus.kafka.ui.model.TopicMessageEventDTO;
import com.provectus.kafka.ui.util.ConsumerRecordsUtil;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.common.utils.Bytes;
import reactor.core.publisher.FluxSink;
class ConsumingStats {
@@ -9,37 +12,41 @@ class ConsumingStats {
private long bytes = 0;
private int records = 0;
private long elapsed = 0;
private int filterApplyErrors = 0;
void sendConsumingEvt(FluxSink<TopicMessageEventDTO> sink, PolledRecords polledRecords) {
bytes += polledRecords.bytes();
records += polledRecords.count();
elapsed += polledRecords.elapsed().toMillis();
/**
* returns bytes polled.
*/
int sendConsumingEvt(FluxSink<TopicMessageEventDTO> sink,
ConsumerRecords<Bytes, Bytes> polledRecords,
long elapsed,
int filterApplyErrors) {
int polledBytes = ConsumerRecordsUtil.calculatePolledSize(polledRecords);
bytes += polledBytes;
this.records += polledRecords.count();
this.elapsed += elapsed;
sink.next(
new TopicMessageEventDTO()
.type(TopicMessageEventDTO.TypeEnum.CONSUMING)
.consuming(createConsumingStats())
.consuming(createConsumingStats(sink, filterApplyErrors))
);
return polledBytes;
}
void incFilterApplyError() {
filterApplyErrors++;
}
void sendFinishEvent(FluxSink<TopicMessageEventDTO> sink) {
void sendFinishEvent(FluxSink<TopicMessageEventDTO> sink, int filterApplyErrors) {
sink.next(
new TopicMessageEventDTO()
.type(TopicMessageEventDTO.TypeEnum.DONE)
.consuming(createConsumingStats())
.consuming(createConsumingStats(sink, filterApplyErrors))
);
}
private TopicMessageConsumingDTO createConsumingStats() {
private TopicMessageConsumingDTO createConsumingStats(FluxSink<TopicMessageEventDTO> sink,
int filterApplyErrors) {
return new TopicMessageConsumingDTO()
.bytesConsumed(bytes)
.elapsedMs(elapsed)
.isCancelled(false)
.bytesConsumed(this.bytes)
.elapsedMs(this.elapsed)
.isCancelled(sink.isCancelled())
.filterApplyErrors(filterApplyErrors)
.messagesConsumed(records);
.messagesConsumed(this.records);
}
}


@@ -0,0 +1,28 @@
package com.provectus.kafka.ui.emitter;
import org.apache.kafka.clients.consumer.ConsumerRecords;
// In some situations it is hard to say whether a records range (between two offsets) was fully polled.
// This happens when there are holes in the record sequence, which is a usual case for compacted topics or
// topics with transactional writes. In such cases, if you want to poll all records between offsets X and Y,
// there is no guarantee that you will ever see a record with offset Y.
// To work around this we can assume that after N consecutive empty polls all target messages were read.
public class EmptyPollsCounter {
private final int maxEmptyPolls;
private int emptyPolls = 0;
EmptyPollsCounter(int maxEmptyPolls) {
this.maxEmptyPolls = maxEmptyPolls;
}
public void count(ConsumerRecords<?, ?> polled) {
emptyPolls = polled.isEmpty() ? emptyPolls + 1 : 0;
}
public boolean noDataEmptyPollsReached() {
return emptyPolls >= maxEmptyPolls;
}
}
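A minimal usage sketch of the counter (the demo class is hypothetical and would need to live in the same package, since the constructor is package-private; the threshold of 3 is arbitrary):

package com.provectus.kafka.ui.emitter;

import org.apache.kafka.clients.consumer.ConsumerRecords;

public class EmptyPollsCounterDemo {
  public static void main(String[] args) {
    EmptyPollsCounter counter = new EmptyPollsCounter(3); // give up after 3 consecutive empty polls

    counter.count(ConsumerRecords.empty());
    counter.count(ConsumerRecords.empty());
    System.out.println(counter.noDataEmptyPollsReached()); // false: only 2 empty polls so far

    counter.count(ConsumerRecords.empty());
    System.out.println(counter.noDataEmptyPollsReached()); // true: assume the target range is exhausted
    // any poll that does return records resets the streak back to zero
  }
}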

Some files were not shown because too many files have changed in this diff.