**Compare commits: master...bulk-conne** — 11 commits

| SHA1 |
| --- |
| 34921e0c31 |
| 42eafabd06 |
| 174e4d8f6f |
| 19c0b29362 |
| 8a8fedf11e |
| 52fa539e3f |
| b9798377c4 |
| afc016a7dc |
| 6cd3a445c9 |
| cc71300b82 |
| f9ec61ef70 |
421 changed files with 6483 additions and 12985 deletions
**.github/CODEOWNERS** — 4 changes (vendored)

```diff
@@ -14,5 +14,5 @@
 # TESTS
 /kafka-ui-e2e-checks/ @provectus/kafka-qa

-# INFRA
-/.github/workflows/ @provectus/kafka-devops
+# HELM CHARTS
+/charts/ @provectus/kafka-devops
```
**.github/ISSUE_TEMPLATE/config.yml** — 3 changes (vendored)

```diff
@@ -1,8 +1,5 @@
 blank_issues_enabled: false
 contact_links:
-  - name: Report helm issue
-    url: https://github.com/provectus/kafka-ui-charts
-    about: Our helm charts are located in another repo. Please raise issues/PRs regarding charts in that repo.
   - name: Official documentation
     url: https://docs.kafka-ui.provectus.io/
     about: Before reaching out for support, please refer to our documentation. Read "FAQ" and "Common problems", also try using search there.
```
**.github/ISSUE_TEMPLATE/helm.yml** — new file, 92 lines (vendored)

```diff
@@ -0,0 +1,92 @@
+name: "⎈ K8s/Helm problem report"
+description: "Report a problem with k8s/helm charts/etc"
+labels: ["status/triage", "scope/k8s"]
+assignees: []
+
+body:
+  - type: markdown
+    attributes:
+      value: |
+        Hi, thanks for raising the issue(-s), all contributions really matter!
+        Please, note that we'll close the issue without further explanation if you don't follow
+        this template and don't provide the information requested within this template.
+
+  - type: checkboxes
+    id: terms
+    attributes:
+      label: Issue submitter TODO list
+      description: By you checking these checkboxes we can be sure you've done the essential things.
+      options:
+        - label: I've looked up my issue in [FAQ](https://docs.kafka-ui.provectus.io/faq/common-problems)
+          required: true
+        - label: I've searched for an already existing issues [here](https://github.com/provectus/kafka-ui/issues)
+          required: true
+        - label: I've tried running `master`-labeled docker image and the issue still persists there
+          required: true
+        - label: I'm running a supported version of the application which is listed [here](https://github.com/provectus/kafka-ui/blob/master/SECURITY.md)
+          required: true
+
+  - type: textarea
+    attributes:
+      label: Describe the bug (actual behavior)
+      description: A clear and concise description of what the bug is. Use a list, if there is more than one problem
+    validations:
+      required: true
+
+  - type: textarea
+    attributes:
+      label: Expected behavior
+      description: A clear and concise description of what you expected to happen
+    validations:
+      required: false
+
+  - type: textarea
+    attributes:
+      label: Your installation details
+      description: |
+        How do you run the app? Please provide as much info as possible:
+        1. App version (commit hash in the top left corner of the UI)
+        2. Helm chart version
+        3. Your application config. Please remove the sensitive info like passwords or API keys.
+        4. Any IAAC configs
+    validations:
+      required: true
+
+  - type: textarea
+    attributes:
+      label: Steps to reproduce
+      description: |
+        Please write down the order of the actions required to reproduce the issue.
+        For the advanced setups/complicated issue, we might need you to provide
+        a minimal [reproducible example](https://stackoverflow.com/help/minimal-reproducible-example).
+    validations:
+      required: true
+
+  - type: textarea
+    attributes:
+      label: Screenshots
+      description: |
+        If applicable, add screenshots to help explain your problem
+    validations:
+      required: false
+
+  - type: textarea
+    attributes:
+      label: Logs
+      description: |
+        If applicable, *upload* screenshots to help explain your problem
+    validations:
+      required: false
+
+  - type: textarea
+    attributes:
+      label: Additional context
+      description: |
+        Add any other context about the problem here. E.G.:
+        1. Are there any alternative scenarios (different data/methods/configuration/setup) you have tried?
+           Were they successful or the same issue occurred? Please provide steps as well.
+        2. Related issues (if there are any).
+        3. Logs (if available)
+        4. Is there any serious impact or behaviour on the end-user because of this issue, that can be overlooked?
+    validations:
+      required: false
```
**.github/release_drafter.yaml** — 8 changes (vendored)

```diff
@@ -16,26 +16,18 @@ exclude-labels:
   - 'type/refactoring'

 categories:
-  - title: '🚩 Breaking Changes'
-    labels:
-      - 'impact/changelog'
-
   - title: '⚙️Features'
     labels:
       - 'type/feature'

   - title: '🪛Enhancements'
     labels:
       - 'type/enhancement'

   - title: '🔨Bug Fixes'
     labels:
       - 'type/bug'

-  - title: 'Security'
-    labels:
-      - 'type/security'
-
   - title: '⎈ Helm/K8S Changes'
     labels:
       - 'scope/k8s'
```
**.github/workflows/aws_publisher.yaml** — 6 changes (vendored)

```diff
@@ -1,4 +1,4 @@
-name: "Infra: Release: AWS Marketplace Publisher"
+name: AWS Marketplace Publisher
 on:
   workflow_dispatch:
     inputs:
@@ -24,14 +24,14 @@ jobs:
       - name: Clone infra repo
         run: |
           echo "Cloning repo..."
-          git clone https://infra-tech:${{ secrets.INFRA_USER_ACCESS_TOKEN }}@github.com/provectus/kafka-ui-infra.git --branch ${{ github.event.inputs.KafkaUIInfraBranch }}
+          git clone https://kafka-ui-infra:${{ secrets.KAFKA_UI_INFRA_TOKEN }}@gitlab.provectus.com/provectus-internals/kafka-ui-infra.git --branch ${{ github.event.inputs.KafkaUIInfraBranch }}
           echo "Cd to packer DIR..."
           cd kafka-ui-infra/ami
           echo "WORK_DIR=$(pwd)" >> $GITHUB_ENV
           echo "Packer will be triggered in this dir $WORK_DIR"

       - name: Configure AWS credentials for Kafka-UI account
-        uses: aws-actions/configure-aws-credentials@v3
+        uses: aws-actions/configure-aws-credentials@v2
         with:
           aws-access-key-id: ${{ secrets.AWS_AMI_PUBLISH_KEY_ID }}
           aws-secret-access-key: ${{ secrets.AWS_AMI_PUBLISH_KEY_SECRET }}
```
**.github/workflows/backend.yml** — 7 changes (vendored)

```diff
@@ -1,4 +1,4 @@
-name: "Backend: PR/master build & test"
+name: Backend build and test
 on:
   push:
     branches:
@@ -8,9 +8,6 @@ on:
     paths:
       - "kafka-ui-api/**"
       - "pom.xml"
-permissions:
-  checks: write
-  pull-requests: write
 jobs:
   build-and-test:
     runs-on: ubuntu-latest
@@ -32,7 +29,7 @@ jobs:
           key: ${{ runner.os }}-sonar
           restore-keys: ${{ runner.os }}-sonar
       - name: Build and analyze pull request target
-        if: ${{ github.event_name == 'pull_request' }}
+        if: ${{ github.event_name == 'pull_request_target' }}
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
           SONAR_TOKEN: ${{ secrets.SONAR_TOKEN_BACKEND }}
```
**.github/workflows/block_merge.yml** — 4 changes (vendored)

```diff
@@ -1,4 +1,4 @@
-name: "Infra: PR block merge"
+name: Pull Request Labels
 on:
   pull_request:
     types: [opened, labeled, unlabeled, synchronize]
@@ -6,7 +6,7 @@ jobs:
   block_merge:
     runs-on: ubuntu-latest
     steps:
-      - uses: mheap/github-action-required-labels@v5
+      - uses: mheap/github-action-required-labels@v4
        with:
          mode: exactly
          count: 0
```
**.github/workflows/branch-deploy.yml** — 34 changes (vendored)

```diff
@@ -1,4 +1,4 @@
-name: "Infra: Feature Testing: Init env"
+name: Feature testing init
 on:
   workflow_dispatch:

@@ -45,7 +45,7 @@ jobs:
           restore-keys: |
             ${{ runner.os }}-buildx-
       - name: Configure AWS credentials for Kafka-UI account
-        uses: aws-actions/configure-aws-credentials@v3
+        uses: aws-actions/configure-aws-credentials@v2
         with:
           aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
           aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
@@ -73,33 +73,29 @@
     steps:
       - name: clone
         run: |
-          git clone https://infra-tech:${{ secrets.INFRA_USER_ACCESS_TOKEN }}@github.com/provectus/kafka-ui-infra.git --branch envs
+          git clone https://kafka-ui-infra:${{ secrets.KAFKA_UI_INFRA_TOKEN }}@gitlab.provectus.com/provectus-internals/kafka-ui-infra.git
       - name: create deployment
         run: |
           cd kafka-ui-infra/aws-infrastructure4eks/argocd/scripts
           echo "Branch:${{ needs.build.outputs.tag }}"
           ./kafka-ui-deployment-from-branch.sh ${{ needs.build.outputs.tag }} ${{ github.event.label.name }} ${{ secrets.FEATURE_TESTING_UI_PASSWORD }}
-          git config --global user.email "infra-tech@provectus.com"
-          git config --global user.name "infra-tech"
+          git config --global user.email "kafka-ui-infra@provectus.com"
+          git config --global user.name "kafka-ui-infra"
           git add ../kafka-ui-from-branch/
           git commit -m "added env:${{ needs.build.outputs.deploy }}" && git push || true

-      - name: update status check for private deployment
+      - name: make comment with private deployment link
         if: ${{ github.event.label.name == 'status/feature_testing' }}
-        uses: Sibz/github-status-action@v1.1.6
+        uses: peter-evans/create-or-update-comment@v3
         with:
-          authToken: ${{secrets.GITHUB_TOKEN}}
-          context: "Click Details button to open custom deployment page"
-          state: "success"
-          sha: ${{ github.event.pull_request.head.sha || github.sha }}
-          target_url: "http://${{ needs.build.outputs.tag }}.internal.kafka-ui.provectus.io"
+          issue-number: ${{ github.event.pull_request.number }}
+          body: |
+            Custom deployment will be available at http://${{ needs.build.outputs.tag }}.internal.kafka-ui.provectus.io

-      - name: update status check for public deployment
+      - name: make comment with public deployment link
         if: ${{ github.event.label.name == 'status/feature_testing_public' }}
-        uses: Sibz/github-status-action@v1.1.6
+        uses: peter-evans/create-or-update-comment@v3
         with:
-          authToken: ${{secrets.GITHUB_TOKEN}}
-          context: "Click Details button to open custom deployment page"
-          state: "success"
-          sha: ${{ github.event.pull_request.head.sha || github.sha }}
-          target_url: "http://${{ needs.build.outputs.tag }}.internal.kafka-ui.provectus.io"
+          issue-number: ${{ github.event.pull_request.number }}
+          body: |
+            Custom deployment will be available at http://${{ needs.build.outputs.tag }}.kafka-ui.provectus.io in 5 minutes
```
**.github/workflows/branch-remove.yml** — 14 changes (vendored)

```diff
@@ -1,4 +1,4 @@
-name: "Infra: Feature Testing: Destroy env"
+name: Feature testing destroy
 on:
   workflow_dispatch:
   pull_request:
@@ -11,12 +11,18 @@ jobs:
       - uses: actions/checkout@v3
       - name: clone
         run: |
-          git clone https://infra-tech:${{ secrets.INFRA_USER_ACCESS_TOKEN }}@github.com/provectus/kafka-ui-infra.git --branch envs
+          git clone https://kafka-ui-infra:${{ secrets.KAFKA_UI_INFRA_TOKEN }}@gitlab.provectus.com/provectus-internals/kafka-ui-infra.git
       - name: remove env
         run: |
           cd kafka-ui-infra/aws-infrastructure4eks/argocd/scripts
           ./delete-env.sh pr${{ github.event.pull_request.number }} || true
-          git config --global user.email "infra-tech@provectus.com"
-          git config --global user.name "infra-tech"
+          git config --global user.email "kafka-ui-infra@provectus.com"
+          git config --global user.name "kafka-ui-infra"
           git add ../kafka-ui-from-branch/
           git commit -m "removed env:${{ needs.build.outputs.deploy }}" && git push || true
+      - name: make comment with deployment link
+        uses: peter-evans/create-or-update-comment@v3
+        with:
+          issue-number: ${{ github.event.pull_request.number }}
+          body: |
+            Custom deployment removed
```
**.github/workflows/build-public-image.yml** — 5 changes (vendored)

```diff
@@ -1,4 +1,4 @@
-name: "Infra: Image Testing: Deploy"
+name: Build Docker image and push
 on:
   workflow_dispatch:
   pull_request:
@@ -42,7 +42,7 @@ jobs:
           restore-keys: |
             ${{ runner.os }}-buildx-
       - name: Configure AWS credentials for Kafka-UI account
-        uses: aws-actions/configure-aws-credentials@v3
+        uses: aws-actions/configure-aws-credentials@v2
         with:
           aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
           aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
@@ -70,5 +70,6 @@ jobs:
           issue-number: ${{ github.event.pull_request.number }}
           body: |
             Image published at public.ecr.aws/provectus/kafka-ui-custom-build:${{ steps.extract_branch.outputs.tag }}
+
   outputs:
     tag: ${{ steps.extract_branch.outputs.tag }}
```
**.github/workflows/create-branch-for-helm.yaml** — new file, 28 lines (vendored)

```diff
@@ -0,0 +1,28 @@
+name: Prepare helm release
+on:
+  repository_dispatch:
+    types: [prepare-helm-release]
+jobs:
+  change-app-version:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v3
+      - run: |
+          git config user.name github-actions
+          git config user.email github-actions@github.com
+      - name: Change versions
+        run: |
+          git checkout -b release-${{ github.event.client_payload.appversion}}
+          version=$(cat charts/kafka-ui/Chart.yaml | grep version | awk '{print $2}')
+          version=${version%.*}.$((${version##*.}+1))
+          sed -i "s/version:.*/version: ${version}/" charts/kafka-ui/Chart.yaml
+          sed -i "s/appVersion:.*/appVersion: ${{ github.event.client_payload.appversion}}/" charts/kafka-ui/Chart.yaml
+          git add charts/kafka-ui/Chart.yaml
+          git commit -m "release ${version}"
+          git push --set-upstream origin release-${{ github.event.client_payload.appversion}}
+      - name: Slack Notification
+        uses: rtCamp/action-slack-notify@v2
+        env:
+          SLACK_TITLE: "release-${{ github.event.client_payload.appversion}}"
+          SLACK_MESSAGE: "A new release of the helm chart has been prepared. Branch name: release-${{ github.event.client_payload.appversion}}"
+          SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
```
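The `Change versions` step above bumps the chart's patch version with two bash parameter expansions. A minimal standalone sketch of that arithmetic (the `0.6.2` input is just an illustration):

```bash
#!/usr/bin/env bash
# Patch-version bump as used in the workflow above, assuming a semver-like x.y.z input.
version="0.6.2"
prefix=${version%.*}    # "0.6" -- shortest ".*" suffix stripped (drops the patch part)
patch=${version##*.}    # "2"   -- longest "*." prefix stripped (keeps only the patch part)
version="${prefix}.$((patch + 1))"
echo "$version"         # prints 0.6.3
```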
**.github/workflows/cve.yaml** — 2 changes (vendored)

```diff
@@ -55,7 +55,7 @@ jobs:
           cache-to: type=local,dest=/tmp/.buildx-cache

       - name: Run CVE checks
-        uses: aquasecurity/trivy-action@0.12.0
+        uses: aquasecurity/trivy-action@0.10.0
         with:
           image-ref: "provectuslabs/kafka-ui:${{ steps.build.outputs.version }}"
           format: "table"
```
**.github/workflows/delete-public-image.yml** — 10 changes (vendored)

```diff
@@ -1,4 +1,4 @@
-name: "Infra: Image Testing: Delete"
+name: Delete Public ECR Image
 on:
   workflow_dispatch:
   pull_request:
@@ -15,7 +15,7 @@ jobs:
           tag='${{ github.event.pull_request.number }}'
           echo "tag=${tag}" >> $GITHUB_OUTPUT
       - name: Configure AWS credentials for Kafka-UI account
-        uses: aws-actions/configure-aws-credentials@v3
+        uses: aws-actions/configure-aws-credentials@v2
         with:
           aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
           aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
@@ -32,3 +32,9 @@ jobs:
             --repository-name kafka-ui-custom-build \
             --image-ids imageTag=${{ steps.extract_branch.outputs.tag }} \
             --region us-east-1
+      - name: make comment with private deployment link
+        uses: peter-evans/create-or-update-comment@v3
+        with:
+          issue-number: ${{ github.event.pull_request.number }}
+          body: |
+            Image tag public.ecr.aws/provectus/kafka-ui-custom-build:${{ steps.extract_branch.outputs.tag }} has been removed
```
**.github/workflows/documentation.yaml** — 2 changes (vendored)

```diff
@@ -1,4 +1,4 @@
-name: "Infra: Docs: URL linter"
+name: Documentation URLs linter
 on:
   pull_request:
     types:
```
**.github/workflows/e2e-automation.yml** — 4 changes (vendored)

```diff
@@ -1,4 +1,4 @@
-name: "E2E: Automation suite"
+name: E2E Automation suite
 on:
   workflow_dispatch:
     inputs:
@@ -24,7 +24,7 @@ jobs:
         with:
           ref: ${{ github.sha }}
       - name: Configure AWS credentials
-        uses: aws-actions/configure-aws-credentials@v3
+        uses: aws-actions/configure-aws-credentials@v2
        with:
          aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
          aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
```
**.github/workflows/e2e-checks.yaml** — 12 changes (vendored)

```diff
@@ -1,4 +1,4 @@
-name: "E2E: PR healthcheck"
+name: E2E PR health check
 on:
   pull_request_target:
     types: [ "opened", "edited", "reopened", "synchronize" ]
@@ -8,8 +8,6 @@ on:
       - "kafka-ui-react-app/**"
       - "kafka-ui-e2e-checks/**"
       - "pom.xml"
-permissions:
-  statuses: write
 jobs:
   build-and-test:
     runs-on: ubuntu-latest
@@ -18,10 +16,10 @@ jobs:
         with:
           ref: ${{ github.event.pull_request.head.sha }}
       - name: Configure AWS credentials
-        uses: aws-actions/configure-aws-credentials@v3
+        uses: aws-actions/configure-aws-credentials@v2
         with:
-          aws-access-key-id: ${{ secrets.S3_AWS_ACCESS_KEY_ID }}
-          aws-secret-access-key: ${{ secrets.S3_AWS_SECRET_ACCESS_KEY }}
+          aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
+          aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
           aws-region: eu-central-1
       - name: Set up environment
         id: set_env_values
@@ -47,7 +45,7 @@ jobs:
         # use the following command until #819 will be fixed
         run: |
           docker-compose -f kafka-ui-e2e-checks/docker/selenoid-git.yaml up -d
-          docker-compose -f ./documentation/compose/e2e-tests.yaml up -d && until [ "$(docker exec kafka-ui wget --spider --server-response http://localhost:8080/actuator/health 2>&1 | grep -c 'HTTP/1.1 200 OK')" == "1" ]; do echo "Waiting for kafka-ui ..." && sleep 1; done
+          docker-compose -f ./documentation/compose/e2e-tests.yaml up -d
       - name: Run test suite
         run: |
           ./mvnw -B -ntp versions:set -DnewVersion=${{ github.event.pull_request.head.sha }}
```
**.github/workflows/e2e-manual.yml** — 2 changes (vendored)

```diff
@@ -1,4 +1,4 @@
-name: "E2E: Manual suite"
+name: E2E Manual suite
 on:
   workflow_dispatch:
     inputs:
```
**.github/workflows/e2e-weekly.yml** — 4 changes (vendored)

```diff
@@ -1,4 +1,4 @@
-name: "E2E: Weekly suite"
+name: E2E Weekly suite
 on:
   schedule:
     - cron: '0 1 * * 1'
@@ -11,7 +11,7 @@ jobs:
         with:
           ref: ${{ github.sha }}
       - name: Configure AWS credentials
-        uses: aws-actions/configure-aws-credentials@v3
+        uses: aws-actions/configure-aws-credentials@v2
        with:
          aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
          aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
```
**.github/workflows/frontend.yaml** — 15 changes (vendored)

```diff
@@ -1,4 +1,4 @@
-name: "Frontend: PR/master build & test"
+name: Frontend build and test
 on:
   push:
     branches:
@@ -8,9 +8,6 @@ on:
     paths:
       - "kafka-ui-contract/**"
       - "kafka-ui-react-app/**"
-permissions:
-  checks: write
-  pull-requests: write
 jobs:
   build-and-test:
     env:
@@ -23,13 +20,13 @@ jobs:
           # Disabling shallow clone is recommended for improving relevancy of reporting
           fetch-depth: 0
           ref: ${{ github.event.pull_request.head.sha }}
-      - uses: pnpm/action-setup@v2.4.0
+      - uses: pnpm/action-setup@v2.2.4
         with:
-          version: 8.6.12
+          version: 7.4.0
       - name: Install node
-        uses: actions/setup-node@v3.8.1
+        uses: actions/setup-node@v3.6.0
         with:
-          node-version: "18.17.1"
+          node-version: "16.15.0"
           cache: "pnpm"
           cache-dependency-path: "./kafka-ui-react-app/pnpm-lock.yaml"
       - name: Install Node dependencies
@@ -49,7 +46,7 @@ jobs:
           cd kafka-ui-react-app/
           pnpm test:CI
       - name: SonarCloud Scan
-        uses: sonarsource/sonarcloud-github-action@master
+        uses: workshur/sonarcloud-github-action@improved_basedir
         with:
           projectBaseDir: ./kafka-ui-react-app
           args: -Dsonar.pullrequest.key=${{ github.event.pull_request.number }} -Dsonar.pullrequest.branch=${{ github.head_ref }} -Dsonar.pullrequest.base=${{ github.base_ref }}
```
**.github/workflows/helm.yaml** — new file, 38 lines (vendored)

```diff
@@ -0,0 +1,38 @@
+name: Helm linter
+on:
+  pull_request:
+    types: ["opened", "edited", "reopened", "synchronize"]
+    branches:
+      - 'master'
+    paths:
+      - "charts/**"
+jobs:
+  build-and-test:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v3
+      - name: Helm tool installer
+        uses: Azure/setup-helm@v3
+      - name: Setup Kubeval
+        uses: lra/setup-kubeval@v1.0.1
+      #check, was helm version increased in Chart.yaml?
+      - name: Check version
+        shell: bash
+        run: |
+          helm_version_new=$(cat charts/kafka-ui/Chart.yaml | grep version | awk '{print $2}')
+          helm_version_old=$(curl -s https://raw.githubusercontent.com/provectus/kafka-ui/master/charts/kafka-ui/Chart.yaml | grep version | awk '{print $2}' )
+          echo $helm_version_old
+          echo $helm_version_new
+          if [[ "$helm_version_new" > "$helm_version_old" ]]; then exit 0 ; else exit 1 ; fi
+      - name: Run kubeval
+        shell: bash
+        run: |
+          sed -i "s@enabled: false@enabled: true@g" charts/kafka-ui/values.yaml
+          K8S_VERSIONS=$(git ls-remote --refs --tags https://github.com/kubernetes/kubernetes.git | cut -d/ -f3 | grep -e '^v1\.[0-9]\{2\}\.[0]\{1,2\}$' | grep -v -e '^v1\.1[0-7]\{1\}' | cut -c2-)
+          echo "NEXT K8S VERSIONS ARE GOING TO BE TESTED: $K8S_VERSIONS"
+          echo ""
+          for version in $K8S_VERSIONS
+          do
+            echo $version;
+            helm template --kube-version $version --set ingress.enabled=true charts/kafka-ui -f charts/kafka-ui/values.yaml | kubeval --additional-schema-locations https://raw.githubusercontent.com/yannh/kubernetes-json-schema/master --strict -v $version;
+          done
```
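One subtlety in the `Check version` step above: `[[ "$helm_version_new" > "$helm_version_old" ]]` compares the two strings lexicographically, not as version numbers, so a bump from `0.9.0` to `0.10.0` would be rejected. A hedged sketch of a version-aware alternative, assuming GNU `sort` (present on `ubuntu-latest`):

```bash
#!/usr/bin/env bash
# Fail unless $new sorts strictly after $old under version ordering (sort -V).
new="0.10.0"
old="0.9.0"
if [[ "$new" != "$old" && "$(printf '%s\n' "$old" "$new" | sort -V | tail -n1)" == "$new" ]]; then
  echo "chart version increased: $old -> $new"
else
  echo "chart version was not increased" >&2
  exit 1
fi
```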
**.github/workflows/master.yaml** — 9 changes (vendored)

```diff
@@ -1,4 +1,4 @@
-name: "Master: Build & deploy"
+name: Master branch build & deploy
 on:
   workflow_dispatch:
   push:
@@ -58,7 +58,6 @@ jobs:
           builder: ${{ steps.buildx.outputs.name }}
           context: kafka-ui-api
           platforms: linux/amd64,linux/arm64
-          provenance: false
           push: true
           tags: |
             provectuslabs/kafka-ui:${{ steps.build.outputs.version }}
@@ -74,11 +73,11 @@ jobs:
       #################################
       - name: update-master-deployment
         run: |
-          git clone https://infra-tech:${{ secrets.INFRA_USER_ACCESS_TOKEN }}@github.com/provectus/kafka-ui-infra.git --branch master
+          git clone https://kafka-ui-infra:${{ secrets.KAFKA_UI_INFRA_TOKEN }}@gitlab.provectus.com/provectus-internals/kafka-ui-infra.git --branch master
           cd kafka-ui-infra/aws-infrastructure4eks/argocd/scripts
           echo "Image digest is:${{ steps.docker_build_and_push.outputs.digest }}"
           ./kafka-ui-update-master-digest.sh ${{ steps.docker_build_and_push.outputs.digest }}
-          git config --global user.email "infra-tech@provectus.com"
-          git config --global user.name "infra-tech"
+          git config --global user.email "kafka-ui-infra@provectus.com"
+          git config --global user.name "kafka-ui-infra"
           git add ../kafka-ui/*
           git commit -m "updated master image digest: ${{ steps.docker_build_and_push.outputs.digest }}" && git push
```
**.github/workflows/pr-checks.yaml** — 7 changes (vendored)

```diff
@@ -1,14 +1,13 @@
-name: "PR: Checklist linter"
+name: "PR Checklist checked"
 on:
   pull_request_target:
     types: [opened, edited, synchronize, reopened]
-permissions:
-  checks: write

 jobs:
   task-check:
     runs-on: ubuntu-latest
     steps:
-      - uses: kentaro-m/task-completed-checker-action@v0.1.2
+      - uses: kentaro-m/task-completed-checker-action@v0.1.1
         with:
           repo-token: "${{ secrets.GITHUB_TOKEN }}"
       - uses: dekinderfiets/pr-description-enforcer@0.0.1
```
**.github/workflows/release-helm.yaml** — new file, 39 lines (vendored)

```diff
@@ -0,0 +1,39 @@
+name: Release helm
+on:
+  push:
+    branches:
+      - master
+    paths:
+      - "charts/**"
+
+jobs:
+  release-helm:
+    runs-on:
+      ubuntu-latest
+    steps:
+      - uses: actions/checkout@v3
+        with:
+          fetch-depth: 1
+
+      - run: |
+          git config user.name github-actions
+          git config user.email github-actions@github.com
+
+      - uses: azure/setup-helm@v3
+
+      - name: add chart #realse helm with new version
+        run: |
+          VERSION=$(cat charts/kafka-ui/Chart.yaml | grep version | awk '{print $2}')
+          echo "HELM_VERSION=$(echo ${VERSION})" >> $GITHUB_ENV
+          MSG=$(helm package charts/kafka-ui)
+          git fetch origin
+          git stash
+          git checkout -b gh-pages origin/gh-pages
+          git pull
+          helm repo index .
+          git add -f ${MSG##*/} index.yaml
+          git commit -m "release ${VERSION}"
+          git push
+      - uses: rickstaa/action-create-tag@v1 #create new tag
+        with:
+          tag: "charts/kafka-ui-${{ env.HELM_VERSION }}"
```
**.github/workflows/release-serde-api.yaml** — 2 changes (vendored)

```diff
@@ -1,4 +1,4 @@
-name: "Infra: Release: Serde API"
+name: Release serde api
 on: workflow_dispatch

 jobs:
```
**.github/workflows/release.yaml** — 11 changes (vendored)

```diff
@@ -1,4 +1,4 @@
-name: "Infra: Release"
+name: Release
 on:
   release:
     types: [published]
@@ -34,7 +34,7 @@ jobs:
           echo "version=${VERSION}" >> $GITHUB_OUTPUT

       - name: Upload files to a GitHub release
-        uses: svenstaro/upload-release-action@2.7.0
+        uses: svenstaro/upload-release-action@2.5.0
         with:
           repo_token: ${{ secrets.GITHUB_TOKEN }}
           file: kafka-ui-api/target/kafka-ui-api-${{ steps.build.outputs.version }}.jar
@@ -77,7 +77,6 @@ jobs:
           builder: ${{ steps.buildx.outputs.name }}
           context: kafka-ui-api
           platforms: linux/amd64,linux/arm64
-          provenance: false
           push: true
           tags: |
             provectuslabs/kafka-ui:${{ steps.build.outputs.version }}
@@ -89,12 +88,14 @@ jobs:

   charts:
     runs-on: ubuntu-latest
+    permissions:
+      contents: write
     needs: release
     steps:
       - name: Repository Dispatch
         uses: peter-evans/repository-dispatch@v2
         with:
-          token: ${{ secrets.CHARTS_ACTIONS_TOKEN }}
-          repository: provectus/kafka-ui-charts
+          token: ${{ secrets.GITHUB_TOKEN }}
+          repository: provectus/kafka-ui
           event-type: prepare-helm-release
           client-payload: '{"appversion": "${{ needs.release.outputs.version }}"}'
```
**.github/workflows/release_drafter.yml** — 19 changes (vendored)

```diff
@@ -1,34 +1,19 @@
-name: "Infra: Release Drafter run"
+name: Release Drafter

 on:
   push:
     # branches to consider in the event; optional, defaults to all
     branches:
       - master
-  workflow_dispatch:
-    inputs:
-      version:
-        description: 'Release version'
-        required: false
-      branch:
-        description: 'Target branch'
-        required: false
-        default: 'master'
-
-permissions:
-  contents: read

 jobs:
   update_release_draft:
     runs-on: ubuntu-latest
-    permissions:
-      contents: write
-      pull-requests: write
     steps:
       - uses: release-drafter/release-drafter@v5
         with:
           config-name: release_drafter.yaml
           disable-autolabeler: true
-          version: ${{ github.event.inputs.version }}
-          commitish: ${{ github.event.inputs.branch }}
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
```
**.github/workflows/separate_env_public_create.yml** — 10 changes (vendored)

```diff
@@ -1,4 +1,4 @@
-name: "Infra: Feature Testing Public: Init env"
+name: Separate environment create
 on:
   workflow_dispatch:
     inputs:
@@ -47,7 +47,7 @@ jobs:
           restore-keys: |
             ${{ runner.os }}-buildx-
       - name: Configure AWS credentials for Kafka-UI account
-        uses: aws-actions/configure-aws-credentials@v3
+        uses: aws-actions/configure-aws-credentials@v2
         with:
           aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
           aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
@@ -76,14 +76,14 @@ jobs:
     steps:
       - name: clone
         run: |
-          git clone https://infra-tech:${{ secrets.INFRA_USER_ACCESS_TOKEN }}@github.com/provectus/kafka-ui-infra.git --branch envs
+          git clone https://kafka-ui-infra:${{ secrets.KAFKA_UI_INFRA_TOKEN }}@gitlab.provectus.com/provectus-internals/kafka-ui-infra.git

       - name: separate env create
         run: |
           cd kafka-ui-infra/aws-infrastructure4eks/argocd/scripts
           bash separate_env_create.sh ${{ github.event.inputs.ENV_NAME }} ${{ secrets.FEATURE_TESTING_UI_PASSWORD }} ${{ needs.build.outputs.tag }}
-          git config --global user.email "infra-tech@provectus.com"
-          git config --global user.name "infra-tech"
+          git config --global user.email "kafka-ui-infra@provectus.com"
+          git config --global user.name "kafka-ui-infra"
           git add -A
           git commit -m "separate env added: ${{ github.event.inputs.ENV_NAME }}" && git push || true
```

**(unnamed workflow file)** — the workflow named "Separate environment remove"

```diff
@@ -1,4 +1,4 @@
-name: "Infra: Feature Testing Public: Destroy env"
+name: Separate environment remove
 on:
   workflow_dispatch:
     inputs:
@@ -13,12 +13,12 @@ jobs:
     steps:
      - name: clone
        run: |
-         git clone https://infra-tech:${{ secrets.INFRA_USER_ACCESS_TOKEN }}@github.com/provectus/kafka-ui-infra.git --branch envs
+         git clone https://kafka-ui-infra:${{ secrets.KAFKA_UI_INFRA_TOKEN }}@gitlab.provectus.com/provectus-internals/kafka-ui-infra.git
      - name: separate environment remove
        run: |
          cd kafka-ui-infra/aws-infrastructure4eks/argocd/scripts
          bash separate_env_remove.sh ${{ github.event.inputs.ENV_NAME }}
-         git config --global user.email "infra-tech@provectus.com"
-         git config --global user.name "infra-tech"
+         git config --global user.email "kafka-ui-infra@provectus.com"
+         git config --global user.name "kafka-ui-infra"
          git add -A
          git commit -m "separate env removed: ${{ github.event.inputs.ENV_NAME }}" && git push || true
```
**.github/workflows/stale.yaml** — 2 changes (vendored)

```diff
@@ -1,4 +1,4 @@
-name: 'Infra: Close stale issues'
+name: 'Close stale issues'
 on:
   schedule:
     - cron: '30 1 * * *'
```
**.github/workflows/terraform-deploy.yml** — 4 changes (vendored)

```diff
@@ -1,4 +1,4 @@
-name: "Infra: Terraform deploy"
+name: Terraform deploy
 on:
   workflow_dispatch:
     inputs:
@@ -26,7 +26,7 @@ jobs:
           echo "Terraform will be triggered in this dir $TF_DIR"

       - name: Configure AWS credentials for Kafka-UI account
-        uses: aws-actions/configure-aws-credentials@v3
+        uses: aws-actions/configure-aws-credentials@v2
        with:
          aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
          aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
```
**.github/workflows/triage_issues.yml** — 2 changes (vendored)

```diff
@@ -1,4 +1,4 @@
-name: "Infra: Triage: Apply triage label for issues"
+name: Add triage label to new issues
 on:
   issues:
     types:
```
**.github/workflows/triage_prs.yml** — 2 changes (vendored)

```diff
@@ -1,4 +1,4 @@
-name: "Infra: Triage: Apply triage label for PRs"
+name: Add triage label to new PRs
 on:
   pull_request:
     types:
```

**(unnamed workflow file)** — a workflow with a `welcome` job triggered on new issues

```diff
@@ -7,9 +7,7 @@ on:
   issues:
     types:
       - opened
-permissions:
-  issues: write
-  pull-requests: write
+
 jobs:
   welcome:
     runs-on: ubuntu-latest
```
**.github/workflows/workflow_linter.yaml** — 2 changes (vendored)

```diff
@@ -1,4 +1,4 @@
-name: "Infra: Workflow linter"
+name: "Workflow linter"
 on:
   pull_request:
     types:
```
**.gitignore** — 3 changes (vendored)

```diff
@@ -31,9 +31,6 @@ build/
 .vscode/
 /kafka-ui-api/app/node

-### SDKMAN ###
-.sdkmanrc
-
 .DS_Store
 *.code-workspace
```

**README.md**

````diff
@@ -18,10 +18,6 @@
   <a href="https://www.producthunt.com/products/ui-for-apache-kafka/reviews/new">ProductHunt</a>
 </p>

-<p align="center">
-    <img src="https://repobeats.axiom.co/api/embed/2e8a7c2d711af9daddd34f9791143e7554c35d0f.svg" />
-</p>
-
 #### UI for Apache Kafka is a free, open-source web UI to monitor and manage Apache Kafka clusters.

 UI for Apache Kafka is a simple tool that makes your data flows observable, helps find and troubleshoot issues faster and deliver optimal performance. Its lightweight dashboard makes it easy to track key metrics of your Kafka clusters - Brokers, Topics, Partitions, Production, and Consumption.
@@ -91,7 +87,7 @@ docker run -it -p 8080:8080 -e DYNAMIC_CONFIG_ENABLED=true provectuslabs/kafka-u

 Then access the web UI at [http://localhost:8080](http://localhost:8080)

-The command is sufficient to try things out. When you're done trying things out, you can proceed with a [persistent installation](https://docs.kafka-ui.provectus.io/quick-start/persistent-start)
+The command is sufficient to try things out. When you're done trying things out, you can proceed with a [persistent installation](https://docs.kafka-ui.provectus.io/configuration/quick-start#persistent-start)

 ## Persistent installation

@@ -103,7 +99,7 @@ services:
     ports:
       - 8080:8080
     environment:
-      DYNAMIC_CONFIG_ENABLED: 'true'
+      DYNAMIC_CONFIG_ENABLED: true
     volumes:
       - ~/kui/config.yml:/etc/kafkaui/dynamic_config.yaml
 ```
````

**SECURITY.md**

```diff
@@ -6,8 +6,7 @@ Following versions of the project are currently being supported with security updates.

 | Version | Supported          |
 | ------- | ------------------ |
-| 0.7.x   | :white_check_mark: |
-| 0.6.x   | :x:                |
+| 0.6.x   | :white_check_mark: |
 | 0.5.x   | :x:                |
 | 0.4.x   | :x:                |
 | 0.3.x   | :x:                |
```
**charts/kafka-ui/.helmignore** — new file, 25 lines

```diff
@@ -0,0 +1,25 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
+example/
+README.md
```
**charts/kafka-ui/Chart.yaml** — new file, 7 lines

```diff
@@ -0,0 +1,7 @@
+apiVersion: v2
+name: kafka-ui
+description: A Helm chart for kafka-UI
+type: application
+version: 0.6.2
+appVersion: v0.6.2
+icon: https://github.com/provectus/kafka-ui/raw/master/documentation/images/kafka-ui-logo.png
```
**charts/kafka-ui/README.md** — new file, 1 line

```diff
@@ -0,0 +1 @@
+Please refer to our [documentation](https://docs.kafka-ui.provectus.io/configuration/helm-charts) to get some info on our helm charts.
```
**charts/kafka-ui/index.yaml** — new file, 3 lines

```diff
@@ -0,0 +1,3 @@
+apiVersion: v1
+entries: {}
+generated: "2021-11-11T12:26:08.479581+03:00"
```
**charts/kafka-ui/templates/NOTES.txt** — new file, 21 lines

```diff
@@ -0,0 +1,21 @@
+1. Get the application URL by running these commands:
+{{- if .Values.ingress.enabled }}
+{{- range $host := .Values.ingress.hosts }}
+  {{- range .paths }}
+  http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ . }}
+  {{- end }}
+{{- end }}
+{{- else if contains "NodePort" .Values.service.type }}
+  export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "kafka-ui.fullname" . }})
+  export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
+  echo http://$NODE_IP:$NODE_PORT
+{{- else if contains "LoadBalancer" .Values.service.type }}
+  NOTE: It may take a few minutes for the LoadBalancer IP to be available.
+        You can watch the status of by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "kafka-ui.fullname" . }}'
+  export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "kafka-ui.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
+  echo http://$SERVICE_IP:{{ .Values.service.port }}
+{{- else if contains "ClusterIP" .Values.service.type }}
+  export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "kafka-ui.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
+  echo "Visit http://127.0.0.1:8080 to use your application"
+  kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:8080
+{{- end }}
```
**charts/kafka-ui/templates/_helpers.tpl** — new file, 84 lines

```diff
@@ -0,0 +1,84 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "kafka-ui.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
+{{- end }}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define "kafka-ui.fullname" -}}
+{{- if .Values.fullnameOverride }}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
+{{- else }}
+{{- $name := default .Chart.Name .Values.nameOverride }}
+{{- if contains $name .Release.Name }}
+{{- .Release.Name | trunc 63 | trimSuffix "-" }}
+{{- else }}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
+{{- end }}
+{{- end }}
+{{- end }}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "kafka-ui.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
+{{- end }}
+
+{{/*
+Common labels
+*/}}
+{{- define "kafka-ui.labels" -}}
+helm.sh/chart: {{ include "kafka-ui.chart" . }}
+{{ include "kafka-ui.selectorLabels" . }}
+{{- if .Chart.AppVersion }}
+app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
+{{- end }}
+app.kubernetes.io/managed-by: {{ .Release.Service }}
+{{- end }}
+
+{{/*
+Selector labels
+*/}}
+{{- define "kafka-ui.selectorLabels" -}}
+app.kubernetes.io/name: {{ include "kafka-ui.name" . }}
+app.kubernetes.io/instance: {{ .Release.Name }}
+{{- end }}
+
+{{/*
+Create the name of the service account to use
+*/}}
+{{- define "kafka-ui.serviceAccountName" -}}
+{{- if .Values.serviceAccount.create }}
+{{- default (include "kafka-ui.fullname" .) .Values.serviceAccount.name }}
+{{- else }}
+{{- default "default" .Values.serviceAccount.name }}
+{{- end }}
+{{- end }}
+
+
+{{/*
+This allows us to check if the registry of the image is specified or not.
+*/}}
+{{- define "kafka-ui.imageName" -}}
+{{- $registryName := .Values.image.registry -}}
+{{- if .Values.global }}
+{{- if .Values.global.imageRegistry }}
+{{- $registryName = .Values.global.imageRegistry -}}
+{{- end -}}
+{{- end -}}
+{{- $repository := .Values.image.repository -}}
+{{- $tag := .Values.image.tag | default .Chart.AppVersion -}}
+{{- if $registryName }}
+{{- printf "%s/%s:%s" $registryName $repository $tag -}}
+{{- else }}
+{{- printf "%s:%s" $repository $tag -}}
+{{- end }}
+{{- end -}}
```
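The `kafka-ui.imageName` helper above only prefixes a registry when one is configured, with `global.imageRegistry` overriding `image.registry`. A quick way to see what it renders, assuming the chart is checked out locally under `./charts/kafka-ui`:

```bash
# Render the Deployment and inspect the image reference the helper produced.
helm template ./charts/kafka-ui \
  --set image.registry=docker.io \
  --set image.tag=v0.6.2 \
  --show-only templates/deployment.yaml | grep 'image: '
# expected: image: docker.io/provectuslabs/kafka-ui:v0.6.2
```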
**charts/kafka-ui/templates/configmap.yaml** — new file, 10 lines

```diff
@@ -0,0 +1,10 @@
+{{- if .Values.envs.config -}}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: {{ include "kafka-ui.fullname" . }}
+  labels:
+    {{- include "kafka-ui.labels" . | nindent 4 }}
+data:
+  {{- toYaml .Values.envs.config | nindent 2 }}
+{{- end -}}
```
**charts/kafka-ui/templates/configmap_fromValues.yaml** — new file, 11 lines

```diff
@@ -0,0 +1,11 @@
+{{- if .Values.yamlApplicationConfig -}}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: {{ include "kafka-ui.fullname" . }}-fromvalues
+  labels:
+    {{- include "kafka-ui.labels" . | nindent 4 }}
+data:
+  config.yml: |-
+    {{- toYaml .Values.yamlApplicationConfig | nindent 4}}
+{{ end }}
```
**charts/kafka-ui/templates/deployment.yaml** — new file, 150 lines

```diff
@@ -0,0 +1,150 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: {{ include "kafka-ui.fullname" . }}
+  labels:
+    {{- include "kafka-ui.labels" . | nindent 4 }}
+  {{- with .Values.annotations }}
+  annotations:
+    {{- toYaml . | nindent 4 }}
+  {{- end }}
+spec:
+  {{- if not .Values.autoscaling.enabled }}
+  replicas: {{ .Values.replicaCount }}
+  {{- end }}
+  selector:
+    matchLabels:
+      {{- include "kafka-ui.selectorLabels" . | nindent 6 }}
+  template:
+    metadata:
+      annotations:
+        {{- with .Values.podAnnotations }}
+        {{- toYaml . | nindent 8 }}
+        {{- end }}
+        checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }}
+        checksum/configFromValues: {{ include (print $.Template.BasePath "/configmap_fromValues.yaml") . | sha256sum }}
+        checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }}
+      labels:
+        {{- include "kafka-ui.selectorLabels" . | nindent 8 }}
+        {{- if .Values.podLabels }}
+        {{- toYaml .Values.podLabels | nindent 8 }}
+        {{- end }}
+    spec:
+      {{- with .Values.imagePullSecrets }}
+      imagePullSecrets:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+      {{- with .Values.initContainers }}
+      initContainers:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+      serviceAccountName: {{ include "kafka-ui.serviceAccountName" . }}
+      securityContext:
+        {{- toYaml .Values.podSecurityContext | nindent 8 }}
+      containers:
+        - name: {{ .Chart.Name }}
+          securityContext:
+            {{- toYaml .Values.securityContext | nindent 12 }}
+          image: {{ include "kafka-ui.imageName" . }}
+          imagePullPolicy: {{ .Values.image.pullPolicy }}
+          {{- if or .Values.env .Values.yamlApplicationConfig .Values.yamlApplicationConfigConfigMap}}
+          env:
+            {{- with .Values.env }}
+            {{- toYaml . | nindent 12 }}
+            {{- end }}
+            {{- if or .Values.yamlApplicationConfig .Values.yamlApplicationConfigConfigMap}}
+            - name: SPRING_CONFIG_ADDITIONAL-LOCATION
+              {{- if .Values.yamlApplicationConfig }}
+              value: /kafka-ui/config.yml
+              {{- else if .Values.yamlApplicationConfigConfigMap }}
+              value: /kafka-ui/{{ .Values.yamlApplicationConfigConfigMap.keyName | default "config.yml" }}
+              {{- end }}
+            {{- end }}
+          {{- end }}
+          envFrom:
+            {{- if .Values.existingConfigMap }}
+            - configMapRef:
+                name: {{ .Values.existingConfigMap }}
+            {{- end }}
+            {{- if .Values.envs.config }}
+            - configMapRef:
+                name: {{ include "kafka-ui.fullname" . }}
+            {{- end }}
+            {{- if .Values.existingSecret }}
+            - secretRef:
+                name: {{ .Values.existingSecret }}
+            {{- end }}
+            {{- if .Values.envs.secret}}
+            - secretRef:
+                name: {{ include "kafka-ui.fullname" . }}
+            {{- end}}
+          ports:
+            - name: http
+              containerPort: 8080
+              protocol: TCP
+          livenessProbe:
+            httpGet:
+              {{- $contextPath := .Values.envs.config.SERVER_SERVLET_CONTEXT_PATH | default "" | printf "%s/actuator/health" | urlParse }}
+              path: {{ get $contextPath "path" }}
+              port: http
+              {{- if .Values.probes.useHttpsScheme }}
+              scheme: HTTPS
+              {{- end }}
+            initialDelaySeconds: 60
+            periodSeconds: 30
+            timeoutSeconds: 10
+          readinessProbe:
+            httpGet:
+              {{- $contextPath := .Values.envs.config.SERVER_SERVLET_CONTEXT_PATH | default "" | printf "%s/actuator/health" | urlParse }}
+              path: {{ get $contextPath "path" }}
+              port: http
+              {{- if .Values.probes.useHttpsScheme }}
+              scheme: HTTPS
+              {{- end }}
+            initialDelaySeconds: 60
+            periodSeconds: 30
+            timeoutSeconds: 10
+          resources:
+            {{- toYaml .Values.resources | nindent 12 }}
+          {{- if or .Values.yamlApplicationConfig .Values.volumeMounts .Values.yamlApplicationConfigConfigMap}}
+          volumeMounts:
+            {{- with .Values.volumeMounts }}
+            {{- toYaml . | nindent 12 }}
+            {{- end }}
+            {{- if .Values.yamlApplicationConfig }}
+            - name: kafka-ui-yaml-conf
+              mountPath: /kafka-ui/
+            {{- end }}
+            {{- if .Values.yamlApplicationConfigConfigMap}}
+            - name: kafka-ui-yaml-conf-configmap
+              mountPath: /kafka-ui/
+            {{- end }}
+          {{- end }}
+      {{- if or .Values.yamlApplicationConfig .Values.volumes .Values.yamlApplicationConfigConfigMap}}
+      volumes:
+        {{- with .Values.volumes }}
+        {{- toYaml . | nindent 8 }}
+        {{- end }}
+        {{- if .Values.yamlApplicationConfig }}
+        - name: kafka-ui-yaml-conf
+          configMap:
+            name: {{ include "kafka-ui.fullname" . }}-fromvalues
+        {{- end }}
+        {{- if .Values.yamlApplicationConfigConfigMap}}
+        - name: kafka-ui-yaml-conf-configmap
+          configMap:
+            name: {{ .Values.yamlApplicationConfigConfigMap.name }}
+        {{- end }}
+      {{- end }}
+      {{- with .Values.nodeSelector }}
+      nodeSelector:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+      {{- with .Values.affinity }}
+      affinity:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+      {{- with .Values.tolerations }}
+      tolerations:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
```
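The probe blocks above derive their path by prepending `SERVER_SERVLET_CONTEXT_PATH` (when set in `envs.config`) to `/actuator/health` and taking the `path` component via `urlParse`. A quick local check of that behaviour, assuming the chart sits at `./charts/kafka-ui`:

```bash
# With a context path set, both probes should target <context>/actuator/health.
helm template ./charts/kafka-ui \
  --set envs.config.SERVER_SERVLET_CONTEXT_PATH=/kafka-ui \
  --show-only templates/deployment.yaml | grep 'path: '
# expected: path: /kafka-ui/actuator/health  (twice; plain /actuator/health when unset)
```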
**charts/kafka-ui/templates/hpa.yaml** — new file, 46 lines

```diff
@@ -0,0 +1,46 @@
+{{- if .Values.autoscaling.enabled }}
+{{- $kubeCapabilityVersion := semver .Capabilities.KubeVersion.Version -}}
+{{- $isHigher1p25 := ge (semver "1.25" | $kubeCapabilityVersion.Compare) 0 -}}
+{{- if and ($.Capabilities.APIVersions.Has "autoscaling/v2") $isHigher1p25 -}}
+apiVersion: autoscaling/v2
+{{- else }}
+apiVersion: autoscaling/v2beta1
+{{- end }}
+kind: HorizontalPodAutoscaler
+metadata:
+  name: {{ include "kafka-ui.fullname" . }}
+  labels:
+    {{- include "kafka-ui.labels" . | nindent 4 }}
+spec:
+  scaleTargetRef:
+    apiVersion: apps/v1
+    kind: Deployment
+    name: {{ include "kafka-ui.fullname" . }}
+  minReplicas: {{ .Values.autoscaling.minReplicas }}
+  maxReplicas: {{ .Values.autoscaling.maxReplicas }}
+  metrics:
+    {{- if .Values.autoscaling.targetCPUUtilizationPercentage }}
+    - type: Resource
+      resource:
+        name: cpu
+        {{- if $isHigher1p25 }}
+        target:
+          type: Utilization
+          averageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }}
+        {{- else }}
+        targetAverageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }}
+        {{- end }}
+    {{- end }}
+    {{- if .Values.autoscaling.targetMemoryUtilizationPercentage }}
+    - type: Resource
+      resource:
+        name: memory
+        {{- if $isHigher1p25 }}
+        target:
+          type: Utilization
+          averageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }}
+        {{- else }}
+        targetAverageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }}
+        {{- end }}
+    {{- end }}
+{{- end }}
```
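The HPA template above emits `autoscaling/v2` only when the cluster both advertises that API and reports Kubernetes 1.25 or newer, falling back to `autoscaling/v2beta1` otherwise. A way to exercise that branch offline, assuming `helm template`'s `--kube-version`/`--api-versions` flags and the chart's default autoscaling values:

```bash
# Force the capability checks the template performs and read the resulting apiVersion.
helm template ./charts/kafka-ui \
  --set autoscaling.enabled=true \
  --api-versions autoscaling/v2 \
  --kube-version 1.25.0 \
  --show-only templates/hpa.yaml | grep -m1 'apiVersion:'
# expected: apiVersion: autoscaling/v2 (autoscaling/v2beta1 for kube versions below 1.25)
```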
**charts/kafka-ui/templates/ingress.yaml** — new file, 89 lines

```diff
@@ -0,0 +1,89 @@
+{{- if .Values.ingress.enabled -}}
+{{- $fullName := include "kafka-ui.fullname" . -}}
+{{- $svcPort := .Values.service.port -}}
+{{- $kubeCapabilityVersion := semver .Capabilities.KubeVersion.Version -}}
+{{- $isHigher1p19 := ge (semver "1.19" | $kubeCapabilityVersion.Compare) 0 -}}
+{{- if and ($.Capabilities.APIVersions.Has "networking.k8s.io/v1") $isHigher1p19 -}}
+apiVersion: networking.k8s.io/v1
+{{- else if $.Capabilities.APIVersions.Has "networking.k8s.io/v1beta1" }}
+apiVersion: networking.k8s.io/v1beta1
+{{- else }}
+apiVersion: extensions/v1beta1
+{{- end }}
+kind: Ingress
+metadata:
+  name: {{ $fullName }}
+  labels:
+    {{- include "kafka-ui.labels" . | nindent 4 }}
+  {{- with .Values.ingress.annotations }}
+  annotations:
+    {{- toYaml . | nindent 4 }}
+  {{- end }}
+spec:
+  {{- if .Values.ingress.tls.enabled }}
+  tls:
+    - hosts:
+        - {{ tpl .Values.ingress.host . }}
+      secretName: {{ .Values.ingress.tls.secretName }}
+  {{- end }}
+  {{- if .Values.ingress.ingressClassName }}
+  ingressClassName: {{ .Values.ingress.ingressClassName }}
+  {{- end }}
+  rules:
+    - http:
+        paths:
+          {{- if and ($.Capabilities.APIVersions.Has "networking.k8s.io/v1") $isHigher1p19 -}}
+          {{- range .Values.ingress.precedingPaths }}
+          - path: {{ .path }}
+            pathType: {{ .Values.ingress.pathType }}
+            backend:
+              service:
+                name: {{ .serviceName }}
+                port:
+                  number: {{ .servicePort }}
+          {{- end }}
+          - backend:
+              service:
+                name: {{ $fullName }}
+                port:
+                  number: {{ $svcPort }}
+            pathType: {{ .Values.ingress.pathType }}
+            {{- if .Values.ingress.path }}
+            path: {{ .Values.ingress.path }}
+            {{- end }}
+          {{- range .Values.ingress.succeedingPaths }}
+          - path: {{ .path }}
+            pathType: {{ .Values.ingress.pathType }}
+            backend:
+              service:
+                name: {{ .serviceName }}
+                port:
+                  number: {{ .servicePort }}
+          {{- end }}
+      {{- if tpl .Values.ingress.host . }}
+      host: {{tpl .Values.ingress.host . }}
+      {{- end }}
+      {{- else -}}
+          {{- range .Values.ingress.precedingPaths }}
+          - path: {{ .path }}
+            backend:
+              serviceName: {{ .serviceName }}
+              servicePort: {{ .servicePort }}
+          {{- end }}
+          - backend:
+              serviceName: {{ $fullName }}
+              servicePort: {{ $svcPort }}
+            {{- if .Values.ingress.path }}
+            path: {{ .Values.ingress.path }}
+            {{- end }}
+          {{- range .Values.ingress.succeedingPaths }}
+          - path: {{ .path }}
+            backend:
+              serviceName: {{ .serviceName }}
+              servicePort: {{ .servicePort }}
+          {{- end }}
+      {{- if tpl .Values.ingress.host . }}
+      host: {{ tpl .Values.ingress.host . }}
+      {{- end }}
+{{- end }}
+{{- end }}
```
**charts/kafka-ui/templates/networkpolicy-egress.yaml** — new file, 18 lines

```diff
@@ -0,0 +1,18 @@
+{{- if and .Values.networkPolicy.enabled .Values.networkPolicy.egressRules.customRules }}
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+  name: {{ printf "%s-egress" (include "kafka-ui.fullname" .) }}
+  labels:
+    {{- include "kafka-ui.labels" . | nindent 4 }}
+spec:
+  podSelector:
+    matchLabels:
+      {{- include "kafka-ui.selectorLabels" . | nindent 6 }}
+  policyTypes:
+    - Egress
+  egress:
+    {{- if .Values.networkPolicy.egressRules.customRules }}
+    {{- toYaml .Values.networkPolicy.egressRules.customRules | nindent 4 }}
+    {{- end }}
+{{- end }}
```
**charts/kafka-ui/templates/networkpolicy-ingress.yaml** — new file, 18 lines

```diff
@@ -0,0 +1,18 @@
+{{- if and .Values.networkPolicy.enabled .Values.networkPolicy.ingressRules.customRules }}
+apiVersion: networking.k8s.io/v1
+kind: NetworkPolicy
+metadata:
+  name: {{ printf "%s-ingress" (include "kafka-ui.fullname" .) }}
+  labels:
+    {{- include "kafka-ui.labels" . | nindent 4 }}
+spec:
+  podSelector:
+    matchLabels:
+      {{- include "kafka-ui.selectorLabels" . | nindent 6 }}
+  policyTypes:
+    - Ingress
+  ingress:
+    {{- if .Values.networkPolicy.ingressRules.customRules }}
+    {{- toYaml .Values.networkPolicy.ingressRules.customRules | nindent 4 }}
+    {{- end }}
+{{- end }}
```
**charts/kafka-ui/templates/secret.yaml** — new file, 13 lines

```diff
@@ -0,0 +1,13 @@
+{{- if .Values.envs.secret -}}
+apiVersion: v1
+kind: Secret
+metadata:
+  name: {{ include "kafka-ui.fullname" . }}
+  labels:
+    {{- include "kafka-ui.labels" . | nindent 4 }}
+type: Opaque
+data:
+  {{- range $key, $val := .Values.envs.secret }}
+  {{ $key }}: {{ $val | b64enc | quote }}
+  {{- end -}}
+{{- end}}
```
22  charts/kafka-ui/templates/service.yaml  Normal file
@@ -0,0 +1,22 @@
apiVersion: v1
kind: Service
metadata:
  name: {{ include "kafka-ui.fullname" . }}
  labels:
    {{- include "kafka-ui.labels" . | nindent 4 }}
  {{- if .Values.service.annotations }}
  annotations:
    {{ toYaml .Values.service.annotations | nindent 4 }}
  {{- end }}
spec:
  type: {{ .Values.service.type }}
  ports:
    - port: {{ .Values.service.port }}
      targetPort: http
      protocol: TCP
      name: http
      {{- if (and (eq .Values.service.type "NodePort") .Values.service.nodePort) }}
      nodePort: {{ .Values.service.nodePort }}
      {{- end }}
  selector:
    {{- include "kafka-ui.selectorLabels" . | nindent 4 }}
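Note the nodePort field is emitted only when the type is NodePort and a port is actually set. A sketch of the override, assuming a port in the default NodePort range:

service:
  type: NodePort
  port: 80
  nodePort: 30080   # arbitrary example within the usual 30000-32767 range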
12  charts/kafka-ui/templates/serviceaccount.yaml  Normal file
@@ -0,0 +1,12 @@
{{- if .Values.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
  name: {{ include "kafka-ui.serviceAccountName" . }}
  labels:
    {{- include "kafka-ui.labels" . | nindent 4 }}
  {{- with .Values.serviceAccount.annotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
{{- end }}
161  charts/kafka-ui/values.yaml  Normal file
@@ -0,0 +1,161 @@
replicaCount: 1

image:
  registry: docker.io
  repository: provectuslabs/kafka-ui
  pullPolicy: IfNotPresent
  # Overrides the image tag whose default is the chart appVersion.
  tag: ""

imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""

serviceAccount:
  # Specifies whether a service account should be created
  create: true
  # Annotations to add to the service account
  annotations: {}
  # The name of the service account to use.
  # If not set and create is true, a name is generated using the fullname template
  name: ""

existingConfigMap: ""
yamlApplicationConfig:
  {}
  # kafka:
  #   clusters:
  #     - name: yaml
  #       bootstrapServers: kafka-service:9092
  # spring:
  #   security:
  #     oauth2:
  # auth:
  #   type: disabled
  # management:
  #   health:
  #     ldap:
  #       enabled: false
yamlApplicationConfigConfigMap:
  {}
  # keyName: config.yml
  # name: configMapName
existingSecret: ""
envs:
  secret: {}
  config: {}

networkPolicy:
  enabled: false
  egressRules:
    ## Additional custom egress rules
    ## e.g:
    ## customRules:
    ##   - to:
    ##       - namespaceSelector:
    ##           matchLabels:
    ##             label: example
    customRules: []
  ingressRules:
    ## Additional custom ingress rules
    ## e.g:
    ## customRules:
    ##   - from:
    ##       - namespaceSelector:
    ##           matchLabels:
    ##             label: example
    customRules: []

podAnnotations: {}
podLabels: {}

## Annotations to be added to kafka-ui Deployment
##
annotations: {}

## Set field scheme as HTTPS for readiness and liveness probe
##
probes:
  useHttpsScheme: false

podSecurityContext:
  {}
  # fsGroup: 2000

securityContext:
  {}
  # capabilities:
  #   drop:
  #     - ALL
  # readOnlyRootFilesystem: true
  # runAsNonRoot: true
  # runAsUser: 1000

service:
  type: ClusterIP
  port: 80
  # if you want to force a specific nodePort. Must be used with service.type=NodePort
  # nodePort:

# Ingress configuration
ingress:
  # Enable ingress resource
  enabled: false

  # Annotations for the Ingress
  annotations: {}

  # ingressClassName for the Ingress
  ingressClassName: ""

  # The path for the Ingress
  path: "/"

  # The path type for the Ingress
  pathType: "Prefix"

  # The hostname for the Ingress
  host: ""

  # configs for Ingress TLS
  tls:
    # Enable TLS termination for the Ingress
    enabled: false
    # the name of a pre-created Secret containing a TLS private key and certificate
    secretName: ""

  # HTTP paths to add to the Ingress before the default path
  precedingPaths: []

  # HTTP paths to add to the Ingress after the default path
  succeedingPaths: []

resources:
  {}
  # limits:
  #   cpu: 200m
  #   memory: 512Mi
  # requests:
  #   cpu: 200m
  #   memory: 256Mi

autoscaling:
  enabled: false
  minReplicas: 1
  maxReplicas: 100
  targetCPUUtilizationPercentage: 80
  # targetMemoryUtilizationPercentage: 80

nodeSelector: {}

tolerations: []

affinity: {}

env: {}

initContainers: {}

volumeMounts: {}

volumes: {}
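Taken together, a minimal install needs little more than a bootstrap address. A hedged override sketch using only keys defined above (the cluster name and release name are examples):

# helm install kafka-ui ./charts/kafka-ui -f values-override.yaml   (hypothetical release name)
yamlApplicationConfig:
  kafka:
    clusters:
      - name: local                          # example cluster name
        bootstrapServers: kafka-service:9092
  auth:
    type: disabled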
@@ -8,9 +8,9 @@
6. [kafka-ui-auth-context.yaml](./kafka-ui-auth-context.yaml) - Basic (username/password) authentication with custom path (URL) (issue 861).
7. [e2e-tests.yaml](./e2e-tests.yaml) - Configuration with different connectors (github-source, s3, sink-activities, source-activities) and Ksql functionality.
8. [kafka-ui-jmx-secured.yml](./kafka-ui-jmx-secured.yml) - Kafka's JMX with SSL and authentication.
9. [kafka-ui-reverse-proxy.yaml](./nginx-proxy.yaml) - An example for using the app behind a proxy (like nginx).
9. [kafka-ui-reverse-proxy.yaml](./kafka-ui-reverse-proxy.yaml) - An example for using the app behind a proxy (like nginx).
10. [kafka-ui-sasl.yaml](./kafka-ui-sasl.yaml) - SASL auth for Kafka.
11. [kafka-ui-traefik-proxy.yaml](./traefik-proxy.yaml) - Traefik specific proxy configuration.
11. [kafka-ui-traefik-proxy.yaml](./kafka-ui-traefik-proxy.yaml) - Traefik specific proxy configuration.
12. [oauth-cognito.yaml](./oauth-cognito.yaml) - OAuth2 with Cognito
13. [kafka-ui-with-jmx-exporter.yaml](./kafka-ui-with-jmx-exporter.yaml) - A configuration with 2 kafka clusters with enabled prometheus jmx exporters instead of jmx.
14. [kafka-with-zookeeper.yaml](./kafka-with-zookeeper.yaml) - An example for using kafka with zookeeper
@@ -15,23 +15,26 @@ services:
      KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka0:29092
      KAFKA_CLUSTERS_0_METRICS_PORT: 9997
      KAFKA_CLUSTERS_0_SCHEMAREGISTRY: http://schemaregistry0:8085

      AUTH_TYPE: "LDAP"
      SPRING_LDAP_URLS: "ldap://ldap:10389"
      SPRING_LDAP_BASE: "cn={0},ou=people,dc=planetexpress,dc=com"
      SPRING_LDAP_ADMIN_USER: "cn=admin,dc=planetexpress,dc=com"
      SPRING_LDAP_ADMIN_PASSWORD: "GoodNewsEveryone"
      SPRING_LDAP_USER_FILTER_SEARCH_BASE: "dc=planetexpress,dc=com"
      SPRING_LDAP_USER_FILTER_SEARCH_FILTER: "(&(uid={0})(objectClass=inetOrgPerson))"
      SPRING_LDAP_GROUP_FILTER_SEARCH_BASE: "ou=people,dc=planetexpress,dc=com"
      # OAUTH2.LDAP.ACTIVEDIRECTORY: true
      # OAUTH2.LDAP.AСTIVEDIRECTORY.DOMAIN: "memelord.lol"
      SPRING_LDAP_DN_PATTERN: "cn={0},ou=people,dc=planetexpress,dc=com"

      # ===== USER SEARCH FILTER INSTEAD OF DN =====

      # SPRING_LDAP_USERFILTER_SEARCHBASE: "dc=planetexpress,dc=com"
      # SPRING_LDAP_USERFILTER_SEARCHFILTER: "(&(uid={0})(objectClass=inetOrgPerson))"
      # LDAP ADMIN USER
      # SPRING_LDAP_ADMINUSER: "cn=admin,dc=planetexpress,dc=com"
      # SPRING_LDAP_ADMINPASSWORD: "GoodNewsEveryone"

      # ===== ACTIVE DIRECTORY =====

      # OAUTH2.LDAP.ACTIVEDIRECTORY: true
      # OAUTH2.LDAP.AСTIVEDIRECTORY.DOMAIN: "memelord.lol"

  ldap:
    image: rroemhild/test-openldap:latest
    hostname: "ldap"
    ports:
      - 10389:10389

  kafka0:
    image: confluentinc/cp-kafka:7.2.1
@@ -76,4 +79,4 @@ services:

      SCHEMA_REGISTRY_SCHEMA_REGISTRY_INTER_INSTANCE_PROTOCOL: "http"
      SCHEMA_REGISTRY_LOG4J_ROOT_LOGLEVEL: INFO
      SCHEMA_REGISTRY_KAFKASTORE_TOPIC: _schemas
      SCHEMA_REGISTRY_KAFKASTORE_TOPIC: _schemas

@@ -124,7 +124,7 @@ services:
  kafka-init-topics:
    image: confluentinc/cp-kafka:7.2.1
    volumes:
      - ./data/message.json:/data/message.json
      - ./message.json:/data/message.json
    depends_on:
      kafka0:
        condition: service_healthy

@@ -187,4 +187,4 @@ services:
      KSQL_KSQL_SCHEMA_REGISTRY_URL: http://schemaregistry0:8085
      KSQL_KSQL_SERVICE_ID: my_ksql_1
      KSQL_KSQL_HIDDEN_TOPICS: '^_.*'
      KSQL_CACHE_MAX_BYTES_BUFFERING: 0
      KSQL_CACHE_MAX_BYTES_BUFFERING: 0

@@ -11,8 +11,4 @@ KafkaClient {
  user_admin="admin-secret";
};

Client {
  org.apache.zookeeper.server.auth.DigestLoginModule required
  username="zkuser"
  password="zkuserpassword";
};
Client {};

@@ -1,4 +0,0 @@
Server {
  org.apache.zookeeper.server.auth.DigestLoginModule required
  user_zkuser="zkuserpassword";
};

@@ -1,2 +1,2 @@
rules:
  - pattern: ".*"
  - pattern: ".*"

@@ -57,7 +57,7 @@ services:
  kafka-init-topics:
    image: confluentinc/cp-kafka:7.2.1
    volumes:
      - ./data/message.json:/data/message.json
      - ./message.json:/data/message.json
    depends_on:
      - kafka1
    command: "bash -c 'echo Waiting for Kafka to be ready... && \

@@ -80,4 +80,4 @@ services:
      KAFKA_CLUSTERS_0_METRICS_PORT: 9997
      KAFKA_CLUSTERS_0_SCHEMAREGISTRY: http://schemaregistry1:8085
      KAFKA_CLUSTERS_0_SCHEMAREGISTRYAUTH_USERNAME: admin
      KAFKA_CLUSTERS_0_SCHEMAREGISTRYAUTH_PASSWORD: letmein
      KAFKA_CLUSTERS_0_SCHEMAREGISTRYAUTH_PASSWORD: letmein

84  documentation/compose/kafka-clusters-only.yaml  Normal file
@@ -0,0 +1,84 @@
---
version: "2"
services:
  kafka0:
    image: confluentinc/cp-kafka:7.2.1
    hostname: kafka0
    container_name: kafka0
    ports:
      - "9092:9092"
      - "9997:9997"
    environment:
      KAFKA_BROKER_ID: 1
      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: "CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT"
      KAFKA_ADVERTISED_LISTENERS: "PLAINTEXT://kafka0:29092,PLAINTEXT_HOST://localhost:9092"
      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
      KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0
      KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1
      KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1
      KAFKA_JMX_PORT: 9997
      KAFKA_JMX_HOSTNAME: localhost
      KAFKA_PROCESS_ROLES: "broker,controller"
      KAFKA_NODE_ID: 1
      KAFKA_CONTROLLER_QUORUM_VOTERS: "1@kafka0:29093"
      KAFKA_LISTENERS: "PLAINTEXT://kafka0:29092,CONTROLLER://kafka0:29093,PLAINTEXT_HOST://0.0.0.0:9092"
      KAFKA_INTER_BROKER_LISTENER_NAME: "PLAINTEXT"
      KAFKA_CONTROLLER_LISTENER_NAMES: "CONTROLLER"
      KAFKA_LOG_DIRS: "/tmp/kraft-combined-logs"
    volumes:
      - ./scripts/update_run_cluster.sh:/tmp/update_run.sh
      - ./scripts/clusterID:/tmp/clusterID
    command: 'bash -c ''if [ ! -f /tmp/update_run.sh ]; then echo "ERROR: Did you forget the update_run.sh file that came with this docker-compose.yml file?" && exit 1 ; else /tmp/update_run.sh && /etc/confluent/docker/run ; fi'''

  schemaregistry0:
    image: confluentinc/cp-schema-registry:7.2.1
    depends_on:
      - kafka0
    environment:
      SCHEMA_REGISTRY_KAFKASTORE_BOOTSTRAP_SERVERS: PLAINTEXT://kafka0:29092
      SCHEMA_REGISTRY_KAFKASTORE_SECURITY_PROTOCOL: PLAINTEXT
      SCHEMA_REGISTRY_HOST_NAME: schemaregistry0
      SCHEMA_REGISTRY_LISTENERS: http://schemaregistry0:8085

      SCHEMA_REGISTRY_SCHEMA_REGISTRY_INTER_INSTANCE_PROTOCOL: "http"
      SCHEMA_REGISTRY_LOG4J_ROOT_LOGLEVEL: INFO
      SCHEMA_REGISTRY_KAFKASTORE_TOPIC: _schemas
    ports:
      - 8085:8085

  kafka-connect0:
    image: confluentinc/cp-kafka-connect:7.2.1
    ports:
      - 8083:8083
    depends_on:
      - kafka0
      - schemaregistry0
    environment:
      CONNECT_BOOTSTRAP_SERVERS: kafka0:29092
      CONNECT_GROUP_ID: compose-connect-group
      CONNECT_CONFIG_STORAGE_TOPIC: _connect_configs
      CONNECT_CONFIG_STORAGE_REPLICATION_FACTOR: 1
      CONNECT_OFFSET_STORAGE_TOPIC: _connect_offset
      CONNECT_OFFSET_STORAGE_REPLICATION_FACTOR: 1
      CONNECT_STATUS_STORAGE_TOPIC: _connect_status
      CONNECT_STATUS_STORAGE_REPLICATION_FACTOR: 1
      CONNECT_KEY_CONVERTER: org.apache.kafka.connect.storage.StringConverter
      CONNECT_KEY_CONVERTER_SCHEMA_REGISTRY_URL: http://schemaregistry0:8085
      CONNECT_VALUE_CONVERTER: org.apache.kafka.connect.storage.StringConverter
      CONNECT_VALUE_CONVERTER_SCHEMA_REGISTRY_URL: http://schemaregistry0:8085
      CONNECT_INTERNAL_KEY_CONVERTER: org.apache.kafka.connect.json.JsonConverter
      CONNECT_INTERNAL_VALUE_CONVERTER: org.apache.kafka.connect.json.JsonConverter
      CONNECT_REST_ADVERTISED_HOST_NAME: kafka-connect0
      CONNECT_PLUGIN_PATH: "/usr/share/java,/usr/share/confluent-hub-components"

  kafka-init-topics:
    image: confluentinc/cp-kafka:7.2.1
    volumes:
      - ./message.json:/data/message.json
    depends_on:
      - kafka0
    command: "bash -c 'echo Waiting for Kafka to be ready... && \
               cub kafka-ready -b kafka0:29092 1 30 && \
               kafka-topics --create --topic users --partitions 3 --replication-factor 1 --if-not-exists --bootstrap-server kafka0:29092 && \
               kafka-topics --create --topic messages --partitions 2 --replication-factor 1 --if-not-exists --bootstrap-server kafka0:29092 && \
               kafka-console-producer --bootstrap-server kafka0:29092 --topic users < /data/message.json'"
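This stack intentionally contains only the Kafka side (broker, schema registry, connect), no UI. A hedged companion snippet for pointing a locally run kafka-ui at it, reusing env-variable names that appear elsewhere in this diff (the cluster and connect names are examples):

  kafka-ui:
    image: provectuslabs/kafka-ui:latest
    ports:
      - 8080:8080
    environment:
      KAFKA_CLUSTERS_0_NAME: local                                   # example cluster name
      KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka0:29092
      KAFKA_CLUSTERS_0_SCHEMAREGISTRY: http://schemaregistry0:8085
      KAFKA_CLUSTERS_0_KAFKACONNECT_0_NAME: first                    # example connect name
      KAFKA_CLUSTERS_0_KAFKACONNECT_0_ADDRESS: http://kafka-connect0:8083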
@@ -1,59 +0,0 @@
---
version: '2'
services:

  kafka-ui:
    container_name: kafka-ui
    image: provectuslabs/kafka-ui:latest
    ports:
      - 8080:8080
    depends_on:
      - zookeeper
      - kafka
    environment:
      KAFKA_CLUSTERS_0_NAME: local
      KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka:29092
      KAFKA_CLUSTERS_0_PROPERTIES_SECURITY_PROTOCOL: SASL_PLAINTEXT
      KAFKA_CLUSTERS_0_PROPERTIES_SASL_MECHANISM: PLAIN
      KAFKA_CLUSTERS_0_PROPERTIES_SASL_JAAS_CONFIG: 'org.apache.kafka.common.security.plain.PlainLoginModule required username="admin" password="admin-secret";'

  zookeeper:
    image: wurstmeister/zookeeper:3.4.6
    environment:
      JVMFLAGS: "-Djava.security.auth.login.config=/etc/zookeeper/zookeeper_jaas.conf"
    volumes:
      - ./jaas/zookeeper_jaas.conf:/etc/zookeeper/zookeeper_jaas.conf
    ports:
      - 2181:2181

  kafka:
    image: confluentinc/cp-kafka:7.2.1
    hostname: kafka
    container_name: kafka
    ports:
      - "9092:9092"
      - "9997:9997"
    environment:
      KAFKA_BROKER_ID: 1
      KAFKA_ZOOKEEPER_CONNECT: 'zookeeper:2181'
      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: 'CONTROLLER:PLAINTEXT,SASL_PLAINTEXT:SASL_PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT'
      KAFKA_ADVERTISED_LISTENERS: 'SASL_PLAINTEXT://kafka:29092,PLAINTEXT_HOST://localhost:9092'
      KAFKA_OPTS: "-Djava.security.auth.login.config=/etc/kafka/jaas/kafka_server.conf"
      KAFKA_AUTHORIZER_CLASS_NAME: "kafka.security.authorizer.AclAuthorizer"
      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
      KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0
      KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1
      KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1
      KAFKA_JMX_PORT: 9997
      KAFKA_JMX_HOSTNAME: localhost
      KAFKA_NODE_ID: 1
      KAFKA_CONTROLLER_QUORUM_VOTERS: '1@kafka:29093'
      KAFKA_LISTENERS: 'SASL_PLAINTEXT://kafka:29092,CONTROLLER://kafka:29093,PLAINTEXT_HOST://0.0.0.0:9092'
      KAFKA_INTER_BROKER_LISTENER_NAME: 'SASL_PLAINTEXT'
      KAFKA_SASL_ENABLED_MECHANISMS: 'PLAIN'
      KAFKA_SASL_MECHANISM_INTER_BROKER_PROTOCOL: 'PLAIN'
      KAFKA_SECURITY_PROTOCOL: 'SASL_PLAINTEXT'
      KAFKA_SUPER_USERS: 'User:admin'
    volumes:
      - ./scripts/update_run.sh:/tmp/update_run.sh
      - ./jaas:/etc/kafka/jaas
@@ -20,8 +20,6 @@ services:
      KAFKA_CLUSTERS_0_KAFKACONNECT_0_NAME: first
      KAFKA_CLUSTERS_0_KAFKACONNECT_0_ADDRESS: http://kafka-connect0:8083
      DYNAMIC_CONFIG_ENABLED: 'true' # not necessary, added for tests
      KAFKA_CLUSTERS_0_AUDIT_TOPICAUDITENABLED: 'true'
      KAFKA_CLUSTERS_0_AUDIT_CONSOLEAUDITENABLED: 'true'

  kafka0:
    image: confluentinc/cp-kafka:7.2.1.arm64

@@ -95,7 +93,7 @@ services:
  kafka-init-topics:
    image: confluentinc/cp-kafka:7.2.1.arm64
    volumes:
      - ./data/message.json:/data/message.json
      - ./message.json:/data/message.json
    depends_on:
      - kafka0
    command: "bash -c 'echo Waiting for Kafka to be ready... && \

@@ -69,7 +69,7 @@ services:
    build:
      context: ./kafka-connect
      args:
        image: confluentinc/cp-kafka-connect:7.2.1
        image: confluentinc/cp-kafka-connect:6.0.1
    ports:
      - 8083:8083
    depends_on:

@@ -104,7 +104,7 @@ services:
  kafka-init-topics:
    image: confluentinc/cp-kafka:7.2.1
    volumes:
      - ./data/message.json:/data/message.json
      - ./message.json:/data/message.json
    depends_on:
      - kafka0
    command: "bash -c 'echo Waiting for Kafka to be ready... && \

@@ -4,7 +4,7 @@ services:
  nginx:
    image: nginx:latest
    volumes:
      - ./data/proxy.conf:/etc/nginx/conf.d/default.conf
      - ./proxy.conf:/etc/nginx/conf.d/default.conf
    ports:
      - 8080:80

@@ -115,7 +115,7 @@ services:
      SCHEMA_REGISTRY_KAFKASTORE_TOPIC: _schemas

  kafka-connect0:
    image: confluentinc/cp-kafka-connect:7.2.1
    image: confluentinc/cp-kafka-connect:6.0.1
    ports:
      - 8083:8083
    depends_on:

@@ -142,7 +142,7 @@ services:
  kafka-init-topics:
    image: confluentinc/cp-kafka:7.2.1
    volumes:
      - ./data/message.json:/data/message.json
      - ./message.json:/data/message.json
    depends_on:
      - kafka1
    command: "bash -c 'echo Waiting for Kafka to be ready... && \

@@ -38,7 +38,7 @@ services:
  kafka-init-topics:
    image: confluentinc/cp-kafka:7.2.1
    volumes:
      - ./data/message.json:/data/message.json
      - ./message.json:/data/message.json
    depends_on:
      - kafka
    command: "bash -c 'echo Waiting for Kafka to be ready... && \
22  documentation/compose/oauth-cognito.yaml  Normal file
@@ -0,0 +1,22 @@
---
version: '3.4'
services:

  kafka-ui:
    container_name: kafka-ui
    image: provectuslabs/kafka-ui:local
    ports:
      - 8080:8080
    depends_on:
      - kafka0 # OMITTED, TAKE UP AN EXAMPLE FROM OTHER COMPOSE FILES
    environment:
      KAFKA_CLUSTERS_0_NAME: local
      KAFKA_CLUSTERS_0_PROPERTIES_SECURITY_PROTOCOL: SSL
      KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka0:29092
      AUTH_TYPE: OAUTH2_COGNITO
      AUTH_COGNITO_ISSUER_URI: "https://cognito-idp.eu-central-1.amazonaws.com/eu-central-xxxxxx"
      AUTH_COGNITO_CLIENT_ID: ""
      AUTH_COGNITO_CLIENT_SECRET: ""
      AUTH_COGNITO_SCOPE: "openid"
      AUTH_COGNITO_USER_NAME_ATTRIBUTE: "username"
      AUTH_COGNITO_LOGOUT_URI: "https://<domain>.auth.eu-central-1.amazoncognito.com/logout"
@@ -1,11 +1,7 @@
#FROM azul/zulu-openjdk-alpine:17-jre-headless
FROM azul/zulu-openjdk-alpine@sha256:a36679ac0d28cb835e2a8c00e1e0d95509c6c51c5081c7782b85edb1f37a771a

RUN apk add --no-cache \
    # snappy codec
    gcompat \
    # configuring timezones
    tzdata
RUN apk add --no-cache gcompat # need to make snappy codec work
RUN addgroup -S kafkaui && adduser -S kafkaui -G kafkaui

# creating folder for dynamic config usage (certificates uploads, etc)
@@ -12,7 +12,7 @@
  <artifactId>kafka-ui-api</artifactId>

  <properties>
    <jacoco.version>0.8.10</jacoco.version>
    <jacoco.version>0.8.8</jacoco.version>
    <sonar.java.coveragePlugin>jacoco</sonar.java.coveragePlugin>
    <sonar.dynamicAnalysis>reuseReports</sonar.dynamicAnalysis>
    <sonar.jacoco.reportPath>${project.basedir}/target/jacoco.exec</sonar.jacoco.reportPath>

@@ -21,6 +21,12 @@
  </properties>

  <dependencies>
    <dependency>
      <!--TODO: remove, when spring-boot fixed dependency to 6.0.8+ (6.0.7 has CVE) -->
      <groupId>org.springframework</groupId>
      <artifactId>spring-core</artifactId>
      <version>6.0.8</version>
    </dependency>
    <dependency>
      <groupId>org.springframework.boot</groupId>
      <artifactId>spring-boot-starter-webflux</artifactId>

@@ -55,7 +61,7 @@
    <dependency>
      <groupId>org.apache.commons</groupId>
      <artifactId>commons-lang3</artifactId>
      <version>3.12.0</version>
      <version>3.9</version>
    </dependency>
    <dependency>
      <groupId>org.projectlombok</groupId>

@@ -81,12 +87,6 @@
      <groupId>io.confluent</groupId>
      <artifactId>kafka-json-schema-serializer</artifactId>
      <version>${confluent.version}</version>
      <exclusions>
        <exclusion>
          <groupId>commons-collections</groupId>
          <artifactId>commons-collections</artifactId>
        </exclusion>
      </exclusions>
    </dependency>
    <dependency>
      <groupId>io.confluent</groupId>

@@ -97,7 +97,7 @@
    <dependency>
      <groupId>software.amazon.msk</groupId>
      <artifactId>aws-msk-iam-auth</artifactId>
      <version>1.1.7</version>
      <version>1.1.5</version>
    </dependency>

    <dependency>

@@ -115,16 +115,12 @@
      <groupId>io.projectreactor.addons</groupId>
      <artifactId>reactor-extra</artifactId>
    </dependency>
    <!-- https://github.com/provectus/kafka-ui/pull/3693 -->
    <dependency>
      <groupId>org.json</groupId>
      <artifactId>json</artifactId>
      <version>${org.json.version}</version>
    </dependency>
    <dependency>
      <groupId>io.micrometer</groupId>
      <artifactId>micrometer-registry-prometheus</artifactId>
      <scope>runtime</scope>
    </dependency>

    <dependency>
      <groupId>org.springframework.boot</groupId>

@@ -141,11 +137,6 @@
      <artifactId>commons-pool2</artifactId>
      <version>${apache.commons.version}</version>
    </dependency>
    <dependency>
      <groupId>org.apache.commons</groupId>
      <artifactId>commons-collections4</artifactId>
      <version>4.4</version>
    </dependency>
    <dependency>
      <groupId>org.testcontainers</groupId>
      <artifactId>testcontainers</artifactId>

@@ -249,6 +240,8 @@
      <groupId>org.springframework.security</groupId>
      <artifactId>spring-security-ldap</artifactId>
    </dependency>


    <dependency>
      <groupId>org.codehaus.groovy</groupId>
      <artifactId>groovy-jsr223</artifactId>

@@ -325,7 +318,7 @@
    <plugin>
      <groupId>org.apache.maven.plugins</groupId>
      <artifactId>maven-checkstyle-plugin</artifactId>
      <version>3.3.0</version>
      <version>3.1.2</version>
      <dependencies>
        <dependency>
          <groupId>com.puppycrawl.tools</groupId>

@@ -403,7 +396,7 @@
    <plugin>
      <groupId>pl.project13.maven</groupId>
      <artifactId>git-commit-id-plugin</artifactId>
      <version>4.9.10</version>
      <version>4.0.0</version>
      <executions>
        <execution>
          <id>get-the-git-infos</id>
@@ -51,12 +51,13 @@ public class ClustersProperties {
    List<Masking> masking;
    Long pollingThrottleRate;
    TruststoreConfig ssl;
    AuditProperties audit;
  }

  @Data
  public static class PollingProperties {
    Integer pollTimeoutMs;
    Integer partitionPollTimeout;
    Integer noDataEmptyPolls;
    Integer maxPageSize;
    Integer defaultPageSize;
  }

@@ -130,9 +131,8 @@ public class ClustersProperties {
  @Data
  public static class Masking {
    Type type;
    List<String> fields;
    String fieldsNamePattern;
    List<String> maskingCharsReplacement; //used when type=MASK
    List<String> fields; //if null or empty list - policy will be applied to all fields
    List<String> pattern; //used when type=MASK
    String replacement; //used when type=REPLACE
    String topicKeysPattern;
    String topicValuesPattern;
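For orientation, a hedged sketch of how a masking policy is expressed in cluster config, using the field names from the removed (master) side of this hunk; the field list, mask characters, and topic pattern are invented for illustration:

kafka:
  clusters:
    - name: local                                        # example cluster
      masking:
        - type: MASK
          fields: [ "password" ]                         # hypothetical field to mask
          maskingCharsReplacement: [ "X", "x", "n", "-" ] # example mask characters
          topicValuesPattern: "users.*"                  # hypothetical topic pattern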
@@ -142,23 +142,6 @@ public class ClustersProperties {
    }
  }

  @Data
  @NoArgsConstructor
  @AllArgsConstructor
  public static class AuditProperties {
    String topic;
    Integer auditTopicsPartitions;
    Boolean topicAuditEnabled;
    Boolean consoleAuditEnabled;
    LogLevel level;
    Map<String, String> auditTopicProperties;

    public enum LogLevel {
      ALL,
      ALTER_ONLY //default
    }
  }

  @PostConstruct
  public void validateAndSetDefaults() {
    if (clusters != null) {
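The audit properties removed here correspond to the KAFKA_CLUSTERS_0_AUDIT_* env vars dropped from the compose file above. A hedged YAML equivalent under the master-side property names (the values are examples):

kafka:
  clusters:
    - name: local
      audit:
        topicAuditEnabled: true
        consoleAuditEnabled: true
        level: ALTER_ONLY        # or ALL; ALTER_ONLY is the documented default
        auditTopicsPartitions: 1 # assumption: partition count for the audit topic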
@@ -1,39 +1,18 @@
package com.provectus.kafka.ui.config;

import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.http.HttpHeaders;
import org.springframework.http.HttpMethod;
import org.springframework.http.HttpStatus;
import org.springframework.http.server.reactive.ServerHttpRequest;
import org.springframework.http.server.reactive.ServerHttpResponse;
import org.springframework.web.server.ServerWebExchange;
import org.springframework.web.server.WebFilter;
import org.springframework.web.server.WebFilterChain;
import reactor.core.publisher.Mono;
import org.springframework.web.reactive.config.CorsRegistry;
import org.springframework.web.reactive.config.WebFluxConfigurer;

@Configuration
public class CorsGlobalConfiguration {
public class CorsGlobalConfiguration implements WebFluxConfigurer {

  @Bean
  public WebFilter corsFilter() {
    return (final ServerWebExchange ctx, final WebFilterChain chain) -> {
      final ServerHttpRequest request = ctx.getRequest();

      final ServerHttpResponse response = ctx.getResponse();
      final HttpHeaders headers = response.getHeaders();
      headers.add("Access-Control-Allow-Origin", "*");
      headers.add("Access-Control-Allow-Methods", "GET, PUT, POST, DELETE, OPTIONS");
      headers.add("Access-Control-Max-Age", "3600");
      headers.add("Access-Control-Allow-Headers", "Content-Type");

      if (request.getMethod() == HttpMethod.OPTIONS) {
        response.setStatusCode(HttpStatus.OK);
        return Mono.empty();
      }

      return chain.filter(ctx);
    };
  @Override
  public void addCorsMappings(CorsRegistry registry) {
    registry.addMapping("/**")
        .allowedOrigins("*")
        .allowedMethods("*")
        .allowedHeaders("*")
        .allowCredentials(false);
  }

}
@@ -1,6 +1,7 @@
package com.provectus.kafka.ui.config;

import com.provectus.kafka.ui.exception.ValidationException;
import java.beans.Transient;
import javax.annotation.PostConstruct;
import lombok.Data;
import org.springframework.boot.context.properties.ConfigurationProperties;

@@ -13,7 +13,6 @@ abstract class AbstractAuthSecurityConfig {
      "/resources/**",
      "/actuator/health/**",
      "/actuator/info",
      "/actuator/prometheus",
      "/auth",
      "/login",
      "/logout",

@@ -1,6 +1,7 @@
package com.provectus.kafka.ui.config.auth;

import java.util.Collection;
import lombok.Value;

public record AuthenticatedUser(String principal, Collection<String> groups) {
@@ -6,13 +6,13 @@ import lombok.extern.slf4j.Slf4j;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.http.HttpMethod;
import org.springframework.security.config.annotation.web.reactive.EnableWebFluxSecurity;
import org.springframework.security.config.web.server.SecurityWebFiltersOrder;
import org.springframework.security.config.web.server.ServerHttpSecurity;
import org.springframework.security.web.server.SecurityWebFilterChain;
import org.springframework.security.web.server.authentication.RedirectServerAuthenticationSuccessHandler;
import org.springframework.security.web.server.authentication.logout.RedirectServerLogoutSuccessHandler;
import org.springframework.security.web.server.util.matcher.ServerWebExchangeMatchers;
import org.springframework.security.web.server.ui.LogoutPageGeneratingWebFilter;

@Configuration
@EnableWebFluxSecurity

@@ -33,19 +33,15 @@ public class BasicAuthSecurityConfig extends AbstractAuthSecurityConfig {
    final var logoutSuccessHandler = new RedirectServerLogoutSuccessHandler();
    logoutSuccessHandler.setLogoutSuccessUrl(URI.create(LOGOUT_URL));


    return http.authorizeExchange(spec -> spec
        .pathMatchers(AUTH_WHITELIST)
        .permitAll()
        .anyExchange()
        .authenticated()
    )
        .formLogin(spec -> spec.loginPage(LOGIN_URL).authenticationSuccessHandler(authHandler))
        .logout(spec -> spec
            .logoutSuccessHandler(logoutSuccessHandler)
            .requiresLogout(ServerWebExchangeMatchers.pathMatchers(HttpMethod.GET, "/logout")))
        .csrf(ServerHttpSecurity.CsrfSpec::disable)
        .build();
    return http
        .addFilterAfter(new LogoutPageGeneratingWebFilter(), SecurityWebFiltersOrder.REACTOR_CONTEXT)
        .csrf().disable()
        .authorizeExchange()
        .pathMatchers(AUTH_WHITELIST).permitAll()
        .anyExchange().authenticated()
        .and().formLogin().loginPage(LOGIN_URL).authenticationSuccessHandler(authHandler)
        .and().logout().logoutSuccessHandler(logoutSuccessHandler)
        .and().build();
  }

}
@@ -27,12 +27,10 @@ public class DisabledAuthSecurityConfig extends AbstractAuthSecurityConfig {
      System.exit(1);
    }
    log.warn("Authentication is disabled. Access will be unrestricted.");

    return http.authorizeExchange(spec -> spec
        .anyExchange()
        .permitAll()
    )
        .csrf(ServerHttpSecurity.CsrfSpec::disable)
    return http.authorizeExchange()
        .anyExchange().permitAll()
        .and()
        .csrf().disable()
        .build();
  }
@@ -1,26 +0,0 @@
package com.provectus.kafka.ui.config.auth;

import lombok.Data;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.boot.context.properties.ConfigurationProperties;

@ConfigurationProperties("spring.ldap")
@Data
public class LdapProperties {

  private String urls;
  private String base;
  private String adminUser;
  private String adminPassword;
  private String userFilterSearchBase;
  private String userFilterSearchFilter;
  private String groupFilterSearchBase;
  private String groupFilterSearchFilter;
  private String groupRoleAttribute;

  @Value("${oauth2.ldap.activeDirectory:false}")
  private boolean isActiveDirectory;
  @Value("${oauth2.ldap.aсtiveDirectory.domain:@null}")
  private String activeDirectoryDomain;

}
@@ -1,151 +1,106 @@
package com.provectus.kafka.ui.config.auth;

import static com.provectus.kafka.ui.config.auth.AbstractAuthSecurityConfig.AUTH_WHITELIST;

import com.provectus.kafka.ui.service.rbac.AccessControlService;
import com.provectus.kafka.ui.service.rbac.extractor.RbacLdapAuthoritiesExtractor;
import java.util.Collection;
import java.util.List;
import java.util.Optional;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.boot.autoconfigure.ldap.LdapAutoConfiguration;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.context.ApplicationContext;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.annotation.Import;
import org.springframework.context.annotation.Primary;
import org.springframework.ldap.core.DirContextOperations;
import org.springframework.ldap.core.support.BaseLdapPathContextSource;
import org.springframework.ldap.core.support.LdapContextSource;
import org.springframework.security.authentication.AuthenticationManager;
import org.springframework.security.authentication.ProviderManager;
import org.springframework.security.authentication.ReactiveAuthenticationManager;
import org.springframework.security.authentication.ReactiveAuthenticationManagerAdapter;
import org.springframework.security.config.Customizer;
import org.springframework.security.config.annotation.web.reactive.EnableWebFluxSecurity;
import org.springframework.security.config.web.server.ServerHttpSecurity;
import org.springframework.security.core.GrantedAuthority;
import org.springframework.security.core.userdetails.UserDetails;
import org.springframework.security.ldap.authentication.AbstractLdapAuthenticationProvider;
import org.springframework.security.ldap.authentication.BindAuthenticator;
import org.springframework.security.ldap.authentication.LdapAuthenticationProvider;
import org.springframework.security.ldap.authentication.ad.ActiveDirectoryLdapAuthenticationProvider;
import org.springframework.security.ldap.search.FilterBasedLdapUserSearch;
import org.springframework.security.ldap.search.LdapUserSearch;
import org.springframework.security.ldap.userdetails.DefaultLdapAuthoritiesPopulator;
import org.springframework.security.ldap.userdetails.LdapAuthoritiesPopulator;
import org.springframework.security.ldap.userdetails.LdapUserDetailsMapper;
import org.springframework.security.web.server.SecurityWebFilterChain;

@Configuration
@EnableWebFluxSecurity
@ConditionalOnProperty(value = "auth.type", havingValue = "LDAP")
@Import(LdapAutoConfiguration.class)
@EnableConfigurationProperties(LdapProperties.class)
@RequiredArgsConstructor
@Slf4j
public class LdapSecurityConfig {
public class LdapSecurityConfig extends AbstractAuthSecurityConfig {

  private final LdapProperties props;
  @Value("${spring.ldap.urls}")
  private String ldapUrls;
  @Value("${spring.ldap.dn.pattern:#{null}}")
  private String ldapUserDnPattern;
  @Value("${spring.ldap.adminUser:#{null}}")
  private String adminUser;
  @Value("${spring.ldap.adminPassword:#{null}}")
  private String adminPassword;
  @Value("${spring.ldap.userFilter.searchBase:#{null}}")
  private String userFilterSearchBase;
  @Value("${spring.ldap.userFilter.searchFilter:#{null}}")
  private String userFilterSearchFilter;

  @Value("${oauth2.ldap.activeDirectory:false}")
  private boolean isActiveDirectory;
  @Value("${oauth2.ldap.aсtiveDirectory.domain:#{null}}")
  private String activeDirectoryDomain;

  @Bean
  public ReactiveAuthenticationManager authenticationManager(BaseLdapPathContextSource contextSource,
                                                             LdapAuthoritiesPopulator authoritiesExtractor,
                                                             AccessControlService acs) {
    var rbacEnabled = acs.isRbacEnabled();
  public ReactiveAuthenticationManager authenticationManager(BaseLdapPathContextSource contextSource) {
    BindAuthenticator ba = new BindAuthenticator(contextSource);
    if (props.getBase() != null) {
      ba.setUserDnPatterns(new String[] {props.getBase()});
    if (ldapUserDnPattern != null) {
      ba.setUserDnPatterns(new String[] {ldapUserDnPattern});
    }
    if (props.getUserFilterSearchFilter() != null) {
    if (userFilterSearchFilter != null) {
      LdapUserSearch userSearch =
          new FilterBasedLdapUserSearch(props.getUserFilterSearchBase(), props.getUserFilterSearchFilter(),
              contextSource);
          new FilterBasedLdapUserSearch(userFilterSearchBase, userFilterSearchFilter, contextSource);
      ba.setUserSearch(userSearch);
    }

    AbstractLdapAuthenticationProvider authenticationProvider;
    if (!props.isActiveDirectory()) {
      authenticationProvider = rbacEnabled
          ? new LdapAuthenticationProvider(ba, authoritiesExtractor)
          : new LdapAuthenticationProvider(ba);
    if (!isActiveDirectory) {
      authenticationProvider = new LdapAuthenticationProvider(ba);
    } else {
      authenticationProvider = new ActiveDirectoryLdapAuthenticationProvider(props.getActiveDirectoryDomain(),
          props.getUrls()); // TODO Issue #3741
      authenticationProvider = new ActiveDirectoryLdapAuthenticationProvider(activeDirectoryDomain, ldapUrls);
      authenticationProvider.setUseAuthenticationRequestCredentials(true);
    }

    if (rbacEnabled) {
      authenticationProvider.setUserDetailsContextMapper(new UserDetailsMapper());
    }

    AuthenticationManager am = new ProviderManager(List.of(authenticationProvider));

    return new ReactiveAuthenticationManagerAdapter(am);
  }

  @Bean
  @Primary
  public BaseLdapPathContextSource contextSource() {
    LdapContextSource ctx = new LdapContextSource();
    ctx.setUrl(props.getUrls());
    ctx.setUserDn(props.getAdminUser());
    ctx.setPassword(props.getAdminPassword());
    ctx.setUrl(ldapUrls);
    ctx.setUserDn(adminUser);
    ctx.setPassword(adminPassword);
    ctx.afterPropertiesSet();
    return ctx;
  }

  @Bean
  @Primary
  public DefaultLdapAuthoritiesPopulator ldapAuthoritiesExtractor(ApplicationContext context,
                                                                  BaseLdapPathContextSource contextSource,
                                                                  AccessControlService acs) {
    var rbacEnabled = acs != null && acs.isRbacEnabled();

    DefaultLdapAuthoritiesPopulator extractor;

    if (rbacEnabled) {
      extractor = new RbacLdapAuthoritiesExtractor(context, contextSource, props.getGroupFilterSearchBase());
    } else {
      extractor = new DefaultLdapAuthoritiesPopulator(contextSource, props.getGroupFilterSearchBase());
    }

    Optional.ofNullable(props.getGroupFilterSearchFilter()).ifPresent(extractor::setGroupSearchFilter);
    extractor.setRolePrefix("");
    extractor.setConvertToUpperCase(false);
    extractor.setSearchSubtree(true);
    return extractor;
  }

  @Bean
  public SecurityWebFilterChain configureLdap(ServerHttpSecurity http) {
    log.info("Configuring LDAP authentication.");
    if (props.isActiveDirectory()) {
    if (isActiveDirectory) {
      log.info("Active Directory support for LDAP has been enabled.");
    }

    return http.authorizeExchange(spec -> spec
        .pathMatchers(AUTH_WHITELIST)
        .permitAll()
        .anyExchange()
        .authenticated()
    )
        .formLogin(Customizer.withDefaults())
        .logout(Customizer.withDefaults())
        .csrf(ServerHttpSecurity.CsrfSpec::disable)
        .build();
  }
    http
        .authorizeExchange()
        .pathMatchers(AUTH_WHITELIST)
        .permitAll()
        .anyExchange()
        .authenticated()
        .and()
        .httpBasic();

  private static class UserDetailsMapper extends LdapUserDetailsMapper {
    @Override
    public UserDetails mapUserFromContext(DirContextOperations ctx, String username,
                                          Collection<? extends GrantedAuthority> authorities) {
      UserDetails userDetails = super.mapUserFromContext(ctx, username, authorities);
      return new RbacLdapUser(userDetails);
    }
    return http.csrf().disable().build();
  }

}
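The removed LdapProperties class above binds the spring.ldap.* keys. A hedged application-config sketch wiring the same values that the LDAP compose example in this diff passes as env vars (all values taken from that compose file):

auth:
  type: LDAP
spring:
  ldap:
    urls: "ldap://ldap:10389"
    base: "cn={0},ou=people,dc=planetexpress,dc=com"
    adminUser: "cn=admin,dc=planetexpress,dc=com"       # maps to SPRING_LDAP_ADMIN_USER
    adminPassword: "GoodNewsEveryone"
    userFilter:
      searchBase: "dc=planetexpress,dc=com"
      searchFilter: "(&(uid={0})(objectClass=inetOrgPerson))"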
@@ -1,7 +1,6 @@
package com.provectus.kafka.ui.config.auth;

import jakarta.annotation.PostConstruct;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;

@@ -15,16 +14,7 @@ public class OAuthProperties {
  private Map<String, OAuth2Provider> client = new HashMap<>();

  @PostConstruct
  public void init() {
    getClient().values().forEach((provider) -> {
      if (provider.getCustomParams() == null) {
        provider.setCustomParams(Collections.emptyMap());
      }
      if (provider.getScope() == null) {
        provider.setScope(Collections.emptySet());
      }
    });

  public void validate() {
    getClient().values().forEach(this::validateProvider);
  }
@@ -73,7 +73,8 @@ public final class OAuthPropertiesConverter {
  }

  private static boolean isGoogle(OAuth2Provider provider) {
    return GOOGLE.equalsIgnoreCase(provider.getCustomParams().get(TYPE));
    return provider.getCustomParams() != null
        && GOOGLE.equalsIgnoreCase(provider.getCustomParams().get(TYPE));
  }
}
@@ -12,11 +12,10 @@ import lombok.extern.log4j.Log4j2;
import org.jetbrains.annotations.Nullable;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.boot.autoconfigure.security.oauth2.client.OAuth2ClientProperties;
import org.springframework.boot.autoconfigure.security.oauth2.client.OAuth2ClientPropertiesMapper;
import org.springframework.boot.autoconfigure.security.oauth2.client.OAuth2ClientPropertiesRegistrationAdapter;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.security.config.Customizer;
import org.springframework.security.config.annotation.method.configuration.EnableReactiveMethodSecurity;
import org.springframework.security.config.annotation.web.reactive.EnableWebFluxSecurity;
import org.springframework.security.config.web.server.ServerHttpSecurity;

@@ -50,15 +49,21 @@ public class OAuthSecurityConfig extends AbstractAuthSecurityConfig {
  public SecurityWebFilterChain configure(ServerHttpSecurity http, OAuthLogoutSuccessHandler logoutHandler) {
    log.info("Configuring OAUTH2 authentication.");

    return http.authorizeExchange(spec -> spec
        .pathMatchers(AUTH_WHITELIST)
        .permitAll()
        .anyExchange()
        .authenticated()
    )
        .oauth2Login(Customizer.withDefaults())
        .logout(spec -> spec.logoutSuccessHandler(logoutHandler))
        .csrf(ServerHttpSecurity.CsrfSpec::disable)
    return http.authorizeExchange()
        .pathMatchers(AUTH_WHITELIST)
        .permitAll()
        .anyExchange()
        .authenticated()

        .and()
        .oauth2Login()

        .and()
        .logout()
        .logoutSuccessHandler(logoutHandler)

        .and()
        .csrf().disable()
        .build();
  }

@@ -67,13 +72,13 @@ public class OAuthSecurityConfig extends AbstractAuthSecurityConfig {
    final OidcReactiveOAuth2UserService delegate = new OidcReactiveOAuth2UserService();
    return request -> delegate.loadUser(request)
        .flatMap(user -> {
          var provider = getProviderByProviderId(request.getClientRegistration().getRegistrationId());
          final var extractor = getExtractor(provider, acs);
          String providerId = request.getClientRegistration().getRegistrationId();
          final var extractor = getExtractor(providerId, acs);
          if (extractor == null) {
            return Mono.just(user);
          }

          return extractor.extract(acs, user, Map.of("request", request, "provider", provider))
          return extractor.extract(acs, user, Map.of("request", request))
              .map(groups -> new RbacOidcUser(user, groups));
        });
  }

@@ -83,13 +88,13 @@ public class OAuthSecurityConfig extends AbstractAuthSecurityConfig {
    final DefaultReactiveOAuth2UserService delegate = new DefaultReactiveOAuth2UserService();
    return request -> delegate.loadUser(request)
        .flatMap(user -> {
          var provider = getProviderByProviderId(request.getClientRegistration().getRegistrationId());
          final var extractor = getExtractor(provider, acs);
          String providerId = request.getClientRegistration().getRegistrationId();
          final var extractor = getExtractor(providerId, acs);
          if (extractor == null) {
            return Mono.just(user);
          }

          return extractor.extract(acs, user, Map.of("request", request, "provider", provider))
          return extractor.extract(acs, user, Map.of("request", request))
              .map(groups -> new RbacOAuth2User(user, groups));
        });
  }

@@ -98,10 +103,7 @@ public class OAuthSecurityConfig extends AbstractAuthSecurityConfig {
  public InMemoryReactiveClientRegistrationRepository clientRegistrationRepository() {
    final OAuth2ClientProperties props = OAuthPropertiesConverter.convertProperties(properties);
    final List<ClientRegistration> registrations =
        new ArrayList<>(new OAuth2ClientPropertiesMapper(props).asClientRegistrations().values());
    if (registrations.isEmpty()) {
      throw new IllegalArgumentException("OAuth2 authentication is enabled but no providers specified.");
    }
        new ArrayList<>(OAuth2ClientPropertiesRegistrationAdapter.getClientRegistrations(props).values());
    return new InMemoryReactiveClientRegistrationRepository(registrations);
  }

@@ -111,18 +113,18 @@ public class OAuthSecurityConfig extends AbstractAuthSecurityConfig {
  }

  @Nullable
  private ProviderAuthorityExtractor getExtractor(final OAuthProperties.OAuth2Provider provider,
                                                  AccessControlService acs) {
    Optional<ProviderAuthorityExtractor> extractor = acs.getOauthExtractors()
  private ProviderAuthorityExtractor getExtractor(final String providerId, AccessControlService acs) {
    final String provider = getProviderByProviderId(providerId);
    Optional<ProviderAuthorityExtractor> extractor = acs.getExtractors()
        .stream()
        .filter(e -> e.isApplicable(provider.getProvider(), provider.getCustomParams()))
        .filter(e -> e.isApplicable(provider))
        .findFirst();

    return extractor.orElse(null);
  }

  private OAuthProperties.OAuth2Provider getProviderByProviderId(final String providerId) {
    return properties.getClient().get(providerId);
  private String getProviderByProviderId(final String providerId) {
    return properties.getClient().get(providerId).getProvider();
  }

}
@@ -1,60 +0,0 @@
package com.provectus.kafka.ui.config.auth;

import java.util.Collection;
import java.util.stream.Collectors;
import org.springframework.security.core.GrantedAuthority;
import org.springframework.security.core.userdetails.UserDetails;

public class RbacLdapUser implements UserDetails, RbacUser {

  private final UserDetails userDetails;

  public RbacLdapUser(UserDetails userDetails) {
    this.userDetails = userDetails;
  }

  @Override
  public String name() {
    return userDetails.getUsername();
  }

  @Override
  public Collection<String> groups() {
    return userDetails.getAuthorities().stream().map(GrantedAuthority::getAuthority).collect(Collectors.toSet());
  }

  @Override
  public Collection<? extends GrantedAuthority> getAuthorities() {
    return userDetails.getAuthorities();
  }

  @Override
  public String getPassword() {
    return userDetails.getPassword();
  }

  @Override
  public String getUsername() {
    return userDetails.getUsername();
  }

  @Override
  public boolean isAccountNonExpired() {
    return userDetails.isAccountNonExpired();
  }

  @Override
  public boolean isAccountNonLocked() {
    return userDetails.isAccountNonLocked();
  }

  @Override
  public boolean isCredentialsNonExpired() {
    return userDetails.isCredentialsNonExpired();
  }

  @Override
  public boolean isEnabled() {
    return userDetails.isEnabled();
  }
}
@@ -2,6 +2,7 @@ package com.provectus.kafka.ui.config.auth;

import java.util.Collection;
import java.util.Map;
import lombok.Value;
import org.springframework.security.core.GrantedAuthority;
import org.springframework.security.oauth2.core.user.OAuth2User;

@@ -2,6 +2,7 @@ package com.provectus.kafka.ui.config.auth;

import java.util.Collection;
import java.util.Map;
import lombok.Value;
import org.springframework.security.core.GrantedAuthority;
import org.springframework.security.oauth2.core.oidc.OidcIdToken;
import org.springframework.security.oauth2.core.oidc.OidcUserInfo;
@@ -1,21 +0,0 @@
package com.provectus.kafka.ui.config.auth.condition;

import org.springframework.boot.autoconfigure.condition.AllNestedConditions;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;

public class ActiveDirectoryCondition extends AllNestedConditions {

  public ActiveDirectoryCondition() {
    super(ConfigurationPhase.PARSE_CONFIGURATION);
  }

  @ConditionalOnProperty(value = "auth.type", havingValue = "LDAP")
  public static class OnAuthType {

  }

  @ConditionalOnProperty(value = "${oauth2.ldap.activeDirectory}:false", havingValue = "true", matchIfMissing = false)
  public static class OnActiveDirectory {

  }
}
@@ -1,14 +1,13 @@
package com.provectus.kafka.ui.config.auth.condition;

import com.provectus.kafka.ui.service.rbac.AbstractProviderCondition;
import org.jetbrains.annotations.NotNull;
import org.springframework.context.annotation.Condition;
import org.springframework.context.annotation.ConditionContext;
import org.springframework.core.type.AnnotatedTypeMetadata;

public class CognitoCondition extends AbstractProviderCondition implements Condition {
  @Override
  public boolean matches(final ConditionContext context, final @NotNull AnnotatedTypeMetadata metadata) {
  public boolean matches(final ConditionContext context, final AnnotatedTypeMetadata metadata) {
    return getRegisteredProvidersTypes(context.getEnvironment()).stream().anyMatch(a -> a.equalsIgnoreCase("cognito"));
  }
}
}
@@ -46,8 +46,10 @@ public class CognitoLogoutSuccessHandler implements LogoutSuccessHandler {
        .fragment(null)
        .build();

    Assert.isTrue(provider.getCustomParams().containsKey("logoutUrl"),
        "Custom params should contain 'logoutUrl'");
    Assert.isTrue(
        provider.getCustomParams() != null && provider.getCustomParams().containsKey("logoutUrl"),
        "Custom params should contain 'logoutUrl'"
    );
    final var uri = UriComponentsBuilder
        .fromUri(URI.create(provider.getCustomParams().get("logoutUrl")))
        .queryParam("client_id", provider.getClientId())
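The assert above fails fast unless the provider's custom params carry a logoutUrl. A hedged sketch of a matching provider definition in YAML form — the key casing and nesting are assumptions for illustration, and the URLs mirror the placeholders from the oauth-cognito.yaml compose example above:

auth:
  type: OAUTH2
  oauth2:
    client:
      cognito:
        provider: cognito
        clientId: "xxx"                 # placeholder
        clientSecret: "yyy"             # placeholder
        issuer-uri: "https://cognito-idp.eu-central-1.amazonaws.com/eu-central-xxxxxx"
        custom-params:
          type: cognito
          logoutUrl: "https://<domain>.auth.eu-central-1.amazoncognito.com/logout"  # required by the assert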
@@ -2,19 +2,12 @@ package com.provectus.kafka.ui.controller;

import com.provectus.kafka.ui.exception.ClusterNotFoundException;
import com.provectus.kafka.ui.model.KafkaCluster;
import com.provectus.kafka.ui.model.rbac.AccessContext;
import com.provectus.kafka.ui.service.ClustersStorage;
import com.provectus.kafka.ui.service.audit.AuditService;
import com.provectus.kafka.ui.service.rbac.AccessControlService;
import org.springframework.beans.factory.annotation.Autowired;
import reactor.core.publisher.Mono;
import reactor.core.publisher.Signal;

public abstract class AbstractController {

  protected ClustersStorage clustersStorage;
  protected AccessControlService accessControlService;
  protected AuditService auditService;
  private ClustersStorage clustersStorage;

  protected KafkaCluster getCluster(String name) {
    return clustersStorage.getClusterByName(name)

@@ -22,26 +15,8 @@ public abstract class AbstractController {
        String.format("Cluster with name '%s' not found", name)));
  }

  protected Mono<Void> validateAccess(AccessContext context) {
    return accessControlService.validateAccess(context);
  }

  protected void audit(AccessContext acxt, Signal<?> sig) {
    auditService.audit(acxt, sig);
  }

  @Autowired
  public void setClustersStorage(ClustersStorage clustersStorage) {
    this.clustersStorage = clustersStorage;
  }

  @Autowired
  public void setAccessControlService(AccessControlService accessControlService) {
    this.accessControlService = accessControlService;
  }

  @Autowired
  public void setAuditService(AuditService auditService) {
    this.auditService = auditService;
  }
}
AccessController.java
@@ -12,10 +12,8 @@ import java.security.Principal;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.List;
-import java.util.Objects;
-import javax.annotation.Nullable;
+import java.util.stream.Collectors;
 import lombok.RequiredArgsConstructor;
-import lombok.extern.slf4j.Slf4j;
 import org.springframework.http.ResponseEntity;
 import org.springframework.security.core.context.ReactiveSecurityContextHolder;
 import org.springframework.security.core.context.SecurityContext;
@@ -25,19 +23,22 @@ import reactor.core.publisher.Mono;
 
 @RestController
 @RequiredArgsConstructor
-@Slf4j
 public class AccessController implements AuthorizationApi {
 
   private final AccessControlService accessControlService;
 
   public Mono<ResponseEntity<AuthenticationInfoDTO>> getUserAuthInfo(ServerWebExchange exchange) {
+    AuthenticationInfoDTO dto = new AuthenticationInfoDTO();
+    dto.setRbacEnabled(accessControlService.isRbacEnabled());
+    UserInfoDTO userInfo = new UserInfoDTO();
+
     Mono<List<UserPermissionDTO>> permissions = accessControlService.getUser()
         .map(user -> accessControlService.getRoles()
             .stream()
             .filter(role -> user.groups().contains(role.getName()))
            .map(role -> mapPermissions(role.getPermissions(), role.getClusters()))
            .flatMap(Collection::stream)
-            .toList()
+            .collect(Collectors.toList())
         )
         .switchIfEmpty(Mono.just(Collections.emptyList()));
 
@@ -48,11 +49,13 @@ public class AccessController implements AuthorizationApi {
     return userName
         .zipWith(permissions)
         .map(data -> {
-          var dto = new AuthenticationInfoDTO(accessControlService.isRbacEnabled());
-          dto.setUserInfo(new UserInfoDTO(data.getT1(), data.getT2()));
+          userInfo.setUsername(data.getT1());
+          userInfo.setPermissions(data.getT2());
+
+          dto.setUserInfo(userInfo);
           return dto;
         })
-        .switchIfEmpty(Mono.just(new AuthenticationInfoDTO(accessControlService.isRbacEnabled())))
+        .switchIfEmpty(Mono.just(dto))
         .map(ResponseEntity::ok);
   }
 
@@ -67,22 +70,11 @@ public class AccessController implements AuthorizationApi {
           dto.setActions(permission.getActions()
               .stream()
               .map(String::toUpperCase)
-              .map(this::mapAction)
-              .filter(Objects::nonNull)
-              .toList());
+              .map(ActionDTO::valueOf)
+              .collect(Collectors.toList()));
           return dto;
         })
-        .toList();
-  }
-
-  @Nullable
-  private ActionDTO mapAction(String name) {
-    try {
-      return ActionDTO.fromValue(name);
-    } catch (IllegalArgumentException e) {
-      log.warn("Unknown Action [{}], skipping", name);
-      return null;
-    }
-  }
+        .collect(Collectors.toList());
+  }
 
 }
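A side note on the recurring toList() / collect(Collectors.toList()) swaps throughout this compare: the two are not interchangeable. Stream.toList() (Java 16+) returns an unmodifiable list, while Collectors.toList() returns a modifiable one (an ArrayList in current JDKs, though the spec does not guarantee the concrete type). A self-contained demo of the difference:

import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.Stream;

public class ToListDemo {
  public static void main(String[] args) {
    // Stream.toList() (Java 16+) yields an unmodifiable List.
    List<String> a = Stream.of("x", "y").toList();
    // Collectors.toList() yields a modifiable List (ArrayList in practice).
    List<String> b = Stream.of("x", "y").collect(Collectors.toList());

    b.add("z");    // fine
    // a.add("z"); // would throw UnsupportedOperationException
    System.out.println(a + " " + b);
  }
}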
AclsController.java
@@ -1,176 +0,0 @@
-package com.provectus.kafka.ui.controller;
-
-import com.provectus.kafka.ui.api.AclsApi;
-import com.provectus.kafka.ui.mapper.ClusterMapper;
-import com.provectus.kafka.ui.model.CreateConsumerAclDTO;
-import com.provectus.kafka.ui.model.CreateProducerAclDTO;
-import com.provectus.kafka.ui.model.CreateStreamAppAclDTO;
-import com.provectus.kafka.ui.model.KafkaAclDTO;
-import com.provectus.kafka.ui.model.KafkaAclNamePatternTypeDTO;
-import com.provectus.kafka.ui.model.KafkaAclResourceTypeDTO;
-import com.provectus.kafka.ui.model.rbac.AccessContext;
-import com.provectus.kafka.ui.model.rbac.permission.AclAction;
-import com.provectus.kafka.ui.service.acl.AclsService;
-import java.util.Optional;
-import lombok.RequiredArgsConstructor;
-import org.apache.kafka.common.resource.PatternType;
-import org.apache.kafka.common.resource.ResourcePatternFilter;
-import org.apache.kafka.common.resource.ResourceType;
-import org.springframework.http.ResponseEntity;
-import org.springframework.web.bind.annotation.RestController;
-import org.springframework.web.server.ServerWebExchange;
-import reactor.core.publisher.Flux;
-import reactor.core.publisher.Mono;
-
-@RestController
-@RequiredArgsConstructor
-public class AclsController extends AbstractController implements AclsApi {
-
-  private final AclsService aclsService;
-
-  @Override
-  public Mono<ResponseEntity<Void>> createAcl(String clusterName, Mono<KafkaAclDTO> kafkaAclDto,
-                                              ServerWebExchange exchange) {
-    AccessContext context = AccessContext.builder()
-        .cluster(clusterName)
-        .aclActions(AclAction.EDIT)
-        .operationName("createAcl")
-        .build();
-
-    return validateAccess(context)
-        .then(kafkaAclDto)
-        .map(ClusterMapper::toAclBinding)
-        .flatMap(binding -> aclsService.createAcl(getCluster(clusterName), binding))
-        .doOnEach(sig -> audit(context, sig))
-        .thenReturn(ResponseEntity.ok().build());
-  }
-
-  @Override
-  public Mono<ResponseEntity<Void>> deleteAcl(String clusterName, Mono<KafkaAclDTO> kafkaAclDto,
-                                              ServerWebExchange exchange) {
-    AccessContext context = AccessContext.builder()
-        .cluster(clusterName)
-        .aclActions(AclAction.EDIT)
-        .operationName("deleteAcl")
-        .build();
-
-    return validateAccess(context)
-        .then(kafkaAclDto)
-        .map(ClusterMapper::toAclBinding)
-        .flatMap(binding -> aclsService.deleteAcl(getCluster(clusterName), binding))
-        .doOnEach(sig -> audit(context, sig))
-        .thenReturn(ResponseEntity.ok().build());
-  }
-
-  @Override
-  public Mono<ResponseEntity<Flux<KafkaAclDTO>>> listAcls(String clusterName,
-                                                          KafkaAclResourceTypeDTO resourceTypeDto,
-                                                          String resourceName,
-                                                          KafkaAclNamePatternTypeDTO namePatternTypeDto,
-                                                          ServerWebExchange exchange) {
-    AccessContext context = AccessContext.builder()
-        .cluster(clusterName)
-        .aclActions(AclAction.VIEW)
-        .operationName("listAcls")
-        .build();
-
-    var resourceType = Optional.ofNullable(resourceTypeDto)
-        .map(ClusterMapper::mapAclResourceTypeDto)
-        .orElse(ResourceType.ANY);
-
-    var namePatternType = Optional.ofNullable(namePatternTypeDto)
-        .map(ClusterMapper::mapPatternTypeDto)
-        .orElse(PatternType.ANY);
-
-    var filter = new ResourcePatternFilter(resourceType, resourceName, namePatternType);
-
-    return validateAccess(context).then(
-        Mono.just(
-            ResponseEntity.ok(
-                aclsService.listAcls(getCluster(clusterName), filter)
-                    .map(ClusterMapper::toKafkaAclDto)))
-    ).doOnEach(sig -> audit(context, sig));
-  }
-
-  @Override
-  public Mono<ResponseEntity<String>> getAclAsCsv(String clusterName, ServerWebExchange exchange) {
-    AccessContext context = AccessContext.builder()
-        .cluster(clusterName)
-        .aclActions(AclAction.VIEW)
-        .operationName("getAclAsCsv")
-        .build();
-
-    return validateAccess(context).then(
-        aclsService.getAclAsCsvString(getCluster(clusterName))
-            .map(ResponseEntity::ok)
-            .flatMap(Mono::just)
-            .doOnEach(sig -> audit(context, sig))
-    );
-  }
-
-  @Override
-  public Mono<ResponseEntity<Void>> syncAclsCsv(String clusterName, Mono<String> csvMono, ServerWebExchange exchange) {
-    AccessContext context = AccessContext.builder()
-        .cluster(clusterName)
-        .aclActions(AclAction.EDIT)
-        .operationName("syncAclsCsv")
-        .build();
-
-    return validateAccess(context)
-        .then(csvMono)
-        .flatMap(csv -> aclsService.syncAclWithAclCsv(getCluster(clusterName), csv))
-        .doOnEach(sig -> audit(context, sig))
-        .thenReturn(ResponseEntity.ok().build());
-  }
-
-  @Override
-  public Mono<ResponseEntity<Void>> createConsumerAcl(String clusterName,
-                                                      Mono<CreateConsumerAclDTO> createConsumerAclDto,
-                                                      ServerWebExchange exchange) {
-    AccessContext context = AccessContext.builder()
-        .cluster(clusterName)
-        .aclActions(AclAction.EDIT)
-        .operationName("createConsumerAcl")
-        .build();
-
-    return validateAccess(context)
-        .then(createConsumerAclDto)
-        .flatMap(req -> aclsService.createConsumerAcl(getCluster(clusterName), req))
-        .doOnEach(sig -> audit(context, sig))
-        .thenReturn(ResponseEntity.ok().build());
-  }
-
-  @Override
-  public Mono<ResponseEntity<Void>> createProducerAcl(String clusterName,
-                                                      Mono<CreateProducerAclDTO> createProducerAclDto,
-                                                      ServerWebExchange exchange) {
-    AccessContext context = AccessContext.builder()
-        .cluster(clusterName)
-        .aclActions(AclAction.EDIT)
-        .operationName("createProducerAcl")
-        .build();
-
-    return validateAccess(context)
-        .then(createProducerAclDto)
-        .flatMap(req -> aclsService.createProducerAcl(getCluster(clusterName), req))
-        .doOnEach(sig -> audit(context, sig))
-        .thenReturn(ResponseEntity.ok().build());
-  }
-
-  @Override
-  public Mono<ResponseEntity<Void>> createStreamAppAcl(String clusterName,
-                                                       Mono<CreateStreamAppAclDTO> createStreamAppAclDto,
-                                                       ServerWebExchange exchange) {
-    AccessContext context = AccessContext.builder()
-        .cluster(clusterName)
-        .aclActions(AclAction.EDIT)
-        .operationName("createStreamAppAcl")
-        .build();
-
-    return validateAccess(context)
-        .then(createStreamAppAclDto)
-        .flatMap(req -> aclsService.createStreamAppAcl(getCluster(clusterName), req))
-        .doOnEach(sig -> audit(context, sig))
-        .thenReturn(ResponseEntity.ok().build());
-  }
-}
ApplicationConfigController.java
@@ -15,6 +15,7 @@ import com.provectus.kafka.ui.model.UploadedFileInfoDTO;
 import com.provectus.kafka.ui.model.rbac.AccessContext;
 import com.provectus.kafka.ui.service.ApplicationInfoService;
 import com.provectus.kafka.ui.service.KafkaClusterFactory;
+import com.provectus.kafka.ui.service.rbac.AccessControlService;
 import com.provectus.kafka.ui.util.ApplicationRestarter;
 import com.provectus.kafka.ui.util.DynamicConfigOperations;
 import com.provectus.kafka.ui.util.DynamicConfigOperations.PropertiesStructure;
@@ -26,7 +27,6 @@ import org.mapstruct.Mapper;
 import org.mapstruct.factory.Mappers;
 import org.springframework.http.ResponseEntity;
 import org.springframework.http.codec.multipart.FilePart;
-import org.springframework.http.codec.multipart.Part;
 import org.springframework.web.bind.annotation.RestController;
 import org.springframework.web.server.ServerWebExchange;
 import reactor.core.publisher.Flux;
@@ -37,7 +37,7 @@ import reactor.util.function.Tuples;
 @Slf4j
 @RestController
 @RequiredArgsConstructor
-public class ApplicationConfigController extends AbstractController implements ApplicationConfigApi {
+public class ApplicationConfigController implements ApplicationConfigApi {
 
   private static final PropertiesMapper MAPPER = Mappers.getMapper(PropertiesMapper.class);
 
@@ -49,6 +49,7 @@ public class ApplicationConfigController extends AbstractController implements ApplicationConfigApi {
     ApplicationConfigPropertiesDTO toDto(PropertiesStructure propertiesStructure);
   }
 
+  private final AccessControlService accessControlService;
   private final DynamicConfigOperations dynamicConfigOperations;
   private final ApplicationRestarter restarter;
   private final KafkaClusterFactory kafkaClusterFactory;
@@ -61,69 +62,59 @@ public class ApplicationConfigController extends AbstractController implements ApplicationConfigApi {
 
   @Override
   public Mono<ResponseEntity<ApplicationConfigDTO>> getCurrentConfig(ServerWebExchange exchange) {
-    var context = AccessContext.builder()
-        .applicationConfigActions(VIEW)
-        .operationName("getCurrentConfig")
-        .build();
-    return validateAccess(context)
+    return accessControlService
+        .validateAccess(
+            AccessContext.builder()
+                .applicationConfigActions(VIEW)
+                .build()
+        )
         .then(Mono.fromSupplier(() -> ResponseEntity.ok(
             new ApplicationConfigDTO()
                 .properties(MAPPER.toDto(dynamicConfigOperations.getCurrentProperties()))
-        )))
-        .doOnEach(sig -> audit(context, sig));
+        )));
   }
 
   @Override
   public Mono<ResponseEntity<Void>> restartWithConfig(Mono<RestartRequestDTO> restartRequestDto,
                                                       ServerWebExchange exchange) {
-    var context = AccessContext.builder()
-        .applicationConfigActions(EDIT)
-        .operationName("restartWithConfig")
-        .build();
-    return validateAccess(context)
+    return accessControlService
+        .validateAccess(
+            AccessContext.builder()
+                .applicationConfigActions(EDIT)
+                .build()
+        )
         .then(restartRequestDto)
-        .doOnNext(restartDto -> {
-          var newConfig = MAPPER.fromDto(restartDto.getConfig().getProperties());
-          dynamicConfigOperations.persist(newConfig);
-        })
-        .doOnEach(sig -> audit(context, sig))
-        .doOnSuccess(dto -> restarter.requestRestart())
-        .map(dto -> ResponseEntity.ok().build());
+        .map(dto -> {
+          dynamicConfigOperations.persist(MAPPER.fromDto(dto.getConfig().getProperties()));
+          restarter.requestRestart();
+          return ResponseEntity.ok().build();
+        });
   }
 
   @Override
-  public Mono<ResponseEntity<UploadedFileInfoDTO>> uploadConfigRelatedFile(Flux<Part> fileFlux,
-                                                                           ServerWebExchange exchange) {
-    var context = AccessContext.builder()
-        .applicationConfigActions(EDIT)
-        .operationName("uploadConfigRelatedFile")
-        .build();
-    return validateAccess(context)
-        .then(fileFlux.single())
-        .flatMap(file ->
-            dynamicConfigOperations.uploadConfigRelatedFile((FilePart) file)
-                .map(path -> new UploadedFileInfoDTO().location(path.toString()))
-                .map(ResponseEntity::ok))
-        .doOnEach(sig -> audit(context, sig));
+  public Mono<ResponseEntity<UploadedFileInfoDTO>> uploadConfigRelatedFile(FilePart file, ServerWebExchange exchange) {
+    return accessControlService
+        .validateAccess(
+            AccessContext.builder()
+                .applicationConfigActions(EDIT)
+                .build()
+        )
+        .then(dynamicConfigOperations.uploadConfigRelatedFile(file))
+        .map(path -> new UploadedFileInfoDTO().location(path.toString()))
+        .map(ResponseEntity::ok);
   }
 
   @Override
   public Mono<ResponseEntity<ApplicationConfigValidationDTO>> validateConfig(Mono<ApplicationConfigDTO> configDto,
                                                                              ServerWebExchange exchange) {
-    var context = AccessContext.builder()
-        .applicationConfigActions(EDIT)
-        .operationName("validateConfig")
-        .build();
-    return validateAccess(context)
-        .then(configDto)
+    return configDto
         .flatMap(config -> {
-          PropertiesStructure newConfig = MAPPER.fromDto(config.getProperties());
-          ClustersProperties clustersProperties = newConfig.getKafka();
+          PropertiesStructure propertiesStructure = MAPPER.fromDto(config.getProperties());
+          ClustersProperties clustersProperties = propertiesStructure.getKafka();
           return validateClustersConfig(clustersProperties)
              .map(validations -> new ApplicationConfigValidationDTO().clusters(validations));
        })
-        .map(ResponseEntity::ok)
-        .doOnEach(sig -> audit(context, sig));
+        .map(ResponseEntity::ok);
  }
 
  private Mono<Map<String, ClusterConfigValidationDTO>> validateClustersConfig(
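One behavioral nuance in the restartWithConfig hunk above: the master side persists the new config in doOnNext and requests the restart in doOnSuccess, so the restart is only triggered once the chain (including the audit hook) completes successfully, while the branch side does both inside a single map. A tiny Reactor sketch of that ordering; the strings are placeholders, not project code:

import reactor.core.publisher.Mono;

public class SideEffectOrderDemo {
  public static void main(String[] args) {
    // doOnNext fires when the value arrives; doOnSuccess fires on successful
    // completion of the Mono - persist happens before the restart request.
    Mono.just("new-config")
        .doOnNext(cfg -> System.out.println("persist " + cfg))
        .doOnSuccess(cfg -> System.out.println("request restart"))
        .map(cfg -> "HTTP 200")
        .subscribe(System.out::println);
  }
}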
AuthController.java
@@ -36,10 +36,10 @@ public class AuthController {
         + "  <meta name=\"description\" content=\"\">\n"
         + "  <meta name=\"author\" content=\"\">\n"
         + "  <title>Please sign in</title>\n"
-        + "  <link href=\"" + contextPath + "/static/css/bootstrap.min.css\" rel=\"stylesheet\" "
+        + "  <link href=\"/static/css/bootstrap.min.css\" rel=\"stylesheet\" "
         + "integrity=\"sha384-/Y6pD6FV/Vv2HJnA6t+vslU6fwYXjCFtcEpHbNJ0lyAFsXTsjBbfaDjzALeQsN6M\" "
         + "crossorigin=\"anonymous\">\n"
-        + "  <link href=\"" + contextPath + "/static/css/signin.css\" "
+        + "  <link href=\"/static/css/signin.css\" "
         + "rel=\"stylesheet\" crossorigin=\"anonymous\"/>\n"
         + "  </head>\n"
         + "  <body>\n"
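The AuthController hunk is about the servlet context path: when the app runs under a non-root prefix (for example server.servlet.context-path=/kafka-ui), root-absolute links such as /static/css/signin.css resolve against the server root and 404, which is why the master side prefixes them with contextPath. A small runnable illustration; buildSigninCssLink is a hypothetical helper mirroring the concatenation above, not code from this diff:

public class ContextPathDemo {
  // Builds the stylesheet link the way the master-side login page does.
  static String buildSigninCssLink(String contextPath) {
    return "<link href=\"" + contextPath + "/static/css/signin.css\" rel=\"stylesheet\"/>";
  }

  public static void main(String[] args) {
    // Served at the root: the link stays /static/...
    System.out.println(buildSigninCssLink(""));
    // Served under /kafka-ui: the link becomes /kafka-ui/static/..., which resolves.
    System.out.println(buildSigninCssLink("/kafka-ui"));
  }
}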
BrokersController.java
@@ -11,9 +11,8 @@ import com.provectus.kafka.ui.model.BrokersLogdirsDTO;
 import com.provectus.kafka.ui.model.rbac.AccessContext;
 import com.provectus.kafka.ui.model.rbac.permission.ClusterConfigAction;
 import com.provectus.kafka.ui.service.BrokerService;
+import com.provectus.kafka.ui.service.rbac.AccessControlService;
 import java.util.List;
-import java.util.Map;
-import javax.annotation.Nullable;
 import lombok.RequiredArgsConstructor;
 import lombok.extern.slf4j.Slf4j;
 import org.springframework.http.ResponseEntity;
@@ -26,79 +25,63 @@ import reactor.core.publisher.Mono;
 @RequiredArgsConstructor
 @Slf4j
 public class BrokersController extends AbstractController implements BrokersApi {
-  private static final String BROKER_ID = "brokerId";
-
   private final BrokerService brokerService;
   private final ClusterMapper clusterMapper;
+  private final AccessControlService accessControlService;
 
   @Override
   public Mono<ResponseEntity<Flux<BrokerDTO>>> getBrokers(String clusterName,
                                                           ServerWebExchange exchange) {
-    var context = AccessContext.builder()
+    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
         .cluster(clusterName)
-        .operationName("getBrokers")
-        .build();
+        .build());
 
     var job = brokerService.getBrokers(getCluster(clusterName)).map(clusterMapper::toBrokerDto);
-    return validateAccess(context)
-        .thenReturn(ResponseEntity.ok(job))
-        .doOnEach(sig -> audit(context, sig));
+
+    return validateAccess.thenReturn(ResponseEntity.ok(job));
   }
 
   @Override
   public Mono<ResponseEntity<BrokerMetricsDTO>> getBrokersMetrics(String clusterName, Integer id,
                                                                   ServerWebExchange exchange) {
-    var context = AccessContext.builder()
+    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
         .cluster(clusterName)
-        .operationName("getBrokersMetrics")
-        .operationParams(Map.of("id", id))
-        .build();
+        .build());
 
-    return validateAccess(context)
-        .then(
-            brokerService.getBrokerMetrics(getCluster(clusterName), id)
-                .map(clusterMapper::toBrokerMetrics)
-                .map(ResponseEntity::ok)
-                .onErrorReturn(ResponseEntity.notFound().build())
-        )
-        .doOnEach(sig -> audit(context, sig));
+    return validateAccess.then(
+        brokerService.getBrokerMetrics(getCluster(clusterName), id)
+            .map(clusterMapper::toBrokerMetrics)
+            .map(ResponseEntity::ok)
+            .onErrorReturn(ResponseEntity.notFound().build())
+    );
   }
 
   @Override
   public Mono<ResponseEntity<Flux<BrokersLogdirsDTO>>> getAllBrokersLogdirs(String clusterName,
-                                                                            @Nullable List<Integer> brokers,
+                                                                            List<Integer> brokers,
                                                                             ServerWebExchange exchange) {
-
-    List<Integer> brokerIds = brokers == null ? List.of() : brokers;
-
-    var context = AccessContext.builder()
+    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
        .cluster(clusterName)
-        .operationName("getAllBrokersLogdirs")
-        .operationParams(Map.of("brokerIds", brokerIds))
-        .build();
+        .build());
 
-    return validateAccess(context)
-        .thenReturn(ResponseEntity.ok(
-            brokerService.getAllBrokersLogdirs(getCluster(clusterName), brokerIds)))
-        .doOnEach(sig -> audit(context, sig));
+    return validateAccess.thenReturn(ResponseEntity.ok(
+        brokerService.getAllBrokersLogdirs(getCluster(clusterName), brokers)));
   }
 
   @Override
   public Mono<ResponseEntity<Flux<BrokerConfigDTO>>> getBrokerConfig(String clusterName,
                                                                      Integer id,
                                                                      ServerWebExchange exchange) {
-    var context = AccessContext.builder()
+    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
        .cluster(clusterName)
        .clusterConfigActions(ClusterConfigAction.VIEW)
-        .operationName("getBrokerConfig")
-        .operationParams(Map.of(BROKER_ID, id))
-        .build();
+        .build());
 
-    return validateAccess(context).thenReturn(
+    return validateAccess.thenReturn(
        ResponseEntity.ok(
            brokerService.getBrokerConfig(getCluster(clusterName), id)
                .map(clusterMapper::toBrokerConfig))
-    ).doOnEach(sig -> audit(context, sig));
+    );
  }
 
  @Override
@@ -106,18 +89,16 @@ public class BrokersController extends AbstractController implements BrokersApi {
                                                                       Integer id,
                                                                       Mono<BrokerLogdirUpdateDTO> brokerLogdir,
                                                                       ServerWebExchange exchange) {
-    var context = AccessContext.builder()
+    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
        .cluster(clusterName)
        .clusterConfigActions(ClusterConfigAction.VIEW, ClusterConfigAction.EDIT)
-        .operationName("updateBrokerTopicPartitionLogDir")
-        .operationParams(Map.of(BROKER_ID, id))
-        .build();
+        .build());
 
-    return validateAccess(context).then(
+    return validateAccess.then(
        brokerLogdir
            .flatMap(bld -> brokerService.updateBrokerLogDir(getCluster(clusterName), id, bld))
            .map(ResponseEntity::ok)
-    ).doOnEach(sig -> audit(context, sig));
+    );
  }
 
  @Override
@@ -126,18 +107,16 @@ public class BrokersController extends AbstractController implements BrokersApi {
                                                             String name,
                                                             Mono<BrokerConfigItemDTO> brokerConfig,
                                                             ServerWebExchange exchange) {
-    var context = AccessContext.builder()
+    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
        .cluster(clusterName)
        .clusterConfigActions(ClusterConfigAction.VIEW, ClusterConfigAction.EDIT)
-        .operationName("updateBrokerConfigByName")
-        .operationParams(Map.of(BROKER_ID, id))
-        .build();
+        .build());
 
-    return validateAccess(context).then(
+    return validateAccess.then(
        brokerConfig
            .flatMap(bci -> brokerService.updateBrokerConfigByName(
                getCluster(clusterName), id, name, bci.getValue()))
            .map(ResponseEntity::ok)
-    ).doOnEach(sig -> audit(context, sig));
+    );
  }
}
ClustersController.java
@@ -6,6 +6,7 @@ import com.provectus.kafka.ui.model.ClusterMetricsDTO;
 import com.provectus.kafka.ui.model.ClusterStatsDTO;
 import com.provectus.kafka.ui.model.rbac.AccessContext;
 import com.provectus.kafka.ui.service.ClusterService;
+import com.provectus.kafka.ui.service.rbac.AccessControlService;
 import lombok.RequiredArgsConstructor;
 import lombok.extern.slf4j.Slf4j;
 import org.springframework.http.ResponseEntity;
@@ -19,6 +20,7 @@ import reactor.core.publisher.Mono;
 @Slf4j
 public class ClustersController extends AbstractController implements ClustersApi {
   private final ClusterService clusterService;
+  private final AccessControlService accessControlService;
 
   @Override
   public Mono<ResponseEntity<Flux<ClusterDTO>>> getClusters(ServerWebExchange exchange) {
@@ -33,16 +35,14 @@ public class ClustersController extends AbstractController implements ClustersApi {
                                                                  ServerWebExchange exchange) {
     AccessContext context = AccessContext.builder()
         .cluster(clusterName)
-        .operationName("getClusterMetrics")
         .build();
 
-    return validateAccess(context)
+    return accessControlService.validateAccess(context)
         .then(
             clusterService.getClusterMetrics(getCluster(clusterName))
                 .map(ResponseEntity::ok)
                 .onErrorReturn(ResponseEntity.notFound().build())
-        )
-        .doOnEach(sig -> audit(context, sig));
+        );
   }
 
   @Override
@@ -50,16 +50,14 @@ public class ClustersController extends AbstractController implements ClustersApi {
                                                              ServerWebExchange exchange) {
     AccessContext context = AccessContext.builder()
         .cluster(clusterName)
-        .operationName("getClusterStats")
         .build();
 
-    return validateAccess(context)
+    return accessControlService.validateAccess(context)
         .then(
             clusterService.getClusterStats(getCluster(clusterName))
                 .map(ResponseEntity::ok)
                 .onErrorReturn(ResponseEntity.notFound().build())
-        )
-        .doOnEach(sig -> audit(context, sig));
+        );
   }
 
   @Override
@@ -68,11 +66,11 @@ public class ClustersController extends AbstractController implements ClustersApi {
 
     AccessContext context = AccessContext.builder()
         .cluster(clusterName)
-        .operationName("updateClusterInfo")
         .build();
 
-    return validateAccess(context)
-        .then(clusterService.updateCluster(getCluster(clusterName)).map(ResponseEntity::ok))
-        .doOnEach(sig -> audit(context, sig));
+    return accessControlService.validateAccess(context)
+        .then(
+            clusterService.updateCluster(getCluster(clusterName)).map(ResponseEntity::ok)
+        );
   }
 }
ConsumerGroupsController.java
@@ -19,9 +19,11 @@ import com.provectus.kafka.ui.model.rbac.AccessContext;
 import com.provectus.kafka.ui.model.rbac.permission.TopicAction;
 import com.provectus.kafka.ui.service.ConsumerGroupService;
 import com.provectus.kafka.ui.service.OffsetsResetService;
+import com.provectus.kafka.ui.service.rbac.AccessControlService;
 import java.util.Map;
 import java.util.Optional;
 import java.util.function.Supplier;
+import java.util.stream.Collectors;
 import lombok.RequiredArgsConstructor;
 import lombok.extern.slf4j.Slf4j;
 import org.springframework.beans.factory.annotation.Value;
@@ -39,6 +41,7 @@ public class ConsumerGroupsController extends AbstractController implements ConsumerGroupsApi {
 
   private final ConsumerGroupService consumerGroupService;
   private final OffsetsResetService offsetsResetService;
+  private final AccessControlService accessControlService;
 
   @Value("${consumer.groups.page.size:25}")
   private int defaultConsumerGroupsPageSize;
@@ -47,47 +50,44 @@ public class ConsumerGroupsController extends AbstractController implements ConsumerGroupsApi {
   public Mono<ResponseEntity<Void>> deleteConsumerGroup(String clusterName,
                                                         String id,
                                                         ServerWebExchange exchange) {
-    var context = AccessContext.builder()
+    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
         .cluster(clusterName)
         .consumerGroup(id)
         .consumerGroupActions(DELETE)
-        .operationName("deleteConsumerGroup")
-        .build();
+        .build());
 
-    return validateAccess(context)
-        .then(consumerGroupService.deleteConsumerGroupById(getCluster(clusterName), id))
-        .doOnEach(sig -> audit(context, sig))
-        .thenReturn(ResponseEntity.ok().build());
+    return validateAccess.then(
+        consumerGroupService.deleteConsumerGroupById(getCluster(clusterName), id)
+            .thenReturn(ResponseEntity.ok().build())
+    );
   }
 
   @Override
   public Mono<ResponseEntity<ConsumerGroupDetailsDTO>> getConsumerGroup(String clusterName,
                                                                         String consumerGroupId,
                                                                         ServerWebExchange exchange) {
-    var context = AccessContext.builder()
+    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
         .cluster(clusterName)
         .consumerGroup(consumerGroupId)
         .consumerGroupActions(VIEW)
-        .operationName("getConsumerGroup")
-        .build();
+        .build());
 
-    return validateAccess(context)
-        .then(consumerGroupService.getConsumerGroupDetail(getCluster(clusterName), consumerGroupId)
+    return validateAccess.then(
+        consumerGroupService.getConsumerGroupDetail(getCluster(clusterName), consumerGroupId)
            .map(ConsumerGroupMapper::toDetailsDto)
-            .map(ResponseEntity::ok))
-        .doOnEach(sig -> audit(context, sig));
+            .map(ResponseEntity::ok)
+    );
  }
 
  @Override
  public Mono<ResponseEntity<Flux<ConsumerGroupDTO>>> getTopicConsumerGroups(String clusterName,
                                                                             String topicName,
                                                                             ServerWebExchange exchange) {
-    var context = AccessContext.builder()
+    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
        .cluster(clusterName)
        .topic(topicName)
        .topicActions(TopicAction.VIEW)
-        .operationName("getTopicConsumerGroups")
-        .build();
+        .build());
 
    Mono<ResponseEntity<Flux<ConsumerGroupDTO>>> job =
        consumerGroupService.getConsumerGroupsForTopic(getCluster(clusterName), topicName)
@@ -99,9 +99,7 @@ public class ConsumerGroupsController extends AbstractController implements ConsumerGroupsApi {
            .map(ResponseEntity::ok)
            .switchIfEmpty(Mono.just(ResponseEntity.notFound().build()));
 
-    return validateAccess(context)
-        .then(job)
-        .doOnEach(sig -> audit(context, sig));
+    return validateAccess.then(job);
  }
 
  @Override
@@ -114,13 +112,12 @@ public class ConsumerGroupsController extends AbstractController implements ConsumerGroupsApi {
                                                                                 SortOrderDTO sortOrderDto,
                                                                                 ServerWebExchange exchange) {
 
-    var context = AccessContext.builder()
+    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
        .cluster(clusterName)
        // consumer group access validation is within the service
-        .operationName("getConsumerGroupsPage")
-        .build();
+        .build());
 
-    return validateAccess(context).then(
+    return validateAccess.then(
        consumerGroupService.getConsumerGroupsPage(
            getCluster(clusterName),
            Optional.ofNullable(page).filter(i -> i > 0).orElse(1),
@@ -131,7 +128,7 @@ public class ConsumerGroupsController extends AbstractController implements ConsumerGroupsApi {
        )
            .map(this::convertPage)
            .map(ResponseEntity::ok)
-    ).doOnEach(sig -> audit(context, sig));
+    );
  }
 
  @Override
@@ -140,13 +137,12 @@ public class ConsumerGroupsController extends AbstractController implements ConsumerGroupsApi {
                                                              Mono<ConsumerGroupOffsetsResetDTO> resetDto,
                                                              ServerWebExchange exchange) {
    return resetDto.flatMap(reset -> {
-      var context = AccessContext.builder()
+      Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
          .cluster(clusterName)
          .topic(reset.getTopic())
          .topicActions(TopicAction.VIEW)
          .consumerGroupActions(RESET_OFFSETS)
-          .operationName("resetConsumerGroupOffsets")
-          .build();
+          .build());
 
      Supplier<Mono<Void>> mono = () -> {
        var cluster = getCluster(clusterName);
@@ -186,9 +182,7 @@ public class ConsumerGroupsController extends AbstractController implements ConsumerGroupsApi {
        }
      };
 
-      return validateAccess(context)
-          .then(mono.get())
-          .doOnEach(sig -> audit(context, sig));
+      return validateAccess.then(mono.get());
    }).thenReturn(ResponseEntity.ok().build());
  }
 
@@ -199,7 +193,7 @@ public class ConsumerGroupsController extends AbstractController implements ConsumerGroupsApi {
        .consumerGroups(consumerGroupConsumerGroupsPage.consumerGroups()
            .stream()
            .map(ConsumerGroupMapper::toDto)
-            .toList());
+            .collect(Collectors.toList()));
  }
 
 }
Some files were not shown because too many files have changed in this diff.