## Compare commits

`bulk-conne...master` (198 commits)

```text
83b5a60cc0 3dc4446321 53a6553765 fc97dfa874 68f08a0c9b cc12814a95
5d5358010b de2f06ccf8 ff106a2061 c00cb320cd 8a1e9ad8e8 39bb860f8e
f66d234d83 68a7268f8b aca3d25dc8 0616883fee 59584ed369 bbb739af92
145bf07b5d ceb821acdf d2b0cc51e3 9e7bc02c8a 2836b2f5d2 a47848f809
5c9fb994a4 14efe9da1e 6676747606 b0583a3ca7 4ec7975b2e c05abc1e0a
729ca79581 80024c8758 0d6f293ab9 8f2a29d15d 552691fc5d 342b534ac9
2051f6f653 b2b02a5d60 d7eb3ba99e 7de883d3ab 4519d9a48c cca2c96997
844eb17d7a 37a6e62684 4f211b39ba 8d35761b8d b12a0634a0 8d402798c5
ed9f91fd8a d2a5acc82d 7a82079471 9acbf2b681 5f89e3b97e 1df8625fc8
c8ad262d77 bdbbdcccbe 3114509ebf 6224b12ed3 78e53d7d93 f9e89661d7
2a61b97fab b32ab01436 fa9547b95a d915de4fd8 150fc21fb8 ba18f3b042
ac09efcd34 333eae2475 69ebd3d52b 6a40146fb1 4515ecaf41 92157bdd39
8126607b91 77f1ec9490 3cde6c21ec 15f4543402 c96a0c6be5 b2c3fcc321
1cd303a90b 895d27a306 476cbfb691 2db89593a7 0b99f745b0 7eaae31345
ca2d53f936 a32272d07e 32cd55928a d4001b5a39 f124fa632d 8ae8ae40a4
5f231c7681 17cde82dff 9ab4580c47 d572e43b4f ab58618d83 216c87670d
e57b0bac43 0c732db436 d26490e82e be2f9f0605 50b9c56112 401c9f12c1
b700ac3991 b9bbb1a823 81805703c8 6b67313d1a 9549f68d7e 8337c9c183
b1ac3482db cdb4f84e23 4134d68316 742e6eed3e 328d91de8b c743067ffa
7f7242eb8b 593ef7ec9c 55ed7f4821 e60fe062b6 0a35038826 fa65ec2753
f84bbb9ebb d14b935765 f2ef0c2793 c998e17e83 d0088490a4 6fe6165427
2ac8646769 af2cff20b6 2fb05ca947 fdd4947142 5c59239456 9a2f6bfc8e
5d23f2a4ed 20bb274f0e 03b7d1bd60 100bb1dac6 c355955641 4b724fd852
cd9bc43d2e 73bd6ca3a5 8a68ba0778 e118aaba3d 5771c11316 29d91bca4b
7e47906d88 f19abb2036 61bf71f9b7 004de798e4 80b748b02e 71a7a1ec84
0099169a2b ab9d0e2b3f f22c910f5c 1b9c189bfa 63f71b8a05 17ea464ec1
f7900ba478 f7d85d86e6 62bee1ced8 baeb494f53 ba6d6b2b1f c7cb7a4027
902f11a1d9 0796bf0112 6a50a8ecee 78cc4dd981 fdd9ad94c1 1c35ded909
e7429ce6c6 1d8c6197ac 52a42e698e aa7429eeba 3ca417f64a 43ec02ce30
725c95f348 3ef5a9f492 cfcfb851c6 c813e74609 e31cd2e442 bc85924d7d
9ac8549d7d f6fe14cea5 a1e7a20887 97a694b3f0 61fb62276e db86942e47
5e539f1ba8 147b539c37 379d9926df 86a7ba44fb 727f38401b 690dcd3f74
7857bd5000 abfdf97a9f c7a7921b82 744bdb32a3 da3932e342 4e25522078
```
**421 changed files** with 13,013 additions and 6,511 deletions
**`.github/CODEOWNERS`** (4 changes)

```diff
@@ -14,5 +14,5 @@
 # TESTS
 /kafka-ui-e2e-checks/ @provectus/kafka-qa
 
-# HELM CHARTS
-/charts/ @provectus/kafka-devops
+# INFRA
+/.github/workflows/ @provectus/kafka-devops
```
**`.github/ISSUE_TEMPLATE/config.yml`** (3 changes)

```diff
@@ -1,5 +1,8 @@
 blank_issues_enabled: false
 contact_links:
+  - name: Report helm issue
+    url: https://github.com/provectus/kafka-ui-charts
+    about: Our helm charts are located in another repo. Please raise issues/PRs regarding charts in that repo.
   - name: Official documentation
     url: https://docs.kafka-ui.provectus.io/
     about: Before reaching out for support, please refer to our documentation. Read "FAQ" and "Common problems", also try using search there.
```
**`.github/ISSUE_TEMPLATE/helm.yml`** (92 changes, file deleted)

```diff
@@ -1,92 +0,0 @@
-name: "⎈ K8s/Helm problem report"
-description: "Report a problem with k8s/helm charts/etc"
-labels: ["status/triage", "scope/k8s"]
-assignees: []
-
-body:
-  - type: markdown
-    attributes:
-      value: |
-        Hi, thanks for raising the issue(-s), all contributions really matter!
-        Please, note that we'll close the issue without further explanation if you don't follow
-        this template and don't provide the information requested within this template.
-
-  - type: checkboxes
-    id: terms
-    attributes:
-      label: Issue submitter TODO list
-      description: By you checking these checkboxes we can be sure you've done the essential things.
-      options:
-        - label: I've looked up my issue in [FAQ](https://docs.kafka-ui.provectus.io/faq/common-problems)
-          required: true
-        - label: I've searched for an already existing issues [here](https://github.com/provectus/kafka-ui/issues)
-          required: true
-        - label: I've tried running `master`-labeled docker image and the issue still persists there
-          required: true
-        - label: I'm running a supported version of the application which is listed [here](https://github.com/provectus/kafka-ui/blob/master/SECURITY.md)
-          required: true
-
-  - type: textarea
-    attributes:
-      label: Describe the bug (actual behavior)
-      description: A clear and concise description of what the bug is. Use a list, if there is more than one problem
-    validations:
-      required: true
-
-  - type: textarea
-    attributes:
-      label: Expected behavior
-      description: A clear and concise description of what you expected to happen
-    validations:
-      required: false
-
-  - type: textarea
-    attributes:
-      label: Your installation details
-      description: |
-        How do you run the app? Please provide as much info as possible:
-        1. App version (commit hash in the top left corner of the UI)
-        2. Helm chart version
-        3. Your application config. Please remove the sensitive info like passwords or API keys.
-        4. Any IAAC configs
-    validations:
-      required: true
-
-  - type: textarea
-    attributes:
-      label: Steps to reproduce
-      description: |
-        Please write down the order of the actions required to reproduce the issue.
-        For the advanced setups/complicated issue, we might need you to provide
-        a minimal [reproducible example](https://stackoverflow.com/help/minimal-reproducible-example).
-    validations:
-      required: true
-
-  - type: textarea
-    attributes:
-      label: Screenshots
-      description: |
-        If applicable, add screenshots to help explain your problem
-    validations:
-      required: false
-
-  - type: textarea
-    attributes:
-      label: Logs
-      description: |
-        If applicable, *upload* screenshots to help explain your problem
-    validations:
-      required: false
-
-  - type: textarea
-    attributes:
-      label: Additional context
-      description: |
-        Add any other context about the problem here. E.G.:
-        1. Are there any alternative scenarios (different data/methods/configuration/setup) you have tried?
-           Were they successful or the same issue occurred? Please provide steps as well.
-        2. Related issues (if there are any).
-        3. Logs (if available)
-        4. Is there any serious impact or behaviour on the end-user because of this issue, that can be overlooked?
-    validations:
-      required: false
```
**`.github/release_drafter.yaml`** (8 changes)

```diff
@@ -16,18 +16,26 @@ exclude-labels:
   - 'type/refactoring'
 
 categories:
+  - title: '🚩 Breaking Changes'
+    labels:
+      - 'impact/changelog'
+
   - title: '⚙️Features'
     labels:
       - 'type/feature'
 
+  - title: '🪛Enhancements'
+    labels:
+      - 'type/enhancement'
+
   - title: '🔨Bug Fixes'
     labels:
       - 'type/bug'
 
   - title: 'Security'
     labels:
       - 'type/security'
 
   - title: '⎈ Helm/K8S Changes'
     labels:
       - 'scope/k8s'
```
**`.github/workflows/aws_publisher.yaml`** (6 changes)

```diff
@@ -1,4 +1,4 @@
-name: AWS Marketplace Publisher
+name: "Infra: Release: AWS Marketplace Publisher"
 on:
   workflow_dispatch:
     inputs:
@@ -24,14 +24,14 @@ jobs:
       - name: Clone infra repo
         run: |
           echo "Cloning repo..."
-          git clone https://kafka-ui-infra:${{ secrets.KAFKA_UI_INFRA_TOKEN }}@gitlab.provectus.com/provectus-internals/kafka-ui-infra.git --branch ${{ github.event.inputs.KafkaUIInfraBranch }}
+          git clone https://infra-tech:${{ secrets.INFRA_USER_ACCESS_TOKEN }}@github.com/provectus/kafka-ui-infra.git --branch ${{ github.event.inputs.KafkaUIInfraBranch }}
           echo "Cd to packer DIR..."
           cd kafka-ui-infra/ami
           echo "WORK_DIR=$(pwd)" >> $GITHUB_ENV
           echo "Packer will be triggered in this dir $WORK_DIR"
 
       - name: Configure AWS credentials for Kafka-UI account
-        uses: aws-actions/configure-aws-credentials@v2
+        uses: aws-actions/configure-aws-credentials@v3
         with:
           aws-access-key-id: ${{ secrets.AWS_AMI_PUBLISH_KEY_ID }}
           aws-secret-access-key: ${{ secrets.AWS_AMI_PUBLISH_KEY_SECRET }}
```
**`.github/workflows/backend.yml`** (7 changes)

```diff
@@ -1,4 +1,4 @@
-name: Backend build and test
+name: "Backend: PR/master build & test"
 on:
   push:
     branches:
@@ -8,6 +8,9 @@ on:
     paths:
       - "kafka-ui-api/**"
      - "pom.xml"
+permissions:
+  checks: write
+  pull-requests: write
 jobs:
   build-and-test:
     runs-on: ubuntu-latest
@@ -29,7 +32,7 @@ jobs:
           key: ${{ runner.os }}-sonar
           restore-keys: ${{ runner.os }}-sonar
       - name: Build and analyze pull request target
-        if: ${{ github.event_name == 'pull_request_target' }}
+        if: ${{ github.event_name == 'pull_request' }}
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
           SONAR_TOKEN: ${{ secrets.SONAR_TOKEN_BACKEND }}
```
**`.github/workflows/block_merge.yml`** (4 changes)

```diff
@@ -1,4 +1,4 @@
-name: Pull Request Labels
+name: "Infra: PR block merge"
 on:
   pull_request:
     types: [opened, labeled, unlabeled, synchronize]
@@ -6,7 +6,7 @@ jobs:
   block_merge:
     runs-on: ubuntu-latest
     steps:
-      - uses: mheap/github-action-required-labels@v4
+      - uses: mheap/github-action-required-labels@v5
        with:
          mode: exactly
          count: 0
```
**`.github/workflows/branch-deploy.yml`** (34 changes)

```diff
@@ -1,4 +1,4 @@
-name: Feature testing init
+name: "Infra: Feature Testing: Init env"
 on:
   workflow_dispatch:
 
@@ -45,7 +45,7 @@ jobs:
           restore-keys: |
             ${{ runner.os }}-buildx-
       - name: Configure AWS credentials for Kafka-UI account
-        uses: aws-actions/configure-aws-credentials@v2
+        uses: aws-actions/configure-aws-credentials@v3
         with:
           aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
           aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
@@ -73,29 +73,33 @@ jobs:
     steps:
       - name: clone
         run: |
-          git clone https://kafka-ui-infra:${{ secrets.KAFKA_UI_INFRA_TOKEN }}@gitlab.provectus.com/provectus-internals/kafka-ui-infra.git
+          git clone https://infra-tech:${{ secrets.INFRA_USER_ACCESS_TOKEN }}@github.com/provectus/kafka-ui-infra.git --branch envs
       - name: create deployment
         run: |
           cd kafka-ui-infra/aws-infrastructure4eks/argocd/scripts
           echo "Branch:${{ needs.build.outputs.tag }}"
           ./kafka-ui-deployment-from-branch.sh ${{ needs.build.outputs.tag }} ${{ github.event.label.name }} ${{ secrets.FEATURE_TESTING_UI_PASSWORD }}
-          git config --global user.email "kafka-ui-infra@provectus.com"
-          git config --global user.name "kafka-ui-infra"
+          git config --global user.email "infra-tech@provectus.com"
+          git config --global user.name "infra-tech"
           git add ../kafka-ui-from-branch/
           git commit -m "added env:${{ needs.build.outputs.deploy }}" && git push || true
 
-      - name: make comment with private deployment link
+      - name: update status check for private deployment
         if: ${{ github.event.label.name == 'status/feature_testing' }}
-        uses: peter-evans/create-or-update-comment@v3
+        uses: Sibz/github-status-action@v1.1.6
         with:
-          issue-number: ${{ github.event.pull_request.number }}
-          body: |
-            Custom deployment will be available at http://${{ needs.build.outputs.tag }}.internal.kafka-ui.provectus.io
+          authToken: ${{secrets.GITHUB_TOKEN}}
+          context: "Click Details button to open custom deployment page"
+          state: "success"
+          sha: ${{ github.event.pull_request.head.sha || github.sha }}
+          target_url: "http://${{ needs.build.outputs.tag }}.internal.kafka-ui.provectus.io"
 
-      - name: make comment with public deployment link
+      - name: update status check for public deployment
         if: ${{ github.event.label.name == 'status/feature_testing_public' }}
-        uses: peter-evans/create-or-update-comment@v3
+        uses: Sibz/github-status-action@v1.1.6
         with:
-          issue-number: ${{ github.event.pull_request.number }}
-          body: |
-            Custom deployment will be available at http://${{ needs.build.outputs.tag }}.kafka-ui.provectus.io in 5 minutes
+          authToken: ${{secrets.GITHUB_TOKEN}}
+          context: "Click Details button to open custom deployment page"
+          state: "success"
+          sha: ${{ github.event.pull_request.head.sha || github.sha }}
+          target_url: "http://${{ needs.build.outputs.tag }}.internal.kafka-ui.provectus.io"
```
**`.github/workflows/branch-remove.yml`** (14 changes)

```diff
@@ -1,4 +1,4 @@
-name: Feature testing destroy
+name: "Infra: Feature Testing: Destroy env"
 on:
   workflow_dispatch:
   pull_request:
@@ -11,18 +11,12 @@ jobs:
       - uses: actions/checkout@v3
       - name: clone
         run: |
-          git clone https://kafka-ui-infra:${{ secrets.KAFKA_UI_INFRA_TOKEN }}@gitlab.provectus.com/provectus-internals/kafka-ui-infra.git
+          git clone https://infra-tech:${{ secrets.INFRA_USER_ACCESS_TOKEN }}@github.com/provectus/kafka-ui-infra.git --branch envs
       - name: remove env
         run: |
           cd kafka-ui-infra/aws-infrastructure4eks/argocd/scripts
           ./delete-env.sh pr${{ github.event.pull_request.number }} || true
-          git config --global user.email "kafka-ui-infra@provectus.com"
-          git config --global user.name "kafka-ui-infra"
+          git config --global user.email "infra-tech@provectus.com"
+          git config --global user.name "infra-tech"
           git add ../kafka-ui-from-branch/
           git commit -m "removed env:${{ needs.build.outputs.deploy }}" && git push || true
-      - name: make comment with deployment link
-        uses: peter-evans/create-or-update-comment@v3
-        with:
-          issue-number: ${{ github.event.pull_request.number }}
-          body: |
-            Custom deployment removed
```
**`.github/workflows/build-public-image.yml`** (5 changes)

```diff
@@ -1,4 +1,4 @@
-name: Build Docker image and push
+name: "Infra: Image Testing: Deploy"
 on:
   workflow_dispatch:
   pull_request:
@@ -42,7 +42,7 @@ jobs:
           restore-keys: |
             ${{ runner.os }}-buildx-
       - name: Configure AWS credentials for Kafka-UI account
-        uses: aws-actions/configure-aws-credentials@v2
+        uses: aws-actions/configure-aws-credentials@v3
         with:
           aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
           aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
@@ -70,6 +70,5 @@ jobs:
           issue-number: ${{ github.event.pull_request.number }}
           body: |
             Image published at public.ecr.aws/provectus/kafka-ui-custom-build:${{ steps.extract_branch.outputs.tag }}
-
   outputs:
     tag: ${{ steps.extract_branch.outputs.tag }}
```
**`.github/workflows/create-branch-for-helm.yaml`** (28 changes, file deleted)

```diff
@@ -1,28 +0,0 @@
-name: Prepare helm release
-on:
-  repository_dispatch:
-    types: [prepare-helm-release]
-jobs:
-  change-app-version:
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@v3
-      - run: |
-          git config user.name github-actions
-          git config user.email github-actions@github.com
-      - name: Change versions
-        run: |
-          git checkout -b release-${{ github.event.client_payload.appversion}}
-          version=$(cat charts/kafka-ui/Chart.yaml | grep version | awk '{print $2}')
-          version=${version%.*}.$((${version##*.}+1))
-          sed -i "s/version:.*/version: ${version}/" charts/kafka-ui/Chart.yaml
-          sed -i "s/appVersion:.*/appVersion: ${{ github.event.client_payload.appversion}}/" charts/kafka-ui/Chart.yaml
-          git add charts/kafka-ui/Chart.yaml
-          git commit -m "release ${version}"
-          git push --set-upstream origin release-${{ github.event.client_payload.appversion}}
-      - name: Slack Notification
-        uses: rtCamp/action-slack-notify@v2
-        env:
-          SLACK_TITLE: "release-${{ github.event.client_payload.appversion}}"
-          SLACK_MESSAGE: "A new release of the helm chart has been prepared. Branch name: release-${{ github.event.client_payload.appversion}}"
-          SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
```
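The deleted `Change versions` step bumps the chart's patch version with nothing but bash parameter expansion. A minimal standalone sketch of that expansion (the example version string is illustrative):

```bash
#!/usr/bin/env bash
# Patch-bump as used above: strip the last dot-segment, then re-append it
# incremented by one.
version="0.6.2"
patch=$(( ${version##*.} + 1 ))   # "${version##*.}" -> "2", incremented -> 3
version="${version%.*}.${patch}"  # "${version%.*}"  -> "0.6", joined -> "0.6.3"
echo "$version"                   # prints: 0.6.3
```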
**`.github/workflows/cve.yaml`** (2 changes)

```diff
@@ -55,7 +55,7 @@ jobs:
           cache-to: type=local,dest=/tmp/.buildx-cache
 
       - name: Run CVE checks
-        uses: aquasecurity/trivy-action@0.10.0
+        uses: aquasecurity/trivy-action@0.12.0
         with:
           image-ref: "provectuslabs/kafka-ui:${{ steps.build.outputs.version }}"
           format: "table"
```
**`.github/workflows/delete-public-image.yml`** (10 changes)

```diff
@@ -1,4 +1,4 @@
-name: Delete Public ECR Image
+name: "Infra: Image Testing: Delete"
 on:
   workflow_dispatch:
   pull_request:
@@ -15,7 +15,7 @@ jobs:
           tag='${{ github.event.pull_request.number }}'
           echo "tag=${tag}" >> $GITHUB_OUTPUT
       - name: Configure AWS credentials for Kafka-UI account
-        uses: aws-actions/configure-aws-credentials@v2
+        uses: aws-actions/configure-aws-credentials@v3
         with:
           aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
           aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
@@ -32,9 +32,3 @@ jobs:
             --repository-name kafka-ui-custom-build \
             --image-ids imageTag=${{ steps.extract_branch.outputs.tag }} \
             --region us-east-1
-      - name: make comment with private deployment link
-        uses: peter-evans/create-or-update-comment@v3
-        with:
-          issue-number: ${{ github.event.pull_request.number }}
-          body: |
-            Image tag public.ecr.aws/provectus/kafka-ui-custom-build:${{ steps.extract_branch.outputs.tag }} has been removed
```
**`.github/workflows/documentation.yaml`** (2 changes)

```diff
@@ -1,4 +1,4 @@
-name: Documentation URLs linter
+name: "Infra: Docs: URL linter"
 on:
   pull_request:
     types:
```
**`.github/workflows/e2e-automation.yml`** (4 changes)

```diff
@@ -1,4 +1,4 @@
-name: E2E Automation suite
+name: "E2E: Automation suite"
 on:
   workflow_dispatch:
     inputs:
@@ -24,7 +24,7 @@ jobs:
         with:
           ref: ${{ github.sha }}
       - name: Configure AWS credentials
-        uses: aws-actions/configure-aws-credentials@v2
+        uses: aws-actions/configure-aws-credentials@v3
         with:
           aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
           aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
```
**`.github/workflows/e2e-checks.yaml`** (12 changes)

```diff
@@ -1,4 +1,4 @@
-name: E2E PR health check
+name: "E2E: PR healthcheck"
 on:
   pull_request_target:
     types: [ "opened", "edited", "reopened", "synchronize" ]
@@ -8,6 +8,8 @@ on:
       - "kafka-ui-react-app/**"
       - "kafka-ui-e2e-checks/**"
       - "pom.xml"
+permissions:
+  statuses: write
 jobs:
   build-and-test:
     runs-on: ubuntu-latest
@@ -16,10 +18,10 @@ jobs:
         with:
           ref: ${{ github.event.pull_request.head.sha }}
       - name: Configure AWS credentials
-        uses: aws-actions/configure-aws-credentials@v2
+        uses: aws-actions/configure-aws-credentials@v3
         with:
-          aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
-          aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
+          aws-access-key-id: ${{ secrets.S3_AWS_ACCESS_KEY_ID }}
+          aws-secret-access-key: ${{ secrets.S3_AWS_SECRET_ACCESS_KEY }}
           aws-region: eu-central-1
       - name: Set up environment
         id: set_env_values
@@ -45,7 +47,7 @@ jobs:
         # use the following command until #819 will be fixed
         run: |
           docker-compose -f kafka-ui-e2e-checks/docker/selenoid-git.yaml up -d
-          docker-compose -f ./documentation/compose/e2e-tests.yaml up -d
+          docker-compose -f ./documentation/compose/e2e-tests.yaml up -d && until [ "$(docker exec kafka-ui wget --spider --server-response http://localhost:8080/actuator/health 2>&1 | grep -c 'HTTP/1.1 200 OK')" == "1" ]; do echo "Waiting for kafka-ui ..." && sleep 1; done
       - name: Run test suite
         run: |
           ./mvnw -B -ntp versions:set -DnewVersion=${{ github.event.pull_request.head.sha }}
```
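The new healthcheck gates the E2E run on kafka-ui actually answering its actuator endpoint instead of starting tests blindly. A standalone sketch of the same polling pattern; container name and URL are taken from the workflow, while the bounded attempt count is an added assumption so a broken container fails fast:

```bash
#!/usr/bin/env bash
# Poll the app's health endpoint until it returns HTTP 200, with a cap on
# attempts so the script exits non-zero rather than hanging forever.
for attempt in $(seq 1 120); do
  if docker exec kafka-ui wget --spider --server-response \
       http://localhost:8080/actuator/health 2>&1 | grep -q 'HTTP/1.1 200 OK'; then
    echo "kafka-ui is healthy after ${attempt}s"
    exit 0
  fi
  echo "Waiting for kafka-ui ..."
  sleep 1
done
echo "kafka-ui never became healthy" >&2
exit 1
```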
**`.github/workflows/e2e-manual.yml`** (2 changes)

```diff
@@ -1,4 +1,4 @@
-name: E2E Manual suite
+name: "E2E: Manual suite"
 on:
   workflow_dispatch:
     inputs:
```
|
4
.github/workflows/e2e-weekly.yml
vendored
4
.github/workflows/e2e-weekly.yml
vendored
|
@ -1,4 +1,4 @@
|
|||
name: E2E Weekly suite
|
||||
name: "E2E: Weekly suite"
|
||||
on:
|
||||
schedule:
|
||||
- cron: '0 1 * * 1'
|
||||
|
@ -11,7 +11,7 @@ jobs:
|
|||
with:
|
||||
ref: ${{ github.sha }}
|
||||
- name: Configure AWS credentials
|
||||
uses: aws-actions/configure-aws-credentials@v2
|
||||
uses: aws-actions/configure-aws-credentials@v3
|
||||
with:
|
||||
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
|
||||
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
|
||||
|
|
**`.github/workflows/frontend.yaml`** (15 changes)

```diff
@@ -1,4 +1,4 @@
-name: Frontend build and test
+name: "Frontend: PR/master build & test"
 on:
   push:
     branches:
@@ -8,6 +8,9 @@ on:
     paths:
       - "kafka-ui-contract/**"
       - "kafka-ui-react-app/**"
+permissions:
+  checks: write
+  pull-requests: write
 jobs:
   build-and-test:
     env:
@@ -20,13 +23,13 @@ jobs:
           # Disabling shallow clone is recommended for improving relevancy of reporting
           fetch-depth: 0
           ref: ${{ github.event.pull_request.head.sha }}
-      - uses: pnpm/action-setup@v2.2.4
+      - uses: pnpm/action-setup@v2.4.0
         with:
-          version: 7.4.0
+          version: 8.6.12
       - name: Install node
-        uses: actions/setup-node@v3.6.0
+        uses: actions/setup-node@v3.8.1
         with:
-          node-version: "16.15.0"
+          node-version: "18.17.1"
           cache: "pnpm"
           cache-dependency-path: "./kafka-ui-react-app/pnpm-lock.yaml"
       - name: Install Node dependencies
@@ -46,7 +49,7 @@ jobs:
           cd kafka-ui-react-app/
           pnpm test:CI
       - name: SonarCloud Scan
-        uses: workshur/sonarcloud-github-action@improved_basedir
+        uses: sonarsource/sonarcloud-github-action@master
         with:
           projectBaseDir: ./kafka-ui-react-app
           args: -Dsonar.pullrequest.key=${{ github.event.pull_request.number }} -Dsonar.pullrequest.branch=${{ github.head_ref }} -Dsonar.pullrequest.base=${{ github.base_ref }}
```
**`.github/workflows/helm.yaml`** (38 changes, file deleted)

```diff
@@ -1,38 +0,0 @@
-name: Helm linter
-on:
-  pull_request:
-    types: ["opened", "edited", "reopened", "synchronize"]
-    branches:
-      - 'master'
-    paths:
-      - "charts/**"
-jobs:
-  build-and-test:
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@v3
-      - name: Helm tool installer
-        uses: Azure/setup-helm@v3
-      - name: Setup Kubeval
-        uses: lra/setup-kubeval@v1.0.1
-      #check, was helm version increased in Chart.yaml?
-      - name: Check version
-        shell: bash
-        run: |
-          helm_version_new=$(cat charts/kafka-ui/Chart.yaml | grep version | awk '{print $2}')
-          helm_version_old=$(curl -s https://raw.githubusercontent.com/provectus/kafka-ui/master/charts/kafka-ui/Chart.yaml | grep version | awk '{print $2}' )
-          echo $helm_version_old
-          echo $helm_version_new
-          if [[ "$helm_version_new" > "$helm_version_old" ]]; then exit 0 ; else exit 1 ; fi
-      - name: Run kubeval
-        shell: bash
-        run: |
-          sed -i "s@enabled: false@enabled: true@g" charts/kafka-ui/values.yaml
-          K8S_VERSIONS=$(git ls-remote --refs --tags https://github.com/kubernetes/kubernetes.git | cut -d/ -f3 | grep -e '^v1\.[0-9]\{2\}\.[0]\{1,2\}$' | grep -v -e '^v1\.1[0-7]\{1\}' | cut -c2-)
-          echo "NEXT K8S VERSIONS ARE GOING TO BE TESTED: $K8S_VERSIONS"
-          echo ""
-          for version in $K8S_VERSIONS
-          do
-            echo $version;
-            helm template --kube-version $version --set ingress.enabled=true charts/kafka-ui -f charts/kafka-ui/values.yaml | kubeval --additional-schema-locations https://raw.githubusercontent.com/yannh/kubernetes-json-schema/master --strict -v $version;
-          done
```
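One quirk of the deleted `Check version` step: bash's `[[ ... > ... ]]` compares strings lexicographically, so it would misorder multi-digit components (it treats `0.9.0` as greater than `0.10.0`). A hedged sketch of a numeric-safe alternative using coreutils' `sort -V`, not something the workflow itself did:

```bash
#!/usr/bin/env bash
# Succeed only if $2 is strictly greater than $1 in version order.
# sort -V compares dot-separated components numerically.
version_gt() {
  local old="$1" new="$2"
  [ "$old" != "$new" ] && \
    [ "$(printf '%s\n%s\n' "$old" "$new" | sort -V | head -n1)" = "$old" ]
}

version_gt "0.9.0" "0.10.0" && echo "0.10.0 > 0.9.0"        # succeeds
version_gt "0.10.0" "0.9.0" || echo "0.9.0 is not > 0.10.0" # fails as expected
```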
**`.github/workflows/master.yaml`** (9 changes)

```diff
@@ -1,4 +1,4 @@
-name: Master branch build & deploy
+name: "Master: Build & deploy"
 on:
   workflow_dispatch:
   push:
@@ -58,6 +58,7 @@ jobs:
           builder: ${{ steps.buildx.outputs.name }}
           context: kafka-ui-api
           platforms: linux/amd64,linux/arm64
+          provenance: false
           push: true
           tags: |
             provectuslabs/kafka-ui:${{ steps.build.outputs.version }}
@@ -73,11 +74,11 @@ jobs:
       #################################
       - name: update-master-deployment
         run: |
-          git clone https://kafka-ui-infra:${{ secrets.KAFKA_UI_INFRA_TOKEN }}@gitlab.provectus.com/provectus-internals/kafka-ui-infra.git --branch master
+          git clone https://infra-tech:${{ secrets.INFRA_USER_ACCESS_TOKEN }}@github.com/provectus/kafka-ui-infra.git --branch master
           cd kafka-ui-infra/aws-infrastructure4eks/argocd/scripts
           echo "Image digest is:${{ steps.docker_build_and_push.outputs.digest }}"
           ./kafka-ui-update-master-digest.sh ${{ steps.docker_build_and_push.outputs.digest }}
-          git config --global user.email "kafka-ui-infra@provectus.com"
-          git config --global user.name "kafka-ui-infra"
+          git config --global user.email "infra-tech@provectus.com"
+          git config --global user.name "infra-tech"
           git add ../kafka-ui/*
           git commit -m "updated master image digest: ${{ steps.docker_build_and_push.outputs.digest }}" && git push
```
|
7
.github/workflows/pr-checks.yaml
vendored
7
.github/workflows/pr-checks.yaml
vendored
|
@ -1,13 +1,14 @@
|
|||
name: "PR Checklist checked"
|
||||
name: "PR: Checklist linter"
|
||||
on:
|
||||
pull_request_target:
|
||||
types: [opened, edited, synchronize, reopened]
|
||||
|
||||
permissions:
|
||||
checks: write
|
||||
jobs:
|
||||
task-check:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: kentaro-m/task-completed-checker-action@v0.1.1
|
||||
- uses: kentaro-m/task-completed-checker-action@v0.1.2
|
||||
with:
|
||||
repo-token: "${{ secrets.GITHUB_TOKEN }}"
|
||||
- uses: dekinderfiets/pr-description-enforcer@0.0.1
|
||||
|
|
**`.github/workflows/release-helm.yaml`** (39 changes, file deleted)

```diff
@@ -1,39 +0,0 @@
-name: Release helm
-on:
-  push:
-    branches:
-      - master
-    paths:
-      - "charts/**"
-
-jobs:
-  release-helm:
-    runs-on:
-      ubuntu-latest
-    steps:
-      - uses: actions/checkout@v3
-        with:
-          fetch-depth: 1
-
-      - run: |
-          git config user.name github-actions
-          git config user.email github-actions@github.com
-
-      - uses: azure/setup-helm@v3
-
-      - name: add chart #realse helm with new version
-        run: |
-          VERSION=$(cat charts/kafka-ui/Chart.yaml | grep version | awk '{print $2}')
-          echo "HELM_VERSION=$(echo ${VERSION})" >> $GITHUB_ENV
-          MSG=$(helm package charts/kafka-ui)
-          git fetch origin
-          git stash
-          git checkout -b gh-pages origin/gh-pages
-          git pull
-          helm repo index .
-          git add -f ${MSG##*/} index.yaml
-          git commit -m "release ${VERSION}"
-          git push
-      - uses: rickstaa/action-create-tag@v1 #create new tag
-        with:
-          tag: "charts/kafka-ui-${{ env.HELM_VERSION }}"
```
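The deleted workflow packaged the chart and pushed it plus a regenerated `index.yaml` to the `gh-pages` branch. A hedged sketch of how a consumer would verify such a published repo; the index URL is the conventional GitHub Pages location and is assumed here:

```bash
#!/usr/bin/env bash
# Add the published chart repo and confirm the released version is indexed.
helm repo add kafka-ui https://provectus.github.io/kafka-ui
helm repo update
# Lists every indexed chart version; a fresh release should appear on top.
helm search repo kafka-ui/kafka-ui --versions
```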
**`.github/workflows/release-serde-api.yaml`** (2 changes)

```diff
@@ -1,4 +1,4 @@
-name: Release serde api
+name: "Infra: Release: Serde API"
 on: workflow_dispatch
 
 jobs:
```
**`.github/workflows/release.yaml`** (11 changes)

```diff
@@ -1,4 +1,4 @@
-name: Release
+name: "Infra: Release"
 on:
   release:
     types: [published]
@@ -34,7 +34,7 @@ jobs:
           echo "version=${VERSION}" >> $GITHUB_OUTPUT
 
       - name: Upload files to a GitHub release
-        uses: svenstaro/upload-release-action@2.5.0
+        uses: svenstaro/upload-release-action@2.7.0
         with:
           repo_token: ${{ secrets.GITHUB_TOKEN }}
           file: kafka-ui-api/target/kafka-ui-api-${{ steps.build.outputs.version }}.jar
@@ -77,6 +77,7 @@ jobs:
           builder: ${{ steps.buildx.outputs.name }}
           context: kafka-ui-api
           platforms: linux/amd64,linux/arm64
+          provenance: false
           push: true
           tags: |
             provectuslabs/kafka-ui:${{ steps.build.outputs.version }}
@@ -88,14 +89,12 @@ jobs:
 
   charts:
     runs-on: ubuntu-latest
-    permissions:
-      contents: write
     needs: release
     steps:
       - name: Repository Dispatch
         uses: peter-evans/repository-dispatch@v2
         with:
-          token: ${{ secrets.GITHUB_TOKEN }}
-          repository: provectus/kafka-ui
+          token: ${{ secrets.CHARTS_ACTIONS_TOKEN }}
+          repository: provectus/kafka-ui-charts
          event-type: prepare-helm-release
          client-payload: '{"appversion": "${{ needs.release.outputs.version }}"}'
```
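The `charts` job now fires its `repository_dispatch` event into provectus/kafka-ui-charts rather than back into this repo, which is why a cross-repo PAT replaces `GITHUB_TOKEN`. The same trigger can be reproduced directly against the GitHub REST API; a minimal sketch, with the token variable and example `appversion` as placeholders:

```bash
#!/usr/bin/env bash
# Fire the same cross-repo trigger peter-evans/repository-dispatch sends:
# POST /repos/{owner}/{repo}/dispatches with an event type and JSON payload.
curl -sS -X POST \
  -H "Accept: application/vnd.github+json" \
  -H "Authorization: Bearer ${CHARTS_ACTIONS_TOKEN:?set a PAT with repo scope}" \
  https://api.github.com/repos/provectus/kafka-ui-charts/dispatches \
  -d '{"event_type": "prepare-helm-release", "client_payload": {"appversion": "v0.7.0"}}'
```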
**`.github/workflows/release_drafter.yml`** (19 changes)

```diff
@@ -1,19 +1,34 @@
-name: Release Drafter
+name: "Infra: Release Drafter run"
 
 on:
   push:
     # branches to consider in the event; optional, defaults to all
     branches:
       - master
+  workflow_dispatch:
+    inputs:
+      version:
+        description: 'Release version'
+        required: false
+      branch:
+        description: 'Target branch'
+        required: false
+        default: 'master'
+
+permissions:
+  contents: read
 
 jobs:
   update_release_draft:
     runs-on: ubuntu-latest
+    permissions:
+      contents: write
+      pull-requests: write
     steps:
       - uses: release-drafter/release-drafter@v5
         with:
           config-name: release_drafter.yaml
           disable-autolabeler: true
+          version: ${{ github.event.inputs.version }}
+          commitish: ${{ github.event.inputs.branch }}
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
```
**`.github/workflows/separate_env_public_create.yml`** (10 changes)

```diff
@@ -1,4 +1,4 @@
-name: Separate environment create
+name: "Infra: Feature Testing Public: Init env"
 on:
   workflow_dispatch:
     inputs:
@@ -47,7 +47,7 @@ jobs:
           restore-keys: |
             ${{ runner.os }}-buildx-
       - name: Configure AWS credentials for Kafka-UI account
-        uses: aws-actions/configure-aws-credentials@v2
+        uses: aws-actions/configure-aws-credentials@v3
         with:
           aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
           aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
@@ -76,14 +76,14 @@ jobs:
     steps:
       - name: clone
         run: |
-          git clone https://kafka-ui-infra:${{ secrets.KAFKA_UI_INFRA_TOKEN }}@gitlab.provectus.com/provectus-internals/kafka-ui-infra.git
+          git clone https://infra-tech:${{ secrets.INFRA_USER_ACCESS_TOKEN }}@github.com/provectus/kafka-ui-infra.git --branch envs
 
       - name: separate env create
         run: |
           cd kafka-ui-infra/aws-infrastructure4eks/argocd/scripts
           bash separate_env_create.sh ${{ github.event.inputs.ENV_NAME }} ${{ secrets.FEATURE_TESTING_UI_PASSWORD }} ${{ needs.build.outputs.tag }}
-          git config --global user.email "kafka-ui-infra@provectus.com"
-          git config --global user.name "kafka-ui-infra"
+          git config --global user.email "infra-tech@provectus.com"
+          git config --global user.name "infra-tech"
           git add -A
           git commit -m "separate env added: ${{ github.event.inputs.ENV_NAME }}" && git push || true
```
```diff
@@ -1,4 +1,4 @@
-name: Separate environment remove
+name: "Infra: Feature Testing Public: Destroy env"
 on:
   workflow_dispatch:
     inputs:
@@ -13,12 +13,12 @@ jobs:
     steps:
       - name: clone
         run: |
-          git clone https://kafka-ui-infra:${{ secrets.KAFKA_UI_INFRA_TOKEN }}@gitlab.provectus.com/provectus-internals/kafka-ui-infra.git
+          git clone https://infra-tech:${{ secrets.INFRA_USER_ACCESS_TOKEN }}@github.com/provectus/kafka-ui-infra.git --branch envs
       - name: separate environment remove
         run: |
           cd kafka-ui-infra/aws-infrastructure4eks/argocd/scripts
           bash separate_env_remove.sh ${{ github.event.inputs.ENV_NAME }}
-          git config --global user.email "kafka-ui-infra@provectus.com"
-          git config --global user.name "kafka-ui-infra"
+          git config --global user.email "infra-tech@provectus.com"
+          git config --global user.name "infra-tech"
           git add -A
           git commit -m "separate env removed: ${{ github.event.inputs.ENV_NAME }}" && git push || true
```
**`.github/workflows/stale.yaml`** (2 changes)

```diff
@@ -1,4 +1,4 @@
-name: 'Close stale issues'
+name: 'Infra: Close stale issues'
 on:
   schedule:
     - cron: '30 1 * * *'
```
**`.github/workflows/terraform-deploy.yml`** (4 changes)

```diff
@@ -1,4 +1,4 @@
-name: Terraform deploy
+name: "Infra: Terraform deploy"
 on:
   workflow_dispatch:
     inputs:
@@ -26,7 +26,7 @@ jobs:
           echo "Terraform will be triggered in this dir $TF_DIR"
 
       - name: Configure AWS credentials for Kafka-UI account
-        uses: aws-actions/configure-aws-credentials@v2
+        uses: aws-actions/configure-aws-credentials@v3
         with:
           aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
           aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
```
**`.github/workflows/triage_issues.yml`** (2 changes)

```diff
@@ -1,4 +1,4 @@
-name: Add triage label to new issues
+name: "Infra: Triage: Apply triage label for issues"
 on:
   issues:
     types:
```
**`.github/workflows/triage_prs.yml`** (2 changes)

```diff
@@ -1,4 +1,4 @@
-name: Add triage label to new PRs
+name: "Infra: Triage: Apply triage label for PRs"
 on:
   pull_request:
     types:
```
```diff
@@ -7,7 +7,9 @@ on:
   issues:
     types:
       - opened
-
+permissions:
+  issues: write
+  pull-requests: write
 jobs:
   welcome:
     runs-on: ubuntu-latest
```
**`.github/workflows/workflow_linter.yaml`** (2 changes)

```diff
@@ -1,4 +1,4 @@
-name: "Workflow linter"
+name: "Infra: Workflow linter"
 on:
   pull_request:
     types:
```
**`.gitignore`** (3 changes)

```diff
@@ -31,6 +31,9 @@ build/
 .vscode/
 /kafka-ui-api/app/node
 
+### SDKMAN ###
+.sdkmanrc
+
 .DS_Store
 *.code-workspace
```
````diff
@@ -18,6 +18,10 @@
     <a href="https://www.producthunt.com/products/ui-for-apache-kafka/reviews/new">ProductHunt</a>
 </p>
 
+<p align="center">
+    <img src="https://repobeats.axiom.co/api/embed/2e8a7c2d711af9daddd34f9791143e7554c35d0f.svg" />
+</p>
+
 #### UI for Apache Kafka is a free, open-source web UI to monitor and manage Apache Kafka clusters.
 
 UI for Apache Kafka is a simple tool that makes your data flows observable, helps find and troubleshoot issues faster and deliver optimal performance. Its lightweight dashboard makes it easy to track key metrics of your Kafka clusters - Brokers, Topics, Partitions, Production, and Consumption.
@@ -87,7 +91,7 @@ docker run -it -p 8080:8080 -e DYNAMIC_CONFIG_ENABLED=true provectuslabs/kafka-u
 
 Then access the web UI at [http://localhost:8080](http://localhost:8080)
 
-The command is sufficient to try things out. When you're done trying things out, you can proceed with a [persistent installation](https://docs.kafka-ui.provectus.io/configuration/quick-start#persistent-start)
+The command is sufficient to try things out. When you're done trying things out, you can proceed with a [persistent installation](https://docs.kafka-ui.provectus.io/quick-start/persistent-start)
 
 ## Persistent installation
 
@@ -99,7 +103,7 @@ services:
     ports:
       - 8080:8080
     environment:
-      DYNAMIC_CONFIG_ENABLED: true
+      DYNAMIC_CONFIG_ENABLED: 'true'
     volumes:
       - ~/kui/config.yml:/etc/kafkaui/dynamic_config.yaml
 ```
````
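The README hunk above shows only the changed lines of the persistent-install compose file (the value is quoted so YAML passes the environment variable as the string `'true'` rather than a boolean). A hedged, self-contained sketch of the full setup it describes; the `latest` image tag and host paths are assumptions:

```bash
#!/usr/bin/env bash
# Write the persistent-install compose file the README describes, then start it.
mkdir -p ~/kui && touch ~/kui/config.yml
cat > docker-compose.yml <<'EOF'
services:
  kafka-ui:
    container_name: kafka-ui
    image: provectuslabs/kafka-ui:latest
    ports:
      - 8080:8080
    environment:
      DYNAMIC_CONFIG_ENABLED: 'true'
    volumes:
      - ~/kui/config.yml:/etc/kafkaui/dynamic_config.yaml
EOF
docker compose up -d   # UI becomes available at http://localhost:8080
```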
```diff
@@ -6,7 +6,8 @@ Following versions of the project are currently being supported with security up
 
 | Version | Supported          |
 | ------- | ------------------ |
-| 0.6.x   | :white_check_mark: |
+| 0.7.x   | :white_check_mark: |
+| 0.6.x   | :x:                |
 | 0.5.x   | :x:                |
 | 0.4.x   | :x:                |
 | 0.3.x   | :x:                |
```
```diff
@@ -1,25 +0,0 @@
-# Patterns to ignore when building packages.
-# This supports shell glob matching, relative path matching, and
-# negation (prefixed with !). Only one pattern per line.
-.DS_Store
-# Common VCS dirs
-.git/
-.gitignore
-.bzr/
-.bzrignore
-.hg/
-.hgignore
-.svn/
-# Common backup files
-*.swp
-*.bak
-*.tmp
-*.orig
-*~
-# Various IDEs
-.project
-.idea/
-*.tmproj
-.vscode/
-example/
-README.md
```
```diff
@@ -1,7 +0,0 @@
-apiVersion: v2
-name: kafka-ui
-description: A Helm chart for kafka-UI
-type: application
-version: 0.6.2
-appVersion: v0.6.2
-icon: https://github.com/provectus/kafka-ui/raw/master/documentation/images/kafka-ui-logo.png
```
```diff
@@ -1 +0,0 @@
-Please refer to our [documentation](https://docs.kafka-ui.provectus.io/configuration/helm-charts) to get some info on our helm charts.
```
```diff
@@ -1,3 +0,0 @@
-apiVersion: v1
-entries: {}
-generated: "2021-11-11T12:26:08.479581+03:00"
```
```diff
@@ -1,21 +0,0 @@
-1. Get the application URL by running these commands:
-{{- if .Values.ingress.enabled }}
-{{- range $host := .Values.ingress.hosts }}
-  {{- range .paths }}
-  http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ . }}
-  {{- end }}
-{{- end }}
-{{- else if contains "NodePort" .Values.service.type }}
-  export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "kafka-ui.fullname" . }})
-  export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
-  echo http://$NODE_IP:$NODE_PORT
-{{- else if contains "LoadBalancer" .Values.service.type }}
-     NOTE: It may take a few minutes for the LoadBalancer IP to be available.
-           You can watch the status of by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "kafka-ui.fullname" . }}'
-  export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "kafka-ui.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
-  echo http://$SERVICE_IP:{{ .Values.service.port }}
-{{- else if contains "ClusterIP" .Values.service.type }}
-  export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "kafka-ui.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
-  echo "Visit http://127.0.0.1:8080 to use your application"
-  kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:8080
-{{- end }}
```
```diff
@@ -1,84 +0,0 @@
-{{/* vim: set filetype=mustache: */}}
-{{/*
-Expand the name of the chart.
-*/}}
-{{- define "kafka-ui.name" -}}
-{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
-{{- end }}
-
-{{/*
-Create a default fully qualified app name.
-We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
-If release name contains chart name it will be used as a full name.
-*/}}
-{{- define "kafka-ui.fullname" -}}
-{{- if .Values.fullnameOverride }}
-{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
-{{- else }}
-{{- $name := default .Chart.Name .Values.nameOverride }}
-{{- if contains $name .Release.Name }}
-{{- .Release.Name | trunc 63 | trimSuffix "-" }}
-{{- else }}
-{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
-{{- end }}
-{{- end }}
-{{- end }}
-
-{{/*
-Create chart name and version as used by the chart label.
-*/}}
-{{- define "kafka-ui.chart" -}}
-{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
-{{- end }}
-
-{{/*
-Common labels
-*/}}
-{{- define "kafka-ui.labels" -}}
-helm.sh/chart: {{ include "kafka-ui.chart" . }}
-{{ include "kafka-ui.selectorLabels" . }}
-{{- if .Chart.AppVersion }}
-app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
-{{- end }}
-app.kubernetes.io/managed-by: {{ .Release.Service }}
-{{- end }}
-
-{{/*
-Selector labels
-*/}}
-{{- define "kafka-ui.selectorLabels" -}}
-app.kubernetes.io/name: {{ include "kafka-ui.name" . }}
-app.kubernetes.io/instance: {{ .Release.Name }}
-{{- end }}
-
-{{/*
-Create the name of the service account to use
-*/}}
-{{- define "kafka-ui.serviceAccountName" -}}
-{{- if .Values.serviceAccount.create }}
-{{- default (include "kafka-ui.fullname" .) .Values.serviceAccount.name }}
-{{- else }}
-{{- default "default" .Values.serviceAccount.name }}
-{{- end }}
-{{- end }}
-
-
-{{/*
-This allows us to check if the registry of the image is specified or not.
-*/}}
-{{- define "kafka-ui.imageName" -}}
-{{- $registryName := .Values.image.registry -}}
-{{- if .Values.global }}
-{{- if .Values.global.imageRegistry }}
-{{- $registryName = .Values.global.imageRegistry -}}
-{{- end -}}
-{{- end -}}
-{{- $repository := .Values.image.repository -}}
-{{- $tag := .Values.image.tag | default .Chart.AppVersion -}}
-{{- if $registryName }}
-{{- printf "%s/%s:%s" $registryName $repository $tag -}}
-{{- else }}
-{{- printf "%s:%s" $repository $tag -}}
-{{- end }}
-{{- end -}}
```
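The deleted `kafka-ui.imageName` helper prepends an optional registry (per-chart or global) to `repository:tag`, falling back to `appVersion` for the tag. A hedged sketch of exercising that logic against a pre-deletion chart checkout; the chart path and value overrides are illustrative:

```bash
#!/usr/bin/env bash
# Render the deployment with and without a registry override to see how
# kafka-ui.imageName assembles the image reference.
helm template demo ./charts/kafka-ui \
  --set image.repository=provectuslabs/kafka-ui \
  --set image.tag=v0.6.2 | grep 'image:'
# -> image: provectuslabs/kafka-ui:v0.6.2

helm template demo ./charts/kafka-ui \
  --set global.imageRegistry=registry.example.com \
  --set image.repository=provectuslabs/kafka-ui \
  --set image.tag=v0.6.2 | grep 'image:'
# -> image: registry.example.com/provectuslabs/kafka-ui:v0.6.2
```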
```diff
@@ -1,10 +0,0 @@
-{{- if .Values.envs.config -}}
-apiVersion: v1
-kind: ConfigMap
-metadata:
-  name: {{ include "kafka-ui.fullname" . }}
-  labels:
-    {{- include "kafka-ui.labels" . | nindent 4 }}
-data:
-  {{- toYaml .Values.envs.config | nindent 2 }}
-{{- end -}}
```
```diff
@@ -1,11 +0,0 @@
-{{- if .Values.yamlApplicationConfig -}}
-apiVersion: v1
-kind: ConfigMap
-metadata:
-  name: {{ include "kafka-ui.fullname" . }}-fromvalues
-  labels:
-    {{- include "kafka-ui.labels" . | nindent 4 }}
-data:
-  config.yml: |-
-    {{- toYaml .Values.yamlApplicationConfig | nindent 4}}
-{{ end }}
```
```diff
@@ -1,150 +0,0 @@
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: {{ include "kafka-ui.fullname" . }}
-  labels:
-    {{- include "kafka-ui.labels" . | nindent 4 }}
-  {{- with .Values.annotations }}
-  annotations:
-    {{- toYaml . | nindent 4 }}
-  {{- end }}
-spec:
-  {{- if not .Values.autoscaling.enabled }}
-  replicas: {{ .Values.replicaCount }}
-  {{- end }}
-  selector:
-    matchLabels:
-      {{- include "kafka-ui.selectorLabels" . | nindent 6 }}
-  template:
-    metadata:
-      annotations:
-        {{- with .Values.podAnnotations }}
-        {{- toYaml . | nindent 8 }}
-        {{- end }}
-        checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }}
-        checksum/configFromValues: {{ include (print $.Template.BasePath "/configmap_fromValues.yaml") . | sha256sum }}
-        checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }}
-      labels:
-        {{- include "kafka-ui.selectorLabels" . | nindent 8 }}
-        {{- if .Values.podLabels }}
-        {{- toYaml .Values.podLabels | nindent 8 }}
-        {{- end }}
-    spec:
-      {{- with .Values.imagePullSecrets }}
-      imagePullSecrets:
-        {{- toYaml . | nindent 8 }}
-      {{- end }}
-      {{- with .Values.initContainers }}
-      initContainers:
-        {{- toYaml . | nindent 8 }}
-      {{- end }}
-      serviceAccountName: {{ include "kafka-ui.serviceAccountName" . }}
-      securityContext:
-        {{- toYaml .Values.podSecurityContext | nindent 8 }}
-      containers:
-        - name: {{ .Chart.Name }}
-          securityContext:
-            {{- toYaml .Values.securityContext | nindent 12 }}
-          image: {{ include "kafka-ui.imageName" . }}
-          imagePullPolicy: {{ .Values.image.pullPolicy }}
-          {{- if or .Values.env .Values.yamlApplicationConfig .Values.yamlApplicationConfigConfigMap}}
-          env:
-            {{- with .Values.env }}
-            {{- toYaml . | nindent 12 }}
-            {{- end }}
-            {{- if or .Values.yamlApplicationConfig .Values.yamlApplicationConfigConfigMap}}
-            - name: SPRING_CONFIG_ADDITIONAL-LOCATION
-              {{- if .Values.yamlApplicationConfig }}
-              value: /kafka-ui/config.yml
-              {{- else if .Values.yamlApplicationConfigConfigMap }}
-              value: /kafka-ui/{{ .Values.yamlApplicationConfigConfigMap.keyName | default "config.yml" }}
-              {{- end }}
-            {{- end }}
-          {{- end }}
-          envFrom:
-            {{- if .Values.existingConfigMap }}
-            - configMapRef:
-                name: {{ .Values.existingConfigMap }}
-            {{- end }}
-            {{- if .Values.envs.config }}
-            - configMapRef:
-                name: {{ include "kafka-ui.fullname" . }}
-            {{- end }}
-            {{- if .Values.existingSecret }}
-            - secretRef:
-                name: {{ .Values.existingSecret }}
-            {{- end }}
-            {{- if .Values.envs.secret}}
-            - secretRef:
-                name: {{ include "kafka-ui.fullname" . }}
-            {{- end}}
-          ports:
-            - name: http
-              containerPort: 8080
-              protocol: TCP
-          livenessProbe:
-            httpGet:
-              {{- $contextPath := .Values.envs.config.SERVER_SERVLET_CONTEXT_PATH | default "" | printf "%s/actuator/health" | urlParse }}
-              path: {{ get $contextPath "path" }}
-              port: http
-              {{- if .Values.probes.useHttpsScheme }}
-              scheme: HTTPS
-              {{- end }}
-            initialDelaySeconds: 60
-            periodSeconds: 30
-            timeoutSeconds: 10
-          readinessProbe:
-            httpGet:
-              {{- $contextPath := .Values.envs.config.SERVER_SERVLET_CONTEXT_PATH | default "" | printf "%s/actuator/health" | urlParse }}
-              path: {{ get $contextPath "path" }}
-              port: http
-              {{- if .Values.probes.useHttpsScheme }}
-              scheme: HTTPS
-              {{- end }}
-            initialDelaySeconds: 60
-            periodSeconds: 30
-            timeoutSeconds: 10
-          resources:
-            {{- toYaml .Values.resources | nindent 12 }}
-          {{- if or .Values.yamlApplicationConfig .Values.volumeMounts .Values.yamlApplicationConfigConfigMap}}
-          volumeMounts:
-            {{- with .Values.volumeMounts }}
-            {{- toYaml . | nindent 12 }}
-            {{- end }}
-            {{- if .Values.yamlApplicationConfig }}
-            - name: kafka-ui-yaml-conf
-              mountPath: /kafka-ui/
-            {{- end }}
-            {{- if .Values.yamlApplicationConfigConfigMap}}
-            - name: kafka-ui-yaml-conf-configmap
-              mountPath: /kafka-ui/
-            {{- end }}
-          {{- end }}
-      {{- if or .Values.yamlApplicationConfig .Values.volumes .Values.yamlApplicationConfigConfigMap}}
-      volumes:
-        {{- with .Values.volumes }}
-        {{- toYaml . | nindent 8 }}
-        {{- end }}
-        {{- if .Values.yamlApplicationConfig }}
-        - name: kafka-ui-yaml-conf
-          configMap:
-            name: {{ include "kafka-ui.fullname" . }}-fromvalues
-        {{- end }}
-        {{- if .Values.yamlApplicationConfigConfigMap}}
-        - name: kafka-ui-yaml-conf-configmap
-          configMap:
-            name: {{ .Values.yamlApplicationConfigConfigMap.name }}
-        {{- end }}
-      {{- end }}
-      {{- with .Values.nodeSelector }}
-      nodeSelector:
-        {{- toYaml . | nindent 8 }}
-      {{- end }}
-      {{- with .Values.affinity }}
-      affinity:
-        {{- toYaml . | nindent 8 }}
-      {{- end }}
-      {{- with .Values.tolerations }}
-      tolerations:
-        {{- toYaml . | nindent 8 }}
-      {{- end }}
```
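Two details of the deleted deployment template are worth calling out: the `checksum/*` pod annotations embed a sha256 of the rendered ConfigMap/Secret, so any config change alters the pod spec and forces a rolling restart, and `yamlApplicationConfig` is mounted at `/kafka-ui/config.yml` and handed to Spring through `SPRING_CONFIG_ADDITIONAL-LOCATION`. A hedged sketch of driving that from values; the chart path and cluster config are illustrative:

```bash
#!/usr/bin/env bash
# Supply the app config via yamlApplicationConfig; the template mounts it and
# hashes it into checksum/configFromValues, so editing these values changes
# the rendered pod spec and triggers a rollout on upgrade.
cat > demo-values.yaml <<'EOF'
yamlApplicationConfig:
  kafka:
    clusters:
      - name: local
        bootstrapServers: kafka:9092
EOF
helm template demo ./charts/kafka-ui -f demo-values.yaml \
  | grep -E 'checksum/configFromValues|SPRING_CONFIG_ADDITIONAL-LOCATION' -A1
```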
```diff
@@ -1,46 +0,0 @@
-{{- if .Values.autoscaling.enabled }}
-{{- $kubeCapabilityVersion := semver .Capabilities.KubeVersion.Version -}}
-{{- $isHigher1p25 := ge (semver "1.25" | $kubeCapabilityVersion.Compare) 0 -}}
-{{- if and ($.Capabilities.APIVersions.Has "autoscaling/v2") $isHigher1p25 -}}
-apiVersion: autoscaling/v2
-{{- else }}
-apiVersion: autoscaling/v2beta1
-{{- end }}
-kind: HorizontalPodAutoscaler
-metadata:
-  name: {{ include "kafka-ui.fullname" . }}
-  labels:
-    {{- include "kafka-ui.labels" . | nindent 4 }}
-spec:
-  scaleTargetRef:
-    apiVersion: apps/v1
-    kind: Deployment
-    name: {{ include "kafka-ui.fullname" . }}
-  minReplicas: {{ .Values.autoscaling.minReplicas }}
-  maxReplicas: {{ .Values.autoscaling.maxReplicas }}
-  metrics:
-  {{- if .Values.autoscaling.targetCPUUtilizationPercentage }}
-    - type: Resource
-      resource:
-        name: cpu
-        {{- if $isHigher1p25 }}
-        target:
-          type: Utilization
-          averageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }}
-        {{- else }}
-        targetAverageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }}
-        {{- end }}
-  {{- end }}
-  {{- if .Values.autoscaling.targetMemoryUtilizationPercentage }}
-    - type: Resource
-      resource:
-        name: memory
-        {{- if $isHigher1p25 }}
-        target:
-          type: Utilization
-          averageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }}
-        {{- else }}
-        targetAverageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }}
-        {{- end }}
-  {{- end }}
-{{- end }}
```
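The deleted HPA template selects `autoscaling/v2` versus `autoscaling/v2beta1` from the cluster version it renders against, flipping at Kubernetes 1.25. A hedged sketch of observing that switch offline with `helm template`; the chart path is illustrative, and `--api-versions` is supplied because offline rendering cannot discover cluster API groups on its own:

```bash
#!/usr/bin/env bash
# Render the HPA against two synthetic cluster versions and show which
# apiVersion the capability check picks.
for kv in 1.24.0 1.26.0; do
  printf 'kube-version %s -> ' "$kv"
  helm template demo ./charts/kafka-ui \
    --kube-version "$kv" \
    --api-versions autoscaling/v2 \
    --set autoscaling.enabled=true \
    --set autoscaling.targetCPUUtilizationPercentage=80 \
    | grep 'apiVersion: autoscaling'
done
# Expected: autoscaling/v2beta1 for 1.24.0, autoscaling/v2 for 1.26.0
```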
```diff
@@ -1,89 +0,0 @@
-{{- if .Values.ingress.enabled -}}
-{{- $fullName := include "kafka-ui.fullname" . -}}
-{{- $svcPort := .Values.service.port -}}
-{{- $kubeCapabilityVersion := semver .Capabilities.KubeVersion.Version -}}
-{{- $isHigher1p19 := ge (semver "1.19" | $kubeCapabilityVersion.Compare) 0 -}}
-{{- if and ($.Capabilities.APIVersions.Has "networking.k8s.io/v1") $isHigher1p19 -}}
-apiVersion: networking.k8s.io/v1
-{{- else if $.Capabilities.APIVersions.Has "networking.k8s.io/v1beta1" }}
-apiVersion: networking.k8s.io/v1beta1
-{{- else }}
-apiVersion: extensions/v1beta1
-{{- end }}
-kind: Ingress
-metadata:
-  name: {{ $fullName }}
-  labels:
-    {{- include "kafka-ui.labels" . | nindent 4 }}
-  {{- with .Values.ingress.annotations }}
-  annotations:
-    {{- toYaml . | nindent 4 }}
-  {{- end }}
-spec:
-  {{- if .Values.ingress.tls.enabled }}
-  tls:
-    - hosts:
-        - {{ tpl .Values.ingress.host . }}
-      secretName: {{ .Values.ingress.tls.secretName }}
-  {{- end }}
-  {{- if .Values.ingress.ingressClassName }}
-  ingressClassName: {{ .Values.ingress.ingressClassName }}
-  {{- end }}
-  rules:
-    - http:
-        paths:
-{{- if and ($.Capabilities.APIVersions.Has "networking.k8s.io/v1") $isHigher1p19 -}}
-{{- range .Values.ingress.precedingPaths }}
-          - path: {{ .path }}
-            pathType: {{ .Values.ingress.pathType }}
-            backend:
-              service:
-                name: {{ .serviceName }}
-                port:
-                  number: {{ .servicePort }}
-{{- end }}
-          - backend:
-              service:
-                name: {{ $fullName }}
-                port:
-                  number: {{ $svcPort }}
-            pathType: {{ .Values.ingress.pathType }}
-            {{- if .Values.ingress.path }}
-            path: {{ .Values.ingress.path }}
-            {{- end }}
-{{- range .Values.ingress.succeedingPaths }}
-          - path: {{ .path }}
-            pathType: {{ .Values.ingress.pathType }}
-            backend:
-              service:
-                name: {{ .serviceName }}
-                port:
-                  number: {{ .servicePort }}
-{{- end }}
-      {{- if tpl .Values.ingress.host . }}
-      host: {{tpl .Values.ingress.host . }}
-      {{- end }}
-{{- else -}}
-{{- range .Values.ingress.precedingPaths }}
-          - path: {{ .path }}
-            backend:
-              serviceName: {{ .serviceName }}
-              servicePort: {{ .servicePort }}
-{{- end }}
-          - backend:
-              serviceName: {{ $fullName }}
-              servicePort: {{ $svcPort }}
-            {{- if .Values.ingress.path }}
-            path: {{ .Values.ingress.path }}
-            {{- end }}
-{{- range .Values.ingress.succeedingPaths }}
-          - path: {{ .path }}
-            backend:
-              serviceName: {{ .serviceName }}
-              servicePort: {{ .servicePort }}
-{{- end }}
-      {{- if tpl .Values.ingress.host . }}
-      host: {{ tpl .Values.ingress.host . }}
-      {{- end }}
-{{- end }}
-{{- end }}
```
@@ -1,18 +0,0 @@
-{{- if and .Values.networkPolicy.enabled .Values.networkPolicy.egressRules.customRules }}
-apiVersion: networking.k8s.io/v1
-kind: NetworkPolicy
-metadata:
-  name: {{ printf "%s-egress" (include "kafka-ui.fullname" .) }}
-  labels:
-    {{- include "kafka-ui.labels" . | nindent 4 }}
-spec:
-  podSelector:
-    matchLabels:
-      {{- include "kafka-ui.selectorLabels" . | nindent 6 }}
-  policyTypes:
-    - Egress
-  egress:
-    {{- if .Values.networkPolicy.egressRules.customRules }}
-    {{- toYaml .Values.networkPolicy.egressRules.customRules | nindent 4 }}
-    {{- end }}
-{{- end }}
@@ -1,18 +0,0 @@
-{{- if and .Values.networkPolicy.enabled .Values.networkPolicy.ingressRules.customRules }}
-apiVersion: networking.k8s.io/v1
-kind: NetworkPolicy
-metadata:
-  name: {{ printf "%s-ingress" (include "kafka-ui.fullname" .) }}
-  labels:
-    {{- include "kafka-ui.labels" . | nindent 4 }}
-spec:
-  podSelector:
-    matchLabels:
-      {{- include "kafka-ui.selectorLabels" . | nindent 6 }}
-  policyTypes:
-    - Ingress
-  ingress:
-    {{- if .Values.networkPolicy.ingressRules.customRules }}
-    {{- toYaml .Values.networkPolicy.ingressRules.customRules | nindent 4 }}
-    {{- end }}
-{{- end }}
@@ -1,13 +0,0 @@
-{{- if .Values.envs.secret -}}
-apiVersion: v1
-kind: Secret
-metadata:
-  name: {{ include "kafka-ui.fullname" . }}
-  labels:
-    {{- include "kafka-ui.labels" . | nindent 4 }}
-type: Opaque
-data:
-  {{- range $key, $val := .Values.envs.secret }}
-  {{ $key }}: {{ $val | b64enc | quote }}
-  {{- end -}}
-{{- end}}
@@ -1,22 +0,0 @@
-apiVersion: v1
-kind: Service
-metadata:
-  name: {{ include "kafka-ui.fullname" . }}
-  labels:
-    {{- include "kafka-ui.labels" . | nindent 4 }}
-  {{- if .Values.service.annotations }}
-  annotations:
-    {{ toYaml .Values.service.annotations | nindent 4 }}
-  {{- end }}
-spec:
-  type: {{ .Values.service.type }}
-  ports:
-    - port: {{ .Values.service.port }}
-      targetPort: http
-      protocol: TCP
-      name: http
-      {{- if (and (eq .Values.service.type "NodePort") .Values.service.nodePort) }}
-      nodePort: {{ .Values.service.nodePort }}
-      {{- end }}
-  selector:
-    {{- include "kafka-ui.selectorLabels" . | nindent 4 }}
@@ -1,12 +0,0 @@
-{{- if .Values.serviceAccount.create -}}
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  name: {{ include "kafka-ui.serviceAccountName" . }}
-  labels:
-    {{- include "kafka-ui.labels" . | nindent 4 }}
-  {{- with .Values.serviceAccount.annotations }}
-  annotations:
-    {{- toYaml . | nindent 4 }}
-  {{- end }}
-{{- end }}
@@ -1,161 +0,0 @@
-replicaCount: 1
-
-image:
-  registry: docker.io
-  repository: provectuslabs/kafka-ui
-  pullPolicy: IfNotPresent
-  # Overrides the image tag whose default is the chart appVersion.
-  tag: ""
-
-imagePullSecrets: []
-nameOverride: ""
-fullnameOverride: ""
-
-serviceAccount:
-  # Specifies whether a service account should be created
-  create: true
-  # Annotations to add to the service account
-  annotations: {}
-  # The name of the service account to use.
-  # If not set and create is true, a name is generated using the fullname template
-  name: ""
-
-existingConfigMap: ""
-yamlApplicationConfig:
-  {}
-  # kafka:
-  #   clusters:
-  #     - name: yaml
-  #       bootstrapServers: kafka-service:9092
-  # spring:
-  #   security:
-  #     oauth2:
-  # auth:
-  #   type: disabled
-  # management:
-  #   health:
-  #     ldap:
-  #       enabled: false
-yamlApplicationConfigConfigMap:
-  {}
-  # keyName: config.yml
-  # name: configMapName
-existingSecret: ""
-envs:
-  secret: {}
-  config: {}
-
-networkPolicy:
-  enabled: false
-  egressRules:
-    ## Additional custom egress rules
-    ## e.g:
-    ## customRules:
-    ##   - to:
-    ##       - namespaceSelector:
-    ##           matchLabels:
-    ##             label: example
-    customRules: []
-  ingressRules:
-    ## Additional custom ingress rules
-    ## e.g:
-    ## customRules:
-    ##   - from:
-    ##       - namespaceSelector:
-    ##           matchLabels:
-    ##             label: example
-    customRules: []
-
-podAnnotations: {}
-podLabels: {}
-
-## Annotations to be added to kafka-ui Deployment
-##
-annotations: {}
-
-## Set field schema as HTTPS for readines and liveness probe
-##
-probes:
-  useHttpsScheme: false
-
-podSecurityContext:
-  {}
-  # fsGroup: 2000
-
-securityContext:
-  {}
-  # capabilities:
-  #   drop:
-  #     - ALL
-  # readOnlyRootFilesystem: true
-  # runAsNonRoot: true
-  # runAsUser: 1000
-
-service:
-  type: ClusterIP
-  port: 80
-  # if you want to force a specific nodePort. Must be use with service.type=NodePort
-  # nodePort:
-
-# Ingress configuration
-ingress:
-  # Enable ingress resource
-  enabled: false
-
-  # Annotations for the Ingress
-  annotations: {}
-
-  # ingressClassName for the Ingress
-  ingressClassName: ""
-
-  # The path for the Ingress
-  path: "/"
-
-  # The path type for the Ingress
-  pathType: "Prefix"
-
-  # The hostname for the Ingress
-  host: ""
-
-  # configs for Ingress TLS
-  tls:
-    # Enable TLS termination for the Ingress
-    enabled: false
-    # the name of a pre-created Secret containing a TLS private key and certificate
-    secretName: ""
-
-  # HTTP paths to add to the Ingress before the default path
-  precedingPaths: []
-
-  # Http paths to add to the Ingress after the default path
-  succeedingPaths: []
-
-resources:
-  {}
-  # limits:
-  #   cpu: 200m
-  #   memory: 512Mi
-  # requests:
-  #   cpu: 200m
-  #   memory: 256Mi
-
-autoscaling:
-  enabled: false
-  minReplicas: 1
-  maxReplicas: 100
-  targetCPUUtilizationPercentage: 80
-  # targetMemoryUtilizationPercentage: 80
-
-nodeSelector: {}
-
-tolerations: []
-
-affinity: {}
-
-env: {}
-
-initContainers: {}
-
-volumeMounts: {}
-
-volumes: {}
@@ -8,9 +8,9 @@
 6. [kafka-ui-auth-context.yaml](./kafka-ui-auth-context.yaml) - Basic (username/password) authentication with custom path (URL) (issue 861).
 7. [e2e-tests.yaml](./e2e-tests.yaml) - Configuration with different connectors (github-source, s3, sink-activities, source-activities) and Ksql functionality.
 8. [kafka-ui-jmx-secured.yml](./kafka-ui-jmx-secured.yml) - Kafka’s JMX with SSL and authentication.
-9. [kafka-ui-reverse-proxy.yaml](./kafka-ui-reverse-proxy.yaml) - An example for using the app behind a proxy (like nginx).
+9. [kafka-ui-reverse-proxy.yaml](./nginx-proxy.yaml) - An example for using the app behind a proxy (like nginx).
 10. [kafka-ui-sasl.yaml](./kafka-ui-sasl.yaml) - SASL auth for Kafka.
-11. [kafka-ui-traefik-proxy.yaml](./kafka-ui-traefik-proxy.yaml) - Traefik specific proxy configuration.
+11. [kafka-ui-traefik-proxy.yaml](./traefik-proxy.yaml) - Traefik specific proxy configuration.
 12. [oauth-cognito.yaml](./oauth-cognito.yaml) - OAuth2 with Cognito
 13. [kafka-ui-with-jmx-exporter.yaml](./kafka-ui-with-jmx-exporter.yaml) - A configuration with 2 kafka clusters with enabled prometheus jmx exporters instead of jmx.
 14. [kafka-with-zookeeper.yaml](./kafka-with-zookeeper.yaml) - An example for using kafka with zookeeper
@@ -124,7 +124,7 @@ services:
   kafka-init-topics:
     image: confluentinc/cp-kafka:7.2.1
     volumes:
-      - ./message.json:/data/message.json
+      - ./data/message.json:/data/message.json
     depends_on:
       kafka0:
         condition: service_healthy
@@ -187,4 +187,4 @@ services:
       KSQL_KSQL_SCHEMA_REGISTRY_URL: http://schemaregistry0:8085
       KSQL_KSQL_SERVICE_ID: my_ksql_1
       KSQL_KSQL_HIDDEN_TOPICS: '^_.*'
-      KSQL_CACHE_MAX_BYTES_BUFFERING: 0
\ No newline at end of file
+      KSQL_CACHE_MAX_BYTES_BUFFERING: 0
@@ -11,4 +11,8 @@ KafkaClient {
     user_admin="admin-secret";
 };
 
-Client {};
+Client {
+    org.apache.zookeeper.server.auth.DigestLoginModule required
+    username="zkuser"
+    password="zkuserpassword";
+};
documentation/compose/jaas/zookeeper_jaas.conf (new file, +4)
@@ -0,0 +1,4 @@
+Server {
+    org.apache.zookeeper.server.auth.DigestLoginModule required
+    user_zkuser="zkuserpassword";
+};
@@ -1,2 +1,2 @@
 rules:
-  - pattern: ".*"
\ No newline at end of file
+  - pattern: ".*"
@@ -57,7 +57,7 @@ services:
   kafka-init-topics:
     image: confluentinc/cp-kafka:7.2.1
     volumes:
-      - ./message.json:/data/message.json
+      - ./data/message.json:/data/message.json
     depends_on:
       - kafka1
     command: "bash -c 'echo Waiting for Kafka to be ready... && \
@@ -80,4 +80,4 @@ services:
       KAFKA_CLUSTERS_0_METRICS_PORT: 9997
       KAFKA_CLUSTERS_0_SCHEMAREGISTRY: http://schemaregistry1:8085
       KAFKA_CLUSTERS_0_SCHEMAREGISTRYAUTH_USERNAME: admin
-      KAFKA_CLUSTERS_0_SCHEMAREGISTRYAUTH_PASSWORD: letmein
\ No newline at end of file
+      KAFKA_CLUSTERS_0_SCHEMAREGISTRYAUTH_PASSWORD: letmein
@@ -1,84 +0,0 @@
----
-version: "2"
-services:
-  kafka0:
-    image: confluentinc/cp-kafka:7.2.1
-    hostname: kafka0
-    container_name: kafka0
-    ports:
-      - "9092:9092"
-      - "9997:9997"
-    environment:
-      KAFKA_BROKER_ID: 1
-      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: "CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT"
-      KAFKA_ADVERTISED_LISTENERS: "PLAINTEXT://kafka0:29092,PLAINTEXT_HOST://localhost:9092"
-      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
-      KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0
-      KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1
-      KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1
-      KAFKA_JMX_PORT: 9997
-      KAFKA_JMX_HOSTNAME: localhost
-      KAFKA_PROCESS_ROLES: "broker,controller"
-      KAFKA_NODE_ID: 1
-      KAFKA_CONTROLLER_QUORUM_VOTERS: "1@kafka0:29093"
-      KAFKA_LISTENERS: "PLAINTEXT://kafka0:29092,CONTROLLER://kafka0:29093,PLAINTEXT_HOST://0.0.0.0:9092"
-      KAFKA_INTER_BROKER_LISTENER_NAME: "PLAINTEXT"
-      KAFKA_CONTROLLER_LISTENER_NAMES: "CONTROLLER"
-      KAFKA_LOG_DIRS: "/tmp/kraft-combined-logs"
-    volumes:
-      - ./scripts/update_run_cluster.sh:/tmp/update_run.sh
-      - ./scripts/clusterID:/tmp/clusterID
-    command: 'bash -c ''if [ ! -f /tmp/update_run.sh ]; then echo "ERROR: Did you forget the update_run.sh file that came with this docker-compose.yml file?" && exit 1 ; else /tmp/update_run.sh && /etc/confluent/docker/run ; fi'''
-
-  schemaregistry0:
-    image: confluentinc/cp-schema-registry:7.2.1
-    depends_on:
-      - kafka0
-    environment:
-      SCHEMA_REGISTRY_KAFKASTORE_BOOTSTRAP_SERVERS: PLAINTEXT://kafka0:29092
-      SCHEMA_REGISTRY_KAFKASTORE_SECURITY_PROTOCOL: PLAINTEXT
-      SCHEMA_REGISTRY_HOST_NAME: schemaregistry0
-      SCHEMA_REGISTRY_LISTENERS: http://schemaregistry0:8085
-
-      SCHEMA_REGISTRY_SCHEMA_REGISTRY_INTER_INSTANCE_PROTOCOL: "http"
-      SCHEMA_REGISTRY_LOG4J_ROOT_LOGLEVEL: INFO
-      SCHEMA_REGISTRY_KAFKASTORE_TOPIC: _schemas
-    ports:
-      - 8085:8085
-
-  kafka-connect0:
-    image: confluentinc/cp-kafka-connect:7.2.1
-    ports:
-      - 8083:8083
-    depends_on:
-      - kafka0
-      - schemaregistry0
-    environment:
-      CONNECT_BOOTSTRAP_SERVERS: kafka0:29092
-      CONNECT_GROUP_ID: compose-connect-group
-      CONNECT_CONFIG_STORAGE_TOPIC: _connect_configs
-      CONNECT_CONFIG_STORAGE_REPLICATION_FACTOR: 1
-      CONNECT_OFFSET_STORAGE_TOPIC: _connect_offset
-      CONNECT_OFFSET_STORAGE_REPLICATION_FACTOR: 1
-      CONNECT_STATUS_STORAGE_TOPIC: _connect_status
-      CONNECT_STATUS_STORAGE_REPLICATION_FACTOR: 1
-      CONNECT_KEY_CONVERTER: org.apache.kafka.connect.storage.StringConverter
-      CONNECT_KEY_CONVERTER_SCHEMA_REGISTRY_URL: http://schemaregistry0:8085
-      CONNECT_VALUE_CONVERTER: org.apache.kafka.connect.storage.StringConverter
-      CONNECT_VALUE_CONVERTER_SCHEMA_REGISTRY_URL: http://schemaregistry0:8085
-      CONNECT_INTERNAL_KEY_CONVERTER: org.apache.kafka.connect.json.JsonConverter
-      CONNECT_INTERNAL_VALUE_CONVERTER: org.apache.kafka.connect.json.JsonConverter
-      CONNECT_REST_ADVERTISED_HOST_NAME: kafka-connect0
-      CONNECT_PLUGIN_PATH: "/usr/share/java,/usr/share/confluent-hub-components"
-
-  kafka-init-topics:
-    image: confluentinc/cp-kafka:7.2.1
-    volumes:
-      - ./message.json:/data/message.json
-    depends_on:
-      - kafka0
-    command: "bash -c 'echo Waiting for Kafka to be ready... && \
-      cub kafka-ready -b kafka0:29092 1 30 && \
-      kafka-topics --create --topic users --partitions 3 --replication-factor 1 --if-not-exists --bootstrap-server kafka0:29092 && \
-      kafka-topics --create --topic messages --partitions 2 --replication-factor 1 --if-not-exists --bootstrap-server kafka0:29092 && \
-      kafka-console-producer --bootstrap-server kafka0:29092 --topic users < /data/message.json'"
documentation/compose/kafka-ui-acl-with-zk.yaml (new file, +59)
@@ -0,0 +1,59 @@
+---
+version: '2'
+services:
+
+  kafka-ui:
+    container_name: kafka-ui
+    image: provectuslabs/kafka-ui:latest
+    ports:
+      - 8080:8080
+    depends_on:
+      - zookeeper
+      - kafka
+    environment:
+      KAFKA_CLUSTERS_0_NAME: local
+      KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka:29092
+      KAFKA_CLUSTERS_0_PROPERTIES_SECURITY_PROTOCOL: SASL_PLAINTEXT
+      KAFKA_CLUSTERS_0_PROPERTIES_SASL_MECHANISM: PLAIN
+      KAFKA_CLUSTERS_0_PROPERTIES_SASL_JAAS_CONFIG: 'org.apache.kafka.common.security.plain.PlainLoginModule required username="admin" password="admin-secret";'
+
+  zookeeper:
+    image: wurstmeister/zookeeper:3.4.6
+    environment:
+      JVMFLAGS: "-Djava.security.auth.login.config=/etc/zookeeper/zookeeper_jaas.conf"
+    volumes:
+      - ./jaas/zookeeper_jaas.conf:/etc/zookeeper/zookeeper_jaas.conf
+    ports:
+      - 2181:2181
+
+  kafka:
+    image: confluentinc/cp-kafka:7.2.1
+    hostname: kafka
+    container_name: kafka
+    ports:
+      - "9092:9092"
+      - "9997:9997"
+    environment:
+      KAFKA_BROKER_ID: 1
+      KAFKA_ZOOKEEPER_CONNECT: 'zookeeper:2181'
+      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: 'CONTROLLER:PLAINTEXT,SASL_PLAINTEXT:SASL_PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT'
+      KAFKA_ADVERTISED_LISTENERS: 'SASL_PLAINTEXT://kafka:29092,PLAINTEXT_HOST://localhost:9092'
+      KAFKA_OPTS: "-Djava.security.auth.login.config=/etc/kafka/jaas/kafka_server.conf"
+      KAFKA_AUTHORIZER_CLASS_NAME: "kafka.security.authorizer.AclAuthorizer"
+      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
+      KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0
+      KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1
+      KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1
+      KAFKA_JMX_PORT: 9997
+      KAFKA_JMX_HOSTNAME: localhost
+      KAFKA_NODE_ID: 1
+      KAFKA_CONTROLLER_QUORUM_VOTERS: '1@kafka:29093'
+      KAFKA_LISTENERS: 'SASL_PLAINTEXT://kafka:29092,CONTROLLER://kafka:29093,PLAINTEXT_HOST://0.0.0.0:9092'
+      KAFKA_INTER_BROKER_LISTENER_NAME: 'SASL_PLAINTEXT'
+      KAFKA_SASL_ENABLED_MECHANISMS: 'PLAIN'
+      KAFKA_SASL_MECHANISM_INTER_BROKER_PROTOCOL: 'PLAIN'
+      KAFKA_SECURITY_PROTOCOL: 'SASL_PLAINTEXT'
+      KAFKA_SUPER_USERS: 'User:admin'
+    volumes:
+      - ./scripts/update_run.sh:/tmp/update_run.sh
+      - ./jaas:/etc/kafka/jaas
@@ -20,6 +20,8 @@ services:
       KAFKA_CLUSTERS_0_KAFKACONNECT_0_NAME: first
       KAFKA_CLUSTERS_0_KAFKACONNECT_0_ADDRESS: http://kafka-connect0:8083
       DYNAMIC_CONFIG_ENABLED: 'true' # not necessary, added for tests
+      KAFKA_CLUSTERS_0_AUDIT_TOPICAUDITENABLED: 'true'
+      KAFKA_CLUSTERS_0_AUDIT_CONSOLEAUDITENABLED: 'true'
 
   kafka0:
     image: confluentinc/cp-kafka:7.2.1.arm64
@@ -93,7 +95,7 @@ services:
   kafka-init-topics:
     image: confluentinc/cp-kafka:7.2.1.arm64
     volumes:
-      - ./message.json:/data/message.json
+      - ./data/message.json:/data/message.json
     depends_on:
       - kafka0
     command: "bash -c 'echo Waiting for Kafka to be ready... && \
@@ -69,7 +69,7 @@ services:
     build:
       context: ./kafka-connect
       args:
-        image: confluentinc/cp-kafka-connect:6.0.1
+        image: confluentinc/cp-kafka-connect:7.2.1
     ports:
       - 8083:8083
     depends_on:
@@ -104,7 +104,7 @@ services:
   kafka-init-topics:
     image: confluentinc/cp-kafka:7.2.1
     volumes:
-      - ./message.json:/data/message.json
+      - ./data/message.json:/data/message.json
     depends_on:
       - kafka0
     command: "bash -c 'echo Waiting for Kafka to be ready... && \
@@ -115,7 +115,7 @@ services:
       SCHEMA_REGISTRY_KAFKASTORE_TOPIC: _schemas
 
   kafka-connect0:
-    image: confluentinc/cp-kafka-connect:6.0.1
+    image: confluentinc/cp-kafka-connect:7.2.1
     ports:
       - 8083:8083
     depends_on:
@@ -142,7 +142,7 @@ services:
   kafka-init-topics:
     image: confluentinc/cp-kafka:7.2.1
     volumes:
-      - ./message.json:/data/message.json
+      - ./data/message.json:/data/message.json
     depends_on:
      - kafka1
     command: "bash -c 'echo Waiting for Kafka to be ready... && \
@@ -38,7 +38,7 @@ services:
   kafka-init-topics:
     image: confluentinc/cp-kafka:7.2.1
     volumes:
-      - ./message.json:/data/message.json
+      - ./data/message.json:/data/message.json
     depends_on:
       - kafka
     command: "bash -c 'echo Waiting for Kafka to be ready... && \
@@ -15,26 +15,23 @@ services:
       KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka0:29092
       KAFKA_CLUSTERS_0_METRICS_PORT: 9997
       KAFKA_CLUSTERS_0_SCHEMAREGISTRY: http://schemaregistry0:8085
 
       AUTH_TYPE: "LDAP"
       SPRING_LDAP_URLS: "ldap://ldap:10389"
-      SPRING_LDAP_DN_PATTERN: "cn={0},ou=people,dc=planetexpress,dc=com"
-
-      # ===== USER SEARCH FILTER INSTEAD OF DN =====
-
-      # SPRING_LDAP_USERFILTER_SEARCHBASE: "dc=planetexpress,dc=com"
-      # SPRING_LDAP_USERFILTER_SEARCHFILTER: "(&(uid={0})(objectClass=inetOrgPerson))"
-      # LDAP ADMIN USER
-      # SPRING_LDAP_ADMINUSER: "cn=admin,dc=planetexpress,dc=com"
-      # SPRING_LDAP_ADMINPASSWORD: "GoodNewsEveryone"
-
-      # ===== ACTIVE DIRECTORY =====
-
-      # OAUTH2.LDAP.ACTIVEDIRECTORY: true
-      # OAUTH2.LDAP.AСTIVEDIRECTORY.DOMAIN: "memelord.lol"
+      SPRING_LDAP_BASE: "cn={0},ou=people,dc=planetexpress,dc=com"
+      SPRING_LDAP_ADMIN_USER: "cn=admin,dc=planetexpress,dc=com"
+      SPRING_LDAP_ADMIN_PASSWORD: "GoodNewsEveryone"
+      SPRING_LDAP_USER_FILTER_SEARCH_BASE: "dc=planetexpress,dc=com"
+      SPRING_LDAP_USER_FILTER_SEARCH_FILTER: "(&(uid={0})(objectClass=inetOrgPerson))"
+      SPRING_LDAP_GROUP_FILTER_SEARCH_BASE: "ou=people,dc=planetexpress,dc=com"
+      # OAUTH2.LDAP.ACTIVEDIRECTORY: true
+      # OAUTH2.LDAP.AСTIVEDIRECTORY.DOMAIN: "memelord.lol"
 
   ldap:
     image: rroemhild/test-openldap:latest
     hostname: "ldap"
     ports:
       - 10389:10389
 
   kafka0:
     image: confluentinc/cp-kafka:7.2.1
|
|||
|
||||
SCHEMA_REGISTRY_SCHEMA_REGISTRY_INTER_INSTANCE_PROTOCOL: "http"
|
||||
SCHEMA_REGISTRY_LOG4J_ROOT_LOGLEVEL: INFO
|
||||
SCHEMA_REGISTRY_KAFKASTORE_TOPIC: _schemas
|
||||
SCHEMA_REGISTRY_KAFKASTORE_TOPIC: _schemas
|
|
@@ -4,7 +4,7 @@ services:
   nginx:
     image: nginx:latest
     volumes:
-      - ./proxy.conf:/etc/nginx/conf.d/default.conf
+      - ./data/proxy.conf:/etc/nginx/conf.d/default.conf
     ports:
       - 8080:80
@@ -1,22 +0,0 @@
----
-version: '3.4'
-services:
-
-  kafka-ui:
-    container_name: kafka-ui
-    image: provectuslabs/kafka-ui:local
-    ports:
-      - 8080:8080
-    depends_on:
-      - kafka0 # OMITTED, TAKE UP AN EXAMPLE FROM OTHER COMPOSE FILES
-    environment:
-      KAFKA_CLUSTERS_0_NAME: local
-      KAFKA_CLUSTERS_0_PROPERTIES_SECURITY_PROTOCOL: SSL
-      KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka0:29092
-      AUTH_TYPE: OAUTH2_COGNITO
-      AUTH_COGNITO_ISSUER_URI: "https://cognito-idp.eu-central-1.amazonaws.com/eu-central-xxxxxx"
-      AUTH_COGNITO_CLIENT_ID: ""
-      AUTH_COGNITO_CLIENT_SECRET: ""
-      AUTH_COGNITO_SCOPE: "openid"
-      AUTH_COGNITO_USER_NAME_ATTRIBUTE: "username"
-      AUTH_COGNITO_LOGOUT_URI: "https://<domain>.auth.eu-central-1.amazoncognito.com/logout"
@@ -1,7 +1,11 @@
 #FROM azul/zulu-openjdk-alpine:17-jre-headless
 FROM azul/zulu-openjdk-alpine@sha256:a36679ac0d28cb835e2a8c00e1e0d95509c6c51c5081c7782b85edb1f37a771a
 
-RUN apk add --no-cache gcompat # need to make snappy codec work
+RUN apk add --no-cache \
+    # snappy codec
+    gcompat \
+    # configuring timezones
+    tzdata
 RUN addgroup -S kafkaui && adduser -S kafkaui -G kafkaui
 
 # creating folder for dynamic config usage (certificates uploads, etc)
@@ -12,7 +12,7 @@
     <artifactId>kafka-ui-api</artifactId>
 
     <properties>
-        <jacoco.version>0.8.8</jacoco.version>
+        <jacoco.version>0.8.10</jacoco.version>
         <sonar.java.coveragePlugin>jacoco</sonar.java.coveragePlugin>
         <sonar.dynamicAnalysis>reuseReports</sonar.dynamicAnalysis>
         <sonar.jacoco.reportPath>${project.basedir}/target/jacoco.exec</sonar.jacoco.reportPath>
@@ -21,12 +21,6 @@
     </properties>
 
     <dependencies>
-        <dependency>
-            <!--TODO: remove, when spring-boot fixed dependency to 6.0.8+ (6.0.7 has CVE) -->
-            <groupId>org.springframework</groupId>
-            <artifactId>spring-core</artifactId>
-            <version>6.0.8</version>
-        </dependency>
         <dependency>
             <groupId>org.springframework.boot</groupId>
             <artifactId>spring-boot-starter-webflux</artifactId>
@@ -61,7 +55,7 @@
         <dependency>
             <groupId>org.apache.commons</groupId>
             <artifactId>commons-lang3</artifactId>
-            <version>3.9</version>
+            <version>3.12.0</version>
         </dependency>
         <dependency>
             <groupId>org.projectlombok</groupId>
@@ -87,6 +81,12 @@
             <groupId>io.confluent</groupId>
             <artifactId>kafka-json-schema-serializer</artifactId>
             <version>${confluent.version}</version>
+            <exclusions>
+                <exclusion>
+                    <groupId>commons-collections</groupId>
+                    <artifactId>commons-collections</artifactId>
+                </exclusion>
+            </exclusions>
         </dependency>
         <dependency>
             <groupId>io.confluent</groupId>
@@ -97,7 +97,7 @@
         <dependency>
             <groupId>software.amazon.msk</groupId>
             <artifactId>aws-msk-iam-auth</artifactId>
-            <version>1.1.5</version>
+            <version>1.1.7</version>
         </dependency>
 
         <dependency>
@@ -115,12 +115,16 @@
             <groupId>io.projectreactor.addons</groupId>
             <artifactId>reactor-extra</artifactId>
         </dependency>
+        <!-- https://github.com/provectus/kafka-ui/pull/3693 -->
+        <dependency>
+            <groupId>org.json</groupId>
+            <artifactId>json</artifactId>
+            <version>${org.json.version}</version>
+        </dependency>
         <dependency>
             <groupId>io.micrometer</groupId>
             <artifactId>micrometer-registry-prometheus</artifactId>
             <scope>runtime</scope>
         </dependency>
 
         <dependency>
             <groupId>org.springframework.boot</groupId>
@@ -137,6 +141,11 @@
             <artifactId>commons-pool2</artifactId>
             <version>${apache.commons.version}</version>
         </dependency>
+        <dependency>
+            <groupId>org.apache.commons</groupId>
+            <artifactId>commons-collections4</artifactId>
+            <version>4.4</version>
+        </dependency>
         <dependency>
             <groupId>org.testcontainers</groupId>
             <artifactId>testcontainers</artifactId>
@@ -240,8 +249,6 @@
             <groupId>org.springframework.security</groupId>
             <artifactId>spring-security-ldap</artifactId>
         </dependency>
-
-
         <dependency>
             <groupId>org.codehaus.groovy</groupId>
             <artifactId>groovy-jsr223</artifactId>
@@ -318,7 +325,7 @@
             <plugin>
                 <groupId>org.apache.maven.plugins</groupId>
                 <artifactId>maven-checkstyle-plugin</artifactId>
-                <version>3.1.2</version>
+                <version>3.3.0</version>
                 <dependencies>
                     <dependency>
                         <groupId>com.puppycrawl.tools</groupId>
@@ -396,7 +403,7 @@
             <plugin>
                 <groupId>pl.project13.maven</groupId>
                 <artifactId>git-commit-id-plugin</artifactId>
-                <version>4.0.0</version>
+                <version>4.9.10</version>
                 <executions>
                     <execution>
                         <id>get-the-git-infos</id>
@@ -51,13 +51,12 @@ public class ClustersProperties {
     List<Masking> masking;
     Long pollingThrottleRate;
     TruststoreConfig ssl;
+    AuditProperties audit;
   }
 
   @Data
   public static class PollingProperties {
     Integer pollTimeoutMs;
-    Integer partitionPollTimeout;
-    Integer noDataEmptyPolls;
     Integer maxPageSize;
     Integer defaultPageSize;
   }
@@ -131,8 +130,9 @@ public class ClustersProperties {
   @Data
   public static class Masking {
     Type type;
-    List<String> fields; //if null or empty list - policy will be applied to all fields
-    List<String> pattern; //used when type=MASK
+    List<String> fields;
+    String fieldsNamePattern;
+    List<String> maskingCharsReplacement; //used when type=MASK
     String replacement; //used when type=REPLACE
     String topicKeysPattern;
     String topicValuesPattern;
@@ -142,6 +142,23 @@ public class ClustersProperties {
     }
   }
 
+  @Data
+  @NoArgsConstructor
+  @AllArgsConstructor
+  public static class AuditProperties {
+    String topic;
+    Integer auditTopicsPartitions;
+    Boolean topicAuditEnabled;
+    Boolean consoleAuditEnabled;
+    LogLevel level;
+    Map<String, String> auditTopicProperties;
+
+    public enum LogLevel {
+      ALL,
+      ALTER_ONLY //default
+    }
+  }
+
   @PostConstruct
   public void validateAndSetDefaults() {
     if (clusters != null) {
@@ -1,18 +1,39 @@
 package com.provectus.kafka.ui.config;
 
+import org.springframework.context.annotation.Bean;
 import org.springframework.context.annotation.Configuration;
-import org.springframework.web.reactive.config.CorsRegistry;
-import org.springframework.web.reactive.config.WebFluxConfigurer;
+import org.springframework.http.HttpHeaders;
+import org.springframework.http.HttpMethod;
+import org.springframework.http.HttpStatus;
+import org.springframework.http.server.reactive.ServerHttpRequest;
+import org.springframework.http.server.reactive.ServerHttpResponse;
+import org.springframework.web.server.ServerWebExchange;
+import org.springframework.web.server.WebFilter;
+import org.springframework.web.server.WebFilterChain;
+import reactor.core.publisher.Mono;
 
 @Configuration
-public class CorsGlobalConfiguration implements WebFluxConfigurer {
+public class CorsGlobalConfiguration {
 
-  @Override
-  public void addCorsMappings(CorsRegistry registry) {
-    registry.addMapping("/**")
-        .allowedOrigins("*")
-        .allowedMethods("*")
-        .allowedHeaders("*")
-        .allowCredentials(false);
+  @Bean
+  public WebFilter corsFilter() {
+    return (final ServerWebExchange ctx, final WebFilterChain chain) -> {
+      final ServerHttpRequest request = ctx.getRequest();
+
+      final ServerHttpResponse response = ctx.getResponse();
+      final HttpHeaders headers = response.getHeaders();
+      headers.add("Access-Control-Allow-Origin", "*");
+      headers.add("Access-Control-Allow-Methods", "GET, PUT, POST, DELETE, OPTIONS");
+      headers.add("Access-Control-Max-Age", "3600");
+      headers.add("Access-Control-Allow-Headers", "Content-Type");
+
+      if (request.getMethod() == HttpMethod.OPTIONS) {
+        response.setStatusCode(HttpStatus.OK);
+        return Mono.empty();
+      }
+
+      return chain.filter(ctx);
+    };
   }
 }
@@ -1,7 +1,6 @@
 package com.provectus.kafka.ui.config;
 
 import com.provectus.kafka.ui.exception.ValidationException;
-import java.beans.Transient;
 import javax.annotation.PostConstruct;
 import lombok.Data;
 import org.springframework.boot.context.properties.ConfigurationProperties;
@@ -13,6 +13,7 @@ abstract class AbstractAuthSecurityConfig {
       "/resources/**",
       "/actuator/health/**",
       "/actuator/info",
+      "/actuator/prometheus",
       "/auth",
       "/login",
       "/logout",
@@ -1,7 +1,6 @@
 package com.provectus.kafka.ui.config.auth;
 
 import java.util.Collection;
-import lombok.Value;
 
 public record AuthenticatedUser(String principal, Collection<String> groups) {
 
@@ -6,13 +6,13 @@ import lombok.extern.slf4j.Slf4j;
 import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
 import org.springframework.context.annotation.Bean;
 import org.springframework.context.annotation.Configuration;
+import org.springframework.http.HttpMethod;
 import org.springframework.security.config.annotation.web.reactive.EnableWebFluxSecurity;
-import org.springframework.security.config.web.server.SecurityWebFiltersOrder;
 import org.springframework.security.config.web.server.ServerHttpSecurity;
 import org.springframework.security.web.server.SecurityWebFilterChain;
 import org.springframework.security.web.server.authentication.RedirectServerAuthenticationSuccessHandler;
 import org.springframework.security.web.server.authentication.logout.RedirectServerLogoutSuccessHandler;
-import org.springframework.security.web.server.ui.LogoutPageGeneratingWebFilter;
+import org.springframework.security.web.server.util.matcher.ServerWebExchangeMatchers;
 
 @Configuration
 @EnableWebFluxSecurity
@@ -33,15 +33,19 @@ public class BasicAuthSecurityConfig extends AbstractAuthSecurityConfig {
     final var logoutSuccessHandler = new RedirectServerLogoutSuccessHandler();
     logoutSuccessHandler.setLogoutSuccessUrl(URI.create(LOGOUT_URL));
 
-    return http
-        .addFilterAfter(new LogoutPageGeneratingWebFilter(), SecurityWebFiltersOrder.REACTOR_CONTEXT)
-        .csrf().disable()
-        .authorizeExchange()
-        .pathMatchers(AUTH_WHITELIST).permitAll()
-        .anyExchange().authenticated()
-        .and().formLogin().loginPage(LOGIN_URL).authenticationSuccessHandler(authHandler)
-        .and().logout().logoutSuccessHandler(logoutSuccessHandler)
-        .and().build();
+    return http.authorizeExchange(spec -> spec
+            .pathMatchers(AUTH_WHITELIST)
+            .permitAll()
+            .anyExchange()
+            .authenticated()
+        )
+        .formLogin(spec -> spec.loginPage(LOGIN_URL).authenticationSuccessHandler(authHandler))
+        .logout(spec -> spec
+            .logoutSuccessHandler(logoutSuccessHandler)
+            .requiresLogout(ServerWebExchangeMatchers.pathMatchers(HttpMethod.GET, "/logout")))
+        .csrf(ServerHttpSecurity.CsrfSpec::disable)
+        .build();
   }
 
 }
@@ -27,10 +27,12 @@ public class DisabledAuthSecurityConfig extends AbstractAuthSecurityConfig {
       System.exit(1);
     }
     log.warn("Authentication is disabled. Access will be unrestricted.");
-    return http.authorizeExchange()
-        .anyExchange().permitAll()
-        .and()
-        .csrf().disable()
+
+    return http.authorizeExchange(spec -> spec
+            .anyExchange()
+            .permitAll()
+        )
+        .csrf(ServerHttpSecurity.CsrfSpec::disable)
         .build();
   }
 
@@ -0,0 +1,26 @@
+package com.provectus.kafka.ui.config.auth;
+
+import lombok.Data;
+import org.springframework.beans.factory.annotation.Value;
+import org.springframework.boot.context.properties.ConfigurationProperties;
+
+@ConfigurationProperties("spring.ldap")
+@Data
+public class LdapProperties {
+
+  private String urls;
+  private String base;
+  private String adminUser;
+  private String adminPassword;
+  private String userFilterSearchBase;
+  private String userFilterSearchFilter;
+  private String groupFilterSearchBase;
+  private String groupFilterSearchFilter;
+  private String groupRoleAttribute;
+
+  @Value("${oauth2.ldap.activeDirectory:false}")
+  private boolean isActiveDirectory;
+  @Value("${oauth2.ldap.aсtiveDirectory.domain:@null}")
+  private String activeDirectoryDomain;
+
+}
@@ -1,106 +1,151 @@
 package com.provectus.kafka.ui.config.auth;
 
+import static com.provectus.kafka.ui.config.auth.AbstractAuthSecurityConfig.AUTH_WHITELIST;
+
+import com.provectus.kafka.ui.service.rbac.AccessControlService;
+import com.provectus.kafka.ui.service.rbac.extractor.RbacLdapAuthoritiesExtractor;
+import java.util.Collection;
 import java.util.List;
+import java.util.Optional;
+import lombok.RequiredArgsConstructor;
 import lombok.extern.slf4j.Slf4j;
-import org.springframework.beans.factory.annotation.Value;
 import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
 import org.springframework.boot.autoconfigure.ldap.LdapAutoConfiguration;
+import org.springframework.boot.context.properties.EnableConfigurationProperties;
+import org.springframework.context.ApplicationContext;
 import org.springframework.context.annotation.Bean;
 import org.springframework.context.annotation.Configuration;
 import org.springframework.context.annotation.Import;
+import org.springframework.context.annotation.Primary;
+import org.springframework.ldap.core.DirContextOperations;
 import org.springframework.ldap.core.support.BaseLdapPathContextSource;
 import org.springframework.ldap.core.support.LdapContextSource;
 import org.springframework.security.authentication.AuthenticationManager;
 import org.springframework.security.authentication.ProviderManager;
 import org.springframework.security.authentication.ReactiveAuthenticationManager;
 import org.springframework.security.authentication.ReactiveAuthenticationManagerAdapter;
+import org.springframework.security.config.Customizer;
 import org.springframework.security.config.annotation.web.reactive.EnableWebFluxSecurity;
 import org.springframework.security.config.web.server.ServerHttpSecurity;
+import org.springframework.security.core.GrantedAuthority;
+import org.springframework.security.core.userdetails.UserDetails;
 import org.springframework.security.ldap.authentication.AbstractLdapAuthenticationProvider;
 import org.springframework.security.ldap.authentication.BindAuthenticator;
 import org.springframework.security.ldap.authentication.LdapAuthenticationProvider;
 import org.springframework.security.ldap.authentication.ad.ActiveDirectoryLdapAuthenticationProvider;
 import org.springframework.security.ldap.search.FilterBasedLdapUserSearch;
 import org.springframework.security.ldap.search.LdapUserSearch;
+import org.springframework.security.ldap.userdetails.DefaultLdapAuthoritiesPopulator;
+import org.springframework.security.ldap.userdetails.LdapAuthoritiesPopulator;
+import org.springframework.security.ldap.userdetails.LdapUserDetailsMapper;
 import org.springframework.security.web.server.SecurityWebFilterChain;
 
 @Configuration
 @EnableWebFluxSecurity
 @ConditionalOnProperty(value = "auth.type", havingValue = "LDAP")
 @Import(LdapAutoConfiguration.class)
+@EnableConfigurationProperties(LdapProperties.class)
+@RequiredArgsConstructor
 @Slf4j
-public class LdapSecurityConfig extends AbstractAuthSecurityConfig {
+public class LdapSecurityConfig {
 
-  @Value("${spring.ldap.urls}")
-  private String ldapUrls;
-  @Value("${spring.ldap.dn.pattern:#{null}}")
-  private String ldapUserDnPattern;
-  @Value("${spring.ldap.adminUser:#{null}}")
-  private String adminUser;
-  @Value("${spring.ldap.adminPassword:#{null}}")
-  private String adminPassword;
-  @Value("${spring.ldap.userFilter.searchBase:#{null}}")
-  private String userFilterSearchBase;
-  @Value("${spring.ldap.userFilter.searchFilter:#{null}}")
-  private String userFilterSearchFilter;
-
-  @Value("${oauth2.ldap.activeDirectory:false}")
-  private boolean isActiveDirectory;
-  @Value("${oauth2.ldap.aсtiveDirectory.domain:#{null}}")
-  private String activeDirectoryDomain;
+  private final LdapProperties props;
 
   @Bean
-  public ReactiveAuthenticationManager authenticationManager(BaseLdapPathContextSource contextSource) {
+  public ReactiveAuthenticationManager authenticationManager(BaseLdapPathContextSource contextSource,
+                                                             LdapAuthoritiesPopulator authoritiesExtractor,
+                                                             AccessControlService acs) {
+    var rbacEnabled = acs.isRbacEnabled();
     BindAuthenticator ba = new BindAuthenticator(contextSource);
-    if (ldapUserDnPattern != null) {
-      ba.setUserDnPatterns(new String[] {ldapUserDnPattern});
+    if (props.getBase() != null) {
+      ba.setUserDnPatterns(new String[] {props.getBase()});
     }
-    if (userFilterSearchFilter != null) {
+    if (props.getUserFilterSearchFilter() != null) {
       LdapUserSearch userSearch =
-          new FilterBasedLdapUserSearch(userFilterSearchBase, userFilterSearchFilter, contextSource);
+          new FilterBasedLdapUserSearch(props.getUserFilterSearchBase(), props.getUserFilterSearchFilter(),
+              contextSource);
       ba.setUserSearch(userSearch);
     }
 
     AbstractLdapAuthenticationProvider authenticationProvider;
-    if (!isActiveDirectory) {
-      authenticationProvider = new LdapAuthenticationProvider(ba);
+    if (!props.isActiveDirectory()) {
+      authenticationProvider = rbacEnabled
+          ? new LdapAuthenticationProvider(ba, authoritiesExtractor)
+          : new LdapAuthenticationProvider(ba);
     } else {
-      authenticationProvider = new ActiveDirectoryLdapAuthenticationProvider(activeDirectoryDomain, ldapUrls);
+      authenticationProvider = new ActiveDirectoryLdapAuthenticationProvider(props.getActiveDirectoryDomain(),
+          props.getUrls()); // TODO Issue #3741
       authenticationProvider.setUseAuthenticationRequestCredentials(true);
     }
 
+    if (rbacEnabled) {
+      authenticationProvider.setUserDetailsContextMapper(new UserDetailsMapper());
+    }
+
     AuthenticationManager am = new ProviderManager(List.of(authenticationProvider));
 
     return new ReactiveAuthenticationManagerAdapter(am);
   }
 
   @Bean
+  @Primary
   public BaseLdapPathContextSource contextSource() {
     LdapContextSource ctx = new LdapContextSource();
-    ctx.setUrl(ldapUrls);
-    ctx.setUserDn(adminUser);
-    ctx.setPassword(adminPassword);
+    ctx.setUrl(props.getUrls());
+    ctx.setUserDn(props.getAdminUser());
+    ctx.setPassword(props.getAdminPassword());
     ctx.afterPropertiesSet();
     return ctx;
   }
 
   @Bean
+  @Primary
+  public DefaultLdapAuthoritiesPopulator ldapAuthoritiesExtractor(ApplicationContext context,
+                                                                  BaseLdapPathContextSource contextSource,
+                                                                  AccessControlService acs) {
+    var rbacEnabled = acs != null && acs.isRbacEnabled();
+
+    DefaultLdapAuthoritiesPopulator extractor;
+
+    if (rbacEnabled) {
+      extractor = new RbacLdapAuthoritiesExtractor(context, contextSource, props.getGroupFilterSearchBase());
+    } else {
+      extractor = new DefaultLdapAuthoritiesPopulator(contextSource, props.getGroupFilterSearchBase());
+    }
+
+    Optional.ofNullable(props.getGroupFilterSearchFilter()).ifPresent(extractor::setGroupSearchFilter);
+    extractor.setRolePrefix("");
+    extractor.setConvertToUpperCase(false);
+    extractor.setSearchSubtree(true);
+    return extractor;
+  }
+
+  @Bean
   public SecurityWebFilterChain configureLdap(ServerHttpSecurity http) {
     log.info("Configuring LDAP authentication.");
-    if (isActiveDirectory) {
+    if (props.isActiveDirectory()) {
       log.info("Active Directory support for LDAP has been enabled.");
     }
 
-    http
-        .authorizeExchange()
-        .pathMatchers(AUTH_WHITELIST)
-        .permitAll()
-        .anyExchange()
-        .authenticated()
-        .and()
-        .httpBasic();
-
-    return http.csrf().disable().build();
+    return http.authorizeExchange(spec -> spec
+            .pathMatchers(AUTH_WHITELIST)
+            .permitAll()
+            .anyExchange()
+            .authenticated()
+        )
+        .formLogin(Customizer.withDefaults())
+        .logout(Customizer.withDefaults())
+        .csrf(ServerHttpSecurity.CsrfSpec::disable)
+        .build();
   }
 
+  private static class UserDetailsMapper extends LdapUserDetailsMapper {
+    @Override
+    public UserDetails mapUserFromContext(DirContextOperations ctx, String username,
+                                          Collection<? extends GrantedAuthority> authorities) {
+      UserDetails userDetails = super.mapUserFromContext(ctx, username, authorities);
+      return new RbacLdapUser(userDetails);
+    }
+  }
+
 }
@@ -1,6 +1,7 @@
 package com.provectus.kafka.ui.config.auth;
 
 import jakarta.annotation.PostConstruct;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.Map;
 import java.util.Set;
@@ -14,7 +15,16 @@ public class OAuthProperties {
   private Map<String, OAuth2Provider> client = new HashMap<>();
 
   @PostConstruct
-  public void validate() {
+  public void init() {
+    getClient().values().forEach((provider) -> {
+      if (provider.getCustomParams() == null) {
+        provider.setCustomParams(Collections.emptyMap());
+      }
+      if (provider.getScope() == null) {
+        provider.setScope(Collections.emptySet());
+      }
+    });
+
     getClient().values().forEach(this::validateProvider);
   }
 
@@ -73,8 +73,7 @@ public final class OAuthPropertiesConverter {
   }
 
   private static boolean isGoogle(OAuth2Provider provider) {
-    return provider.getCustomParams() != null
-        && GOOGLE.equalsIgnoreCase(provider.getCustomParams().get(TYPE));
+    return GOOGLE.equalsIgnoreCase(provider.getCustomParams().get(TYPE));
   }
 }
@@ -12,10 +12,11 @@ import lombok.extern.log4j.Log4j2;
 import org.jetbrains.annotations.Nullable;
 import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
 import org.springframework.boot.autoconfigure.security.oauth2.client.OAuth2ClientProperties;
-import org.springframework.boot.autoconfigure.security.oauth2.client.OAuth2ClientPropertiesRegistrationAdapter;
+import org.springframework.boot.autoconfigure.security.oauth2.client.OAuth2ClientPropertiesMapper;
 import org.springframework.boot.context.properties.EnableConfigurationProperties;
 import org.springframework.context.annotation.Bean;
 import org.springframework.context.annotation.Configuration;
+import org.springframework.security.config.Customizer;
 import org.springframework.security.config.annotation.method.configuration.EnableReactiveMethodSecurity;
 import org.springframework.security.config.annotation.web.reactive.EnableWebFluxSecurity;
 import org.springframework.security.config.web.server.ServerHttpSecurity;
@@ -49,21 +50,15 @@ public class OAuthSecurityConfig extends AbstractAuthSecurityConfig {
   public SecurityWebFilterChain configure(ServerHttpSecurity http, OAuthLogoutSuccessHandler logoutHandler) {
     log.info("Configuring OAUTH2 authentication.");
 
-    return http.authorizeExchange()
-        .pathMatchers(AUTH_WHITELIST)
-        .permitAll()
-        .anyExchange()
-        .authenticated()
-
-        .and()
-        .oauth2Login()
-
-        .and()
-        .logout()
-        .logoutSuccessHandler(logoutHandler)
-
-        .and()
-        .csrf().disable()
+    return http.authorizeExchange(spec -> spec
+            .pathMatchers(AUTH_WHITELIST)
+            .permitAll()
+            .anyExchange()
+            .authenticated()
+        )
+        .oauth2Login(Customizer.withDefaults())
+        .logout(spec -> spec.logoutSuccessHandler(logoutHandler))
+        .csrf(ServerHttpSecurity.CsrfSpec::disable)
         .build();
   }
 
@@ -72,13 +67,13 @@ public class OAuthSecurityConfig extends AbstractAuthSecurityConfig {
     final OidcReactiveOAuth2UserService delegate = new OidcReactiveOAuth2UserService();
     return request -> delegate.loadUser(request)
         .flatMap(user -> {
-          String providerId = request.getClientRegistration().getRegistrationId();
-          final var extractor = getExtractor(providerId, acs);
+          var provider = getProviderByProviderId(request.getClientRegistration().getRegistrationId());
+          final var extractor = getExtractor(provider, acs);
           if (extractor == null) {
             return Mono.just(user);
           }
 
-          return extractor.extract(acs, user, Map.of("request", request))
+          return extractor.extract(acs, user, Map.of("request", request, "provider", provider))
               .map(groups -> new RbacOidcUser(user, groups));
         });
   }
@@ -88,13 +83,13 @@ public class OAuthSecurityConfig extends AbstractAuthSecurityConfig {
     final DefaultReactiveOAuth2UserService delegate = new DefaultReactiveOAuth2UserService();
     return request -> delegate.loadUser(request)
         .flatMap(user -> {
-          String providerId = request.getClientRegistration().getRegistrationId();
-          final var extractor = getExtractor(providerId, acs);
+          var provider = getProviderByProviderId(request.getClientRegistration().getRegistrationId());
+          final var extractor = getExtractor(provider, acs);
           if (extractor == null) {
             return Mono.just(user);
           }
 
-          return extractor.extract(acs, user, Map.of("request", request))
+          return extractor.extract(acs, user, Map.of("request", request, "provider", provider))
              .map(groups -> new RbacOAuth2User(user, groups));
        });
   }
@@ -103,7 +98,10 @@ public class OAuthSecurityConfig extends AbstractAuthSecurityConfig {
   public InMemoryReactiveClientRegistrationRepository clientRegistrationRepository() {
     final OAuth2ClientProperties props = OAuthPropertiesConverter.convertProperties(properties);
     final List<ClientRegistration> registrations =
-        new ArrayList<>(OAuth2ClientPropertiesRegistrationAdapter.getClientRegistrations(props).values());
+        new ArrayList<>(new OAuth2ClientPropertiesMapper(props).asClientRegistrations().values());
+    if (registrations.isEmpty()) {
+      throw new IllegalArgumentException("OAuth2 authentication is enabled but no providers specified.");
+    }
     return new InMemoryReactiveClientRegistrationRepository(registrations);
   }
 
@@ -113,18 +111,18 @@ public class OAuthSecurityConfig extends AbstractAuthSecurityConfig {
   }
 
   @Nullable
-  private ProviderAuthorityExtractor getExtractor(final String providerId, AccessControlService acs) {
-    final String provider = getProviderByProviderId(providerId);
-    Optional<ProviderAuthorityExtractor> extractor = acs.getExtractors()
+  private ProviderAuthorityExtractor getExtractor(final OAuthProperties.OAuth2Provider provider,
+                                                  AccessControlService acs) {
+    Optional<ProviderAuthorityExtractor> extractor = acs.getOauthExtractors()
         .stream()
-        .filter(e -> e.isApplicable(provider))
+        .filter(e -> e.isApplicable(provider.getProvider(), provider.getCustomParams()))
         .findFirst();
 
     return extractor.orElse(null);
   }
 
-  private String getProviderByProviderId(final String providerId) {
-    return properties.getClient().get(providerId).getProvider();
+  private OAuthProperties.OAuth2Provider getProviderByProviderId(final String providerId) {
+    return properties.getClient().get(providerId);
   }
 
 }
@@ -0,0 +1,60 @@
+package com.provectus.kafka.ui.config.auth;
+
+import java.util.Collection;
+import java.util.stream.Collectors;
+import org.springframework.security.core.GrantedAuthority;
+import org.springframework.security.core.userdetails.UserDetails;
+
+public class RbacLdapUser implements UserDetails, RbacUser {
+
+  private final UserDetails userDetails;
+
+  public RbacLdapUser(UserDetails userDetails) {
+    this.userDetails = userDetails;
+  }
+
+  @Override
+  public String name() {
+    return userDetails.getUsername();
+  }
+
+  @Override
+  public Collection<String> groups() {
+    return userDetails.getAuthorities().stream().map(GrantedAuthority::getAuthority).collect(Collectors.toSet());
+  }
+
+  @Override
+  public Collection<? extends GrantedAuthority> getAuthorities() {
+    return userDetails.getAuthorities();
+  }
+
+  @Override
+  public String getPassword() {
+    return userDetails.getPassword();
+  }
+
+  @Override
+  public String getUsername() {
+    return userDetails.getUsername();
+  }
+
+  @Override
+  public boolean isAccountNonExpired() {
+    return userDetails.isAccountNonExpired();
+  }
+
+  @Override
+  public boolean isAccountNonLocked() {
+    return userDetails.isAccountNonLocked();
+  }
+
+  @Override
+  public boolean isCredentialsNonExpired() {
+    return userDetails.isCredentialsNonExpired();
+  }
+
+  @Override
+  public boolean isEnabled() {
+    return userDetails.isEnabled();
+  }
+}
@@ -2,7 +2,6 @@ package com.provectus.kafka.ui.config.auth;
 
 import java.util.Collection;
 import java.util.Map;
-import lombok.Value;
 import org.springframework.security.core.GrantedAuthority;
 import org.springframework.security.oauth2.core.user.OAuth2User;
 
@@ -2,7 +2,6 @@ package com.provectus.kafka.ui.config.auth;
 
 import java.util.Collection;
 import java.util.Map;
-import lombok.Value;
 import org.springframework.security.core.GrantedAuthority;
 import org.springframework.security.oauth2.core.oidc.OidcIdToken;
 import org.springframework.security.oauth2.core.oidc.OidcUserInfo;
@@ -0,0 +1,21 @@
+package com.provectus.kafka.ui.config.auth.condition;
+
+import org.springframework.boot.autoconfigure.condition.AllNestedConditions;
+import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
+
+public class ActiveDirectoryCondition extends AllNestedConditions {
+
+  public ActiveDirectoryCondition() {
+    super(ConfigurationPhase.PARSE_CONFIGURATION);
+  }
+
+  @ConditionalOnProperty(value = "auth.type", havingValue = "LDAP")
+  public static class OnAuthType {
+
+  }
+
+  @ConditionalOnProperty(value = "${oauth2.ldap.activeDirectory}:false", havingValue = "true", matchIfMissing = false)
+  public static class OnActiveDirectory {
+
+  }
+}
@@ -1,13 +1,14 @@
 package com.provectus.kafka.ui.config.auth.condition;
 
 import com.provectus.kafka.ui.service.rbac.AbstractProviderCondition;
+import org.jetbrains.annotations.NotNull;
 import org.springframework.context.annotation.Condition;
 import org.springframework.context.annotation.ConditionContext;
 import org.springframework.core.type.AnnotatedTypeMetadata;
 
 public class CognitoCondition extends AbstractProviderCondition implements Condition {
   @Override
-  public boolean matches(final ConditionContext context, final AnnotatedTypeMetadata metadata) {
+  public boolean matches(final ConditionContext context, final @NotNull AnnotatedTypeMetadata metadata) {
     return getRegisteredProvidersTypes(context.getEnvironment()).stream().anyMatch(a -> a.equalsIgnoreCase("cognito"));
   }
-}
\ No newline at end of file
+}
@@ -46,10 +46,8 @@ public class CognitoLogoutSuccessHandler implements LogoutSuccessHandler {
         .fragment(null)
         .build();
 
-    Assert.isTrue(
-        provider.getCustomParams() != null && provider.getCustomParams().containsKey("logoutUrl"),
-        "Custom params should contain 'logoutUrl'"
-    );
+    Assert.isTrue(provider.getCustomParams().containsKey("logoutUrl"),
+        "Custom params should contain 'logoutUrl'");
     final var uri = UriComponentsBuilder
         .fromUri(URI.create(provider.getCustomParams().get("logoutUrl")))
         .queryParam("client_id", provider.getClientId())
AbstractController.java

@@ -2,12 +2,19 @@ package com.provectus.kafka.ui.controller;
 
 import com.provectus.kafka.ui.exception.ClusterNotFoundException;
 import com.provectus.kafka.ui.model.KafkaCluster;
+import com.provectus.kafka.ui.model.rbac.AccessContext;
 import com.provectus.kafka.ui.service.ClustersStorage;
+import com.provectus.kafka.ui.service.audit.AuditService;
+import com.provectus.kafka.ui.service.rbac.AccessControlService;
 import org.springframework.beans.factory.annotation.Autowired;
+import reactor.core.publisher.Mono;
+import reactor.core.publisher.Signal;
 
 public abstract class AbstractController {
 
-  private ClustersStorage clustersStorage;
+  protected ClustersStorage clustersStorage;
+  protected AccessControlService accessControlService;
+  protected AuditService auditService;
 
   protected KafkaCluster getCluster(String name) {
     return clustersStorage.getClusterByName(name)

@@ -15,8 +22,26 @@ public abstract class AbstractController {
             String.format("Cluster with name '%s' not found", name)));
   }
 
+  protected Mono<Void> validateAccess(AccessContext context) {
+    return accessControlService.validateAccess(context);
+  }
+
+  protected void audit(AccessContext acxt, Signal<?> sig) {
+    auditService.audit(acxt, sig);
+  }
+
   @Autowired
   public void setClustersStorage(ClustersStorage clustersStorage) {
     this.clustersStorage = clustersStorage;
   }
+
+  @Autowired
+  public void setAccessControlService(AccessControlService accessControlService) {
+    this.accessControlService = accessControlService;
+  }
+
+  @Autowired
+  public void setAuditService(AuditService auditService) {
+    this.auditService = auditService;
+  }
 }
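The new audit(AccessContext, Signal<?>) hook is wired through Reactor's doOnEach, which fires once per signal (next, complete, or error), so a single hook can record both successful and failed operations. A minimal, self-contained sketch of that mechanism — the print statements stand in for the real audit sink:

import reactor.core.publisher.Mono;

public class DoOnEachDemo {
  public static void main(String[] args) {
    Mono.just("createAcl result")
        .doOnEach(signal -> {
          // Invoked for onNext and onComplete here; onError on failure paths.
          if (signal.isOnError()) {
            System.out.println("audit: operation failed: " + signal.getThrowable());
          } else if (signal.isOnComplete()) {
            System.out.println("audit: operation succeeded");
          }
        })
        .block();
  }
}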
AccessController.java

@@ -12,8 +12,10 @@ import java.security.Principal;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.List;
-import java.util.stream.Collectors;
+import java.util.Objects;
+import javax.annotation.Nullable;
 import lombok.RequiredArgsConstructor;
+import lombok.extern.slf4j.Slf4j;
 import org.springframework.http.ResponseEntity;
 import org.springframework.security.core.context.ReactiveSecurityContextHolder;
 import org.springframework.security.core.context.SecurityContext;

@@ -23,22 +25,19 @@ import reactor.core.publisher.Mono;
 
 @RestController
 @RequiredArgsConstructor
+@Slf4j
 public class AccessController implements AuthorizationApi {
 
   private final AccessControlService accessControlService;
 
   public Mono<ResponseEntity<AuthenticationInfoDTO>> getUserAuthInfo(ServerWebExchange exchange) {
-    AuthenticationInfoDTO dto = new AuthenticationInfoDTO();
-    dto.setRbacEnabled(accessControlService.isRbacEnabled());
-    UserInfoDTO userInfo = new UserInfoDTO();
-
     Mono<List<UserPermissionDTO>> permissions = accessControlService.getUser()
         .map(user -> accessControlService.getRoles()
             .stream()
             .filter(role -> user.groups().contains(role.getName()))
            .map(role -> mapPermissions(role.getPermissions(), role.getClusters()))
             .flatMap(Collection::stream)
-            .collect(Collectors.toList())
+            .toList()
         )
         .switchIfEmpty(Mono.just(Collections.emptyList()));
 

@@ -49,13 +48,11 @@ public class AccessController implements AuthorizationApi {
     return userName
         .zipWith(permissions)
         .map(data -> {
-          userInfo.setUsername(data.getT1());
-          userInfo.setPermissions(data.getT2());
-
-          dto.setUserInfo(userInfo);
+          var dto = new AuthenticationInfoDTO(accessControlService.isRbacEnabled());
+          dto.setUserInfo(new UserInfoDTO(data.getT1(), data.getT2()));
           return dto;
         })
-        .switchIfEmpty(Mono.just(dto))
+        .switchIfEmpty(Mono.just(new AuthenticationInfoDTO(accessControlService.isRbacEnabled())))
         .map(ResponseEntity::ok);
   }
 

@@ -70,11 +67,22 @@ public class AccessController implements AuthorizationApi {
           dto.setActions(permission.getActions()
               .stream()
-              .map(String::toUpperCase)
-              .map(ActionDTO::valueOf)
-              .collect(Collectors.toList()));
+              .map(this::mapAction)
+              .filter(Objects::nonNull)
+              .toList());
           return dto;
         })
-        .collect(Collectors.toList());
+        .toList();
   }
 
+  @Nullable
+  private ActionDTO mapAction(String name) {
+    try {
+      return ActionDTO.fromValue(name);
+    } catch (IllegalArgumentException e) {
+      log.warn("Unknown Action [{}], skipping", name);
+      return null;
+    }
+  }
+
 }
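Several hunks in this compare swap .collect(Collectors.toList()) for Java 16's Stream.toList(). The two are not drop-in identical: toList() returns an unmodifiable list, while Collectors.toList() currently returns a mutable ArrayList. A tiny demo of the difference:

import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.Stream;

public class ToListDemo {
  public static void main(String[] args) {
    List<String> mutable = Stream.of("a", "b").collect(Collectors.toList());
    mutable.add("c"); // fine: an ArrayList in practice

    List<String> unmodifiable = Stream.of("a", "b").toList();
    unmodifiable.add("c"); // throws UnsupportedOperationException
  }
}

Safe here because the lists are only handed to DTO setters and never mutated afterwards.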
AclsController.java (new file)

@@ -0,0 +1,176 @@
+package com.provectus.kafka.ui.controller;
+
+import com.provectus.kafka.ui.api.AclsApi;
+import com.provectus.kafka.ui.mapper.ClusterMapper;
+import com.provectus.kafka.ui.model.CreateConsumerAclDTO;
+import com.provectus.kafka.ui.model.CreateProducerAclDTO;
+import com.provectus.kafka.ui.model.CreateStreamAppAclDTO;
+import com.provectus.kafka.ui.model.KafkaAclDTO;
+import com.provectus.kafka.ui.model.KafkaAclNamePatternTypeDTO;
+import com.provectus.kafka.ui.model.KafkaAclResourceTypeDTO;
+import com.provectus.kafka.ui.model.rbac.AccessContext;
+import com.provectus.kafka.ui.model.rbac.permission.AclAction;
+import com.provectus.kafka.ui.service.acl.AclsService;
+import java.util.Optional;
+import lombok.RequiredArgsConstructor;
+import org.apache.kafka.common.resource.PatternType;
+import org.apache.kafka.common.resource.ResourcePatternFilter;
+import org.apache.kafka.common.resource.ResourceType;
+import org.springframework.http.ResponseEntity;
+import org.springframework.web.bind.annotation.RestController;
+import org.springframework.web.server.ServerWebExchange;
+import reactor.core.publisher.Flux;
+import reactor.core.publisher.Mono;
+
+@RestController
+@RequiredArgsConstructor
+public class AclsController extends AbstractController implements AclsApi {
+
+  private final AclsService aclsService;
+
+  @Override
+  public Mono<ResponseEntity<Void>> createAcl(String clusterName, Mono<KafkaAclDTO> kafkaAclDto,
+                                              ServerWebExchange exchange) {
+    AccessContext context = AccessContext.builder()
+        .cluster(clusterName)
+        .aclActions(AclAction.EDIT)
+        .operationName("createAcl")
+        .build();
+
+    return validateAccess(context)
+        .then(kafkaAclDto)
+        .map(ClusterMapper::toAclBinding)
+        .flatMap(binding -> aclsService.createAcl(getCluster(clusterName), binding))
+        .doOnEach(sig -> audit(context, sig))
+        .thenReturn(ResponseEntity.ok().build());
+  }
+
+  @Override
+  public Mono<ResponseEntity<Void>> deleteAcl(String clusterName, Mono<KafkaAclDTO> kafkaAclDto,
+                                              ServerWebExchange exchange) {
+    AccessContext context = AccessContext.builder()
+        .cluster(clusterName)
+        .aclActions(AclAction.EDIT)
+        .operationName("deleteAcl")
+        .build();
+
+    return validateAccess(context)
+        .then(kafkaAclDto)
+        .map(ClusterMapper::toAclBinding)
+        .flatMap(binding -> aclsService.deleteAcl(getCluster(clusterName), binding))
+        .doOnEach(sig -> audit(context, sig))
+        .thenReturn(ResponseEntity.ok().build());
+  }
+
+  @Override
+  public Mono<ResponseEntity<Flux<KafkaAclDTO>>> listAcls(String clusterName,
+                                                          KafkaAclResourceTypeDTO resourceTypeDto,
+                                                          String resourceName,
+                                                          KafkaAclNamePatternTypeDTO namePatternTypeDto,
+                                                          ServerWebExchange exchange) {
+    AccessContext context = AccessContext.builder()
+        .cluster(clusterName)
+        .aclActions(AclAction.VIEW)
+        .operationName("listAcls")
+        .build();
+
+    var resourceType = Optional.ofNullable(resourceTypeDto)
+        .map(ClusterMapper::mapAclResourceTypeDto)
+        .orElse(ResourceType.ANY);
+
+    var namePatternType = Optional.ofNullable(namePatternTypeDto)
+        .map(ClusterMapper::mapPatternTypeDto)
+        .orElse(PatternType.ANY);
+
+    var filter = new ResourcePatternFilter(resourceType, resourceName, namePatternType);
+
+    return validateAccess(context).then(
+        Mono.just(
+            ResponseEntity.ok(
+                aclsService.listAcls(getCluster(clusterName), filter)
+                    .map(ClusterMapper::toKafkaAclDto)))
+    ).doOnEach(sig -> audit(context, sig));
+  }
+
+  @Override
+  public Mono<ResponseEntity<String>> getAclAsCsv(String clusterName, ServerWebExchange exchange) {
+    AccessContext context = AccessContext.builder()
+        .cluster(clusterName)
+        .aclActions(AclAction.VIEW)
+        .operationName("getAclAsCsv")
+        .build();
+
+    return validateAccess(context).then(
+        aclsService.getAclAsCsvString(getCluster(clusterName))
+            .map(ResponseEntity::ok)
+            .flatMap(Mono::just)
+            .doOnEach(sig -> audit(context, sig))
+    );
+  }
+
+  @Override
+  public Mono<ResponseEntity<Void>> syncAclsCsv(String clusterName, Mono<String> csvMono, ServerWebExchange exchange) {
+    AccessContext context = AccessContext.builder()
+        .cluster(clusterName)
+        .aclActions(AclAction.EDIT)
+        .operationName("syncAclsCsv")
+        .build();
+
+    return validateAccess(context)
+        .then(csvMono)
+        .flatMap(csv -> aclsService.syncAclWithAclCsv(getCluster(clusterName), csv))
+        .doOnEach(sig -> audit(context, sig))
+        .thenReturn(ResponseEntity.ok().build());
+  }
+
+  @Override
+  public Mono<ResponseEntity<Void>> createConsumerAcl(String clusterName,
+                                                      Mono<CreateConsumerAclDTO> createConsumerAclDto,
+                                                      ServerWebExchange exchange) {
+    AccessContext context = AccessContext.builder()
+        .cluster(clusterName)
+        .aclActions(AclAction.EDIT)
+        .operationName("createConsumerAcl")
+        .build();
+
+    return validateAccess(context)
+        .then(createConsumerAclDto)
+        .flatMap(req -> aclsService.createConsumerAcl(getCluster(clusterName), req))
+        .doOnEach(sig -> audit(context, sig))
+        .thenReturn(ResponseEntity.ok().build());
+  }
+
+  @Override
+  public Mono<ResponseEntity<Void>> createProducerAcl(String clusterName,
+                                                      Mono<CreateProducerAclDTO> createProducerAclDto,
+                                                      ServerWebExchange exchange) {
+    AccessContext context = AccessContext.builder()
+        .cluster(clusterName)
+        .aclActions(AclAction.EDIT)
+        .operationName("createProducerAcl")
+        .build();
+
+    return validateAccess(context)
+        .then(createProducerAclDto)
+        .flatMap(req -> aclsService.createProducerAcl(getCluster(clusterName), req))
+        .doOnEach(sig -> audit(context, sig))
+        .thenReturn(ResponseEntity.ok().build());
+  }
+
+  @Override
+  public Mono<ResponseEntity<Void>> createStreamAppAcl(String clusterName,
+                                                       Mono<CreateStreamAppAclDTO> createStreamAppAclDto,
+                                                       ServerWebExchange exchange) {
+    AccessContext context = AccessContext.builder()
+        .cluster(clusterName)
+        .aclActions(AclAction.EDIT)
+        .operationName("createStreamAppAcl")
+        .build();
+
+    return validateAccess(context)
+        .then(createStreamAppAclDto)
+        .flatMap(req -> aclsService.createStreamAppAcl(getCluster(clusterName), req))
+        .doOnEach(sig -> audit(context, sig))
+        .thenReturn(ResponseEntity.ok().build());
+  }
+}
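listAcls narrows results with an org.apache.kafka.common.resource.ResourcePatternFilter, defaulting both the resource type and the name pattern to ANY when the query params are absent. For context, a hedged sketch of how such a filter is typically consumed with the plain Kafka Admin client — the AclsService wiring itself is not shown in this diff, and the bootstrap address is a placeholder:

import java.util.Properties;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.common.acl.AccessControlEntryFilter;
import org.apache.kafka.common.acl.AclBindingFilter;
import org.apache.kafka.common.resource.PatternType;
import org.apache.kafka.common.resource.ResourcePatternFilter;
import org.apache.kafka.common.resource.ResourceType;

public class ListAclsDemo {
  public static void main(String[] args) throws Exception {
    Properties props = new Properties();
    props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder
    try (Admin admin = Admin.create(props)) {
      // ANY/ANY mirrors the controller's defaults when no query params are passed.
      var resourceFilter = new ResourcePatternFilter(ResourceType.ANY, null, PatternType.ANY);
      var bindingFilter = new AclBindingFilter(resourceFilter, AccessControlEntryFilter.ANY);
      admin.describeAcls(bindingFilter).values().get().forEach(System.out::println);
    }
  }
}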
ApplicationConfigController.java

@@ -15,7 +15,6 @@ import com.provectus.kafka.ui.model.UploadedFileInfoDTO;
 import com.provectus.kafka.ui.model.rbac.AccessContext;
 import com.provectus.kafka.ui.service.ApplicationInfoService;
 import com.provectus.kafka.ui.service.KafkaClusterFactory;
-import com.provectus.kafka.ui.service.rbac.AccessControlService;
 import com.provectus.kafka.ui.util.ApplicationRestarter;
 import com.provectus.kafka.ui.util.DynamicConfigOperations;
 import com.provectus.kafka.ui.util.DynamicConfigOperations.PropertiesStructure;

@@ -27,6 +26,7 @@ import org.mapstruct.Mapper;
 import org.mapstruct.factory.Mappers;
 import org.springframework.http.ResponseEntity;
 import org.springframework.http.codec.multipart.FilePart;
+import org.springframework.http.codec.multipart.Part;
 import org.springframework.web.bind.annotation.RestController;
 import org.springframework.web.server.ServerWebExchange;
 import reactor.core.publisher.Flux;

@@ -37,7 +37,7 @@ import reactor.util.function.Tuples;
 @Slf4j
 @RestController
 @RequiredArgsConstructor
-public class ApplicationConfigController implements ApplicationConfigApi {
+public class ApplicationConfigController extends AbstractController implements ApplicationConfigApi {
 
   private static final PropertiesMapper MAPPER = Mappers.getMapper(PropertiesMapper.class);
 

@@ -49,7 +49,6 @@ public class ApplicationConfigController extends AbstractController implements ApplicationConfigApi {
     ApplicationConfigPropertiesDTO toDto(PropertiesStructure propertiesStructure);
   }
 
-  private final AccessControlService accessControlService;
   private final DynamicConfigOperations dynamicConfigOperations;
   private final ApplicationRestarter restarter;
   private final KafkaClusterFactory kafkaClusterFactory;

@@ -62,59 +61,69 @@ public class ApplicationConfigController extends AbstractController implements ApplicationConfigApi {
 
   @Override
   public Mono<ResponseEntity<ApplicationConfigDTO>> getCurrentConfig(ServerWebExchange exchange) {
-    return accessControlService
-        .validateAccess(
-            AccessContext.builder()
-                .applicationConfigActions(VIEW)
-                .build()
-        )
+    var context = AccessContext.builder()
+        .applicationConfigActions(VIEW)
+        .operationName("getCurrentConfig")
+        .build();
+    return validateAccess(context)
         .then(Mono.fromSupplier(() -> ResponseEntity.ok(
             new ApplicationConfigDTO()
                 .properties(MAPPER.toDto(dynamicConfigOperations.getCurrentProperties()))
-        )));
+        )))
+        .doOnEach(sig -> audit(context, sig));
   }
 
   @Override
   public Mono<ResponseEntity<Void>> restartWithConfig(Mono<RestartRequestDTO> restartRequestDto,
                                                       ServerWebExchange exchange) {
-    return accessControlService
-        .validateAccess(
-            AccessContext.builder()
-                .applicationConfigActions(EDIT)
-                .build()
-        )
+    var context = AccessContext.builder()
+        .applicationConfigActions(EDIT)
+        .operationName("restartWithConfig")
+        .build();
+    return validateAccess(context)
         .then(restartRequestDto)
-        .map(dto -> {
-          dynamicConfigOperations.persist(MAPPER.fromDto(dto.getConfig().getProperties()));
-          restarter.requestRestart();
-          return ResponseEntity.ok().build();
-        });
+        .doOnNext(restartDto -> {
+          var newConfig = MAPPER.fromDto(restartDto.getConfig().getProperties());
+          dynamicConfigOperations.persist(newConfig);
+        })
+        .doOnEach(sig -> audit(context, sig))
+        .doOnSuccess(dto -> restarter.requestRestart())
+        .map(dto -> ResponseEntity.ok().build());
   }
 
   @Override
-  public Mono<ResponseEntity<UploadedFileInfoDTO>> uploadConfigRelatedFile(FilePart file, ServerWebExchange exchange) {
-    return accessControlService
-        .validateAccess(
-            AccessContext.builder()
-                .applicationConfigActions(EDIT)
-                .build()
-        )
-        .then(dynamicConfigOperations.uploadConfigRelatedFile(file))
-        .map(path -> new UploadedFileInfoDTO().location(path.toString()))
-        .map(ResponseEntity::ok);
+  public Mono<ResponseEntity<UploadedFileInfoDTO>> uploadConfigRelatedFile(Flux<Part> fileFlux,
+                                                                           ServerWebExchange exchange) {
+    var context = AccessContext.builder()
+        .applicationConfigActions(EDIT)
+        .operationName("uploadConfigRelatedFile")
+        .build();
+    return validateAccess(context)
+        .then(fileFlux.single())
+        .flatMap(file ->
+            dynamicConfigOperations.uploadConfigRelatedFile((FilePart) file)
+                .map(path -> new UploadedFileInfoDTO().location(path.toString()))
+                .map(ResponseEntity::ok))
+        .doOnEach(sig -> audit(context, sig));
   }
 
   @Override
   public Mono<ResponseEntity<ApplicationConfigValidationDTO>> validateConfig(Mono<ApplicationConfigDTO> configDto,
                                                                              ServerWebExchange exchange) {
-    return configDto
+    var context = AccessContext.builder()
+        .applicationConfigActions(EDIT)
+        .operationName("validateConfig")
+        .build();
+    return validateAccess(context)
+        .then(configDto)
         .flatMap(config -> {
-          PropertiesStructure propertiesStructure = MAPPER.fromDto(config.getProperties());
-          ClustersProperties clustersProperties = propertiesStructure.getKafka();
+          PropertiesStructure newConfig = MAPPER.fromDto(config.getProperties());
+          ClustersProperties clustersProperties = newConfig.getKafka();
           return validateClustersConfig(clustersProperties)
               .map(validations -> new ApplicationConfigValidationDTO().clusters(validations));
         })
-        .map(ResponseEntity::ok);
+        .map(ResponseEntity::ok)
+        .doOnEach(sig -> audit(context, sig));
   }
 
   private Mono<Map<String, ClusterConfigValidationDTO>> validateClustersConfig(
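The upload endpoint now accepts Flux<Part> and reduces it with single(), so exactly one multipart part is required. Reactor's Flux.single() errors with NoSuchElementException on an empty source and IndexOutOfBoundsException when more than one element arrives. A small demo of those semantics (Strings stand in for multipart parts):

import reactor.core.publisher.Flux;

public class SingleDemo {
  public static void main(String[] args) {
    Flux.just("part1").single()
        .subscribe(v -> System.out.println("ok: " + v));

    Flux.just("part1", "part2").single()
        .subscribe(v -> { }, e -> System.out.println("two parts: " + e)); // IndexOutOfBoundsException

    Flux.empty().single()
        .subscribe(v -> { }, e -> System.out.println("no parts: " + e)); // NoSuchElementException
  }
}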
AuthController.java

@@ -36,10 +36,10 @@ public class AuthController {
             + "  <meta name=\"description\" content=\"\">\n"
             + "  <meta name=\"author\" content=\"\">\n"
             + "  <title>Please sign in</title>\n"
-            + "  <link href=\"/static/css/bootstrap.min.css\" rel=\"stylesheet\" "
+            + "  <link href=\"" + contextPath + "/static/css/bootstrap.min.css\" rel=\"stylesheet\" "
             + "integrity=\"sha384-/Y6pD6FV/Vv2HJnA6t+vslU6fwYXjCFtcEpHbNJ0lyAFsXTsjBbfaDjzALeQsN6M\" "
             + "crossorigin=\"anonymous\">\n"
-            + "  <link href=\"/static/css/signin.css\" "
+            + "  <link href=\"" + contextPath + "/static/css/signin.css\" "
             + "rel=\"stylesheet\" crossorigin=\"anonymous\"/>\n"
             + "  </head>\n"
             + "  <body>\n"
BrokersController.java

@@ -11,8 +11,9 @@ import com.provectus.kafka.ui.model.BrokersLogdirsDTO;
 import com.provectus.kafka.ui.model.rbac.AccessContext;
 import com.provectus.kafka.ui.model.rbac.permission.ClusterConfigAction;
 import com.provectus.kafka.ui.service.BrokerService;
-import com.provectus.kafka.ui.service.rbac.AccessControlService;
 import java.util.List;
+import java.util.Map;
+import javax.annotation.Nullable;
 import lombok.RequiredArgsConstructor;
 import lombok.extern.slf4j.Slf4j;
 import org.springframework.http.ResponseEntity;

@@ -25,63 +26,79 @@ import reactor.core.publisher.Mono;
 @RequiredArgsConstructor
 @Slf4j
 public class BrokersController extends AbstractController implements BrokersApi {
+  private static final String BROKER_ID = "brokerId";
+
   private final BrokerService brokerService;
   private final ClusterMapper clusterMapper;
-  private final AccessControlService accessControlService;
 
   @Override
   public Mono<ResponseEntity<Flux<BrokerDTO>>> getBrokers(String clusterName,
                                                           ServerWebExchange exchange) {
-    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+    var context = AccessContext.builder()
         .cluster(clusterName)
-        .build());
+        .operationName("getBrokers")
+        .build();
 
     var job = brokerService.getBrokers(getCluster(clusterName)).map(clusterMapper::toBrokerDto);
 
-    return validateAccess.thenReturn(ResponseEntity.ok(job));
+    return validateAccess(context)
+        .thenReturn(ResponseEntity.ok(job))
+        .doOnEach(sig -> audit(context, sig));
   }
 
   @Override
   public Mono<ResponseEntity<BrokerMetricsDTO>> getBrokersMetrics(String clusterName, Integer id,
                                                                   ServerWebExchange exchange) {
-    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+    var context = AccessContext.builder()
         .cluster(clusterName)
-        .build());
+        .operationName("getBrokersMetrics")
+        .operationParams(Map.of("id", id))
+        .build();
 
-    return validateAccess.then(
-        brokerService.getBrokerMetrics(getCluster(clusterName), id)
-            .map(clusterMapper::toBrokerMetrics)
-            .map(ResponseEntity::ok)
-            .onErrorReturn(ResponseEntity.notFound().build())
-    );
+    return validateAccess(context)
+        .then(
+            brokerService.getBrokerMetrics(getCluster(clusterName), id)
+                .map(clusterMapper::toBrokerMetrics)
+                .map(ResponseEntity::ok)
+                .onErrorReturn(ResponseEntity.notFound().build())
+        )
+        .doOnEach(sig -> audit(context, sig));
   }
 
   @Override
   public Mono<ResponseEntity<Flux<BrokersLogdirsDTO>>> getAllBrokersLogdirs(String clusterName,
-                                                                            List<Integer> brokers,
+                                                                            @Nullable List<Integer> brokers,
                                                                             ServerWebExchange exchange) {
-    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
-        .cluster(clusterName)
-        .build());
-
-    return validateAccess.thenReturn(ResponseEntity.ok(
-        brokerService.getAllBrokersLogdirs(getCluster(clusterName), brokers)));
+    List<Integer> brokerIds = brokers == null ? List.of() : brokers;
+
+    var context = AccessContext.builder()
+        .cluster(clusterName)
+        .operationName("getAllBrokersLogdirs")
+        .operationParams(Map.of("brokerIds", brokerIds))
+        .build();
+
+    return validateAccess(context)
+        .thenReturn(ResponseEntity.ok(
+            brokerService.getAllBrokersLogdirs(getCluster(clusterName), brokerIds)))
+        .doOnEach(sig -> audit(context, sig));
   }
 
   @Override
   public Mono<ResponseEntity<Flux<BrokerConfigDTO>>> getBrokerConfig(String clusterName,
                                                                      Integer id,
                                                                      ServerWebExchange exchange) {
-    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+    var context = AccessContext.builder()
         .cluster(clusterName)
         .clusterConfigActions(ClusterConfigAction.VIEW)
-        .build());
+        .operationName("getBrokerConfig")
+        .operationParams(Map.of(BROKER_ID, id))
+        .build();
 
-    return validateAccess.thenReturn(
+    return validateAccess(context).thenReturn(
         ResponseEntity.ok(
             brokerService.getBrokerConfig(getCluster(clusterName), id)
                 .map(clusterMapper::toBrokerConfig))
-    );
+    ).doOnEach(sig -> audit(context, sig));
   }
 
   @Override

@@ -89,16 +106,18 @@ public class BrokersController extends AbstractController implements BrokersApi {
                                                                 Integer id,
                                                                 Mono<BrokerLogdirUpdateDTO> brokerLogdir,
                                                                 ServerWebExchange exchange) {
-    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+    var context = AccessContext.builder()
         .cluster(clusterName)
         .clusterConfigActions(ClusterConfigAction.VIEW, ClusterConfigAction.EDIT)
-        .build());
+        .operationName("updateBrokerTopicPartitionLogDir")
+        .operationParams(Map.of(BROKER_ID, id))
+        .build();
 
-    return validateAccess.then(
+    return validateAccess(context).then(
         brokerLogdir
             .flatMap(bld -> brokerService.updateBrokerLogDir(getCluster(clusterName), id, bld))
             .map(ResponseEntity::ok)
-    );
+    ).doOnEach(sig -> audit(context, sig));
   }
 
   @Override

@@ -107,16 +126,18 @@ public class BrokersController extends AbstractController implements BrokersApi {
                                                              String name,
                                                              Mono<BrokerConfigItemDTO> brokerConfig,
                                                              ServerWebExchange exchange) {
-    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+    var context = AccessContext.builder()
         .cluster(clusterName)
         .clusterConfigActions(ClusterConfigAction.VIEW, ClusterConfigAction.EDIT)
-        .build());
+        .operationName("updateBrokerConfigByName")
+        .operationParams(Map.of(BROKER_ID, id))
+        .build();
 
-    return validateAccess.then(
+    return validateAccess(context).then(
         brokerConfig
             .flatMap(bci -> brokerService.updateBrokerConfigByName(
                 getCluster(clusterName), id, name, bci.getValue()))
             .map(ResponseEntity::ok)
-    );
+    ).doOnEach(sig -> audit(context, sig));
   }
 }
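Note the defensive List<Integer> brokerIds = brokers == null ? List.of() : brokers; before operationParams(Map.of(...)): Map.of rejects null keys and values outright, so passing the raw nullable query parameter straight through would fail at runtime. A two-line demo:

import java.util.List;
import java.util.Map;

public class MapOfNullDemo {
  public static void main(String[] args) {
    List<Integer> brokers = null;
    Map.of("brokerIds", brokers == null ? List.of() : brokers); // fine
    Map.of("brokerIds", brokers); // throws NullPointerException
  }
}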
ClustersController.java

@@ -6,7 +6,6 @@ import com.provectus.kafka.ui.model.ClusterMetricsDTO;
 import com.provectus.kafka.ui.model.ClusterStatsDTO;
 import com.provectus.kafka.ui.model.rbac.AccessContext;
 import com.provectus.kafka.ui.service.ClusterService;
-import com.provectus.kafka.ui.service.rbac.AccessControlService;
 import lombok.RequiredArgsConstructor;
 import lombok.extern.slf4j.Slf4j;
 import org.springframework.http.ResponseEntity;

@@ -20,7 +19,6 @@ import reactor.core.publisher.Mono;
 @Slf4j
 public class ClustersController extends AbstractController implements ClustersApi {
   private final ClusterService clusterService;
-  private final AccessControlService accessControlService;
 
   @Override
   public Mono<ResponseEntity<Flux<ClusterDTO>>> getClusters(ServerWebExchange exchange) {

@@ -35,14 +33,16 @@ public class ClustersController extends AbstractController implements ClustersApi {
                                                                  ServerWebExchange exchange) {
     AccessContext context = AccessContext.builder()
         .cluster(clusterName)
+        .operationName("getClusterMetrics")
         .build();
 
-    return accessControlService.validateAccess(context)
+    return validateAccess(context)
         .then(
             clusterService.getClusterMetrics(getCluster(clusterName))
                 .map(ResponseEntity::ok)
                 .onErrorReturn(ResponseEntity.notFound().build())
-        );
+        )
+        .doOnEach(sig -> audit(context, sig));
   }
 
   @Override

@@ -50,14 +50,16 @@ public class ClustersController extends AbstractController implements ClustersApi {
                                                              ServerWebExchange exchange) {
     AccessContext context = AccessContext.builder()
         .cluster(clusterName)
+        .operationName("getClusterStats")
         .build();
 
-    return accessControlService.validateAccess(context)
+    return validateAccess(context)
         .then(
             clusterService.getClusterStats(getCluster(clusterName))
                 .map(ResponseEntity::ok)
                 .onErrorReturn(ResponseEntity.notFound().build())
-        );
+        )
+        .doOnEach(sig -> audit(context, sig));
   }
 
   @Override

@@ -66,11 +68,11 @@ public class ClustersController extends AbstractController implements ClustersApi {
 
     AccessContext context = AccessContext.builder()
         .cluster(clusterName)
+        .operationName("updateClusterInfo")
         .build();
 
-    return accessControlService.validateAccess(context)
-        .then(
-            clusterService.updateCluster(getCluster(clusterName)).map(ResponseEntity::ok)
-        );
+    return validateAccess(context)
+        .then(clusterService.updateCluster(getCluster(clusterName)).map(ResponseEntity::ok))
+        .doOnEach(sig -> audit(context, sig));
   }
 }
ConsumerGroupsController.java

@@ -19,11 +19,9 @@ import com.provectus.kafka.ui.model.rbac.AccessContext;
 import com.provectus.kafka.ui.model.rbac.permission.TopicAction;
 import com.provectus.kafka.ui.service.ConsumerGroupService;
 import com.provectus.kafka.ui.service.OffsetsResetService;
-import com.provectus.kafka.ui.service.rbac.AccessControlService;
 import java.util.Map;
 import java.util.Optional;
 import java.util.function.Supplier;
-import java.util.stream.Collectors;
 import lombok.RequiredArgsConstructor;
 import lombok.extern.slf4j.Slf4j;
 import org.springframework.beans.factory.annotation.Value;

@@ -41,7 +39,6 @@ public class ConsumerGroupsController extends AbstractController implements ConsumerGroupsApi {
 
   private final ConsumerGroupService consumerGroupService;
   private final OffsetsResetService offsetsResetService;
-  private final AccessControlService accessControlService;
 
   @Value("${consumer.groups.page.size:25}")
   private int defaultConsumerGroupsPageSize;

@@ -50,44 +47,47 @@ public class ConsumerGroupsController extends AbstractController implements ConsumerGroupsApi {
   public Mono<ResponseEntity<Void>> deleteConsumerGroup(String clusterName,
                                                         String id,
                                                         ServerWebExchange exchange) {
-    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+    var context = AccessContext.builder()
         .cluster(clusterName)
         .consumerGroup(id)
         .consumerGroupActions(DELETE)
-        .build());
+        .operationName("deleteConsumerGroup")
+        .build();
 
-    return validateAccess.then(
-        consumerGroupService.deleteConsumerGroupById(getCluster(clusterName), id)
-            .thenReturn(ResponseEntity.ok().build())
-    );
+    return validateAccess(context)
+        .then(consumerGroupService.deleteConsumerGroupById(getCluster(clusterName), id))
+        .doOnEach(sig -> audit(context, sig))
+        .thenReturn(ResponseEntity.ok().build());
   }
 
   @Override
   public Mono<ResponseEntity<ConsumerGroupDetailsDTO>> getConsumerGroup(String clusterName,
                                                                         String consumerGroupId,
                                                                         ServerWebExchange exchange) {
-    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+    var context = AccessContext.builder()
         .cluster(clusterName)
         .consumerGroup(consumerGroupId)
         .consumerGroupActions(VIEW)
-        .build());
+        .operationName("getConsumerGroup")
+        .build();
 
-    return validateAccess.then(
-        consumerGroupService.getConsumerGroupDetail(getCluster(clusterName), consumerGroupId)
-            .map(ConsumerGroupMapper::toDetailsDto)
-            .map(ResponseEntity::ok)
-    );
+    return validateAccess(context)
+        .then(consumerGroupService.getConsumerGroupDetail(getCluster(clusterName), consumerGroupId)
+            .map(ConsumerGroupMapper::toDetailsDto)
+            .map(ResponseEntity::ok))
+        .doOnEach(sig -> audit(context, sig));
   }
 
   @Override
   public Mono<ResponseEntity<Flux<ConsumerGroupDTO>>> getTopicConsumerGroups(String clusterName,
                                                                              String topicName,
                                                                              ServerWebExchange exchange) {
-    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+    var context = AccessContext.builder()
         .cluster(clusterName)
         .topic(topicName)
         .topicActions(TopicAction.VIEW)
-        .build());
+        .operationName("getTopicConsumerGroups")
+        .build();
 
     Mono<ResponseEntity<Flux<ConsumerGroupDTO>>> job =
         consumerGroupService.getConsumerGroupsForTopic(getCluster(clusterName), topicName)

@@ -99,7 +99,9 @@ public class ConsumerGroupsController extends AbstractController implements ConsumerGroupsApi {
             .map(ResponseEntity::ok)
             .switchIfEmpty(Mono.just(ResponseEntity.notFound().build()));
 
-    return validateAccess.then(job);
+    return validateAccess(context)
+        .then(job)
+        .doOnEach(sig -> audit(context, sig));
   }
 
   @Override

@@ -112,12 +114,13 @@ public class ConsumerGroupsController extends AbstractController implements ConsumerGroupsApi {
                                                                            SortOrderDTO sortOrderDto,
                                                                            ServerWebExchange exchange) {
 
-    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+    var context = AccessContext.builder()
         .cluster(clusterName)
         // consumer group access validation is within the service
-        .build());
+        .operationName("getConsumerGroupsPage")
+        .build();
 
-    return validateAccess.then(
+    return validateAccess(context).then(
         consumerGroupService.getConsumerGroupsPage(
             getCluster(clusterName),
             Optional.ofNullable(page).filter(i -> i > 0).orElse(1),

@@ -128,7 +131,7 @@ public class ConsumerGroupsController extends AbstractController implements ConsumerGroupsApi {
         )
             .map(this::convertPage)
             .map(ResponseEntity::ok)
-    );
+    ).doOnEach(sig -> audit(context, sig));
   }
 
   @Override

@@ -137,12 +140,13 @@ public class ConsumerGroupsController extends AbstractController implements ConsumerGroupsApi {
                                                                  Mono<ConsumerGroupOffsetsResetDTO> resetDto,
                                                                  ServerWebExchange exchange) {
     return resetDto.flatMap(reset -> {
-      Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+      var context = AccessContext.builder()
           .cluster(clusterName)
           .topic(reset.getTopic())
           .topicActions(TopicAction.VIEW)
           .consumerGroupActions(RESET_OFFSETS)
-          .build());
+          .operationName("resetConsumerGroupOffsets")
+          .build();
 
       Supplier<Mono<Void>> mono = () -> {
         var cluster = getCluster(clusterName);

@@ -182,7 +186,9 @@ public class ConsumerGroupsController extends AbstractController implements ConsumerGroupsApi {
         }
       };
 
-      return validateAccess.then(mono.get());
+      return validateAccess(context)
+          .then(mono.get())
+          .doOnEach(sig -> audit(context, sig));
     }).thenReturn(ResponseEntity.ok().build());
   }
 

@@ -193,7 +199,7 @@ public class ConsumerGroupsController extends AbstractController implements ConsumerGroupsApi {
         .consumerGroups(consumerGroupConsumerGroupsPage.consumerGroups()
             .stream()
             .map(ConsumerGroupMapper::toDto)
-            .collect(Collectors.toList()));
+            .toList());
   }
 
 }
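getConsumerGroupsPage normalizes its paging input with Optional.ofNullable(page).filter(i -> i > 0).orElse(1), so null, zero, and negative page numbers all collapse to page 1. A quick check of the idiom in isolation (the helper class is illustrative):

import java.util.Optional;

public class PageDefaultDemo {
  static int normalize(Integer page) {
    return Optional.ofNullable(page).filter(i -> i > 0).orElse(1);
  }

  public static void main(String[] args) {
    System.out.println(normalize(null)); // 1
    System.out.println(normalize(0));    // 1
    System.out.println(normalize(-5));   // 1
    System.out.println(normalize(3));    // 3
  }
}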
Some files were not shown because too many files have changed in this diff.