Merge branch 'master' into consumer_group_id_in_reset_offset_heading

Sungyun Hur 2023-08-03 18:31:19 +09:00 committed by GitHub
commit cefe3716e3
177 changed files with 3921 additions and 1827 deletions

.github/CODEOWNERS

@@ -14,5 +14,5 @@
# TESTS
/kafka-ui-e2e-checks/ @provectus/kafka-qa
# HELM CHARTS
/charts/ @provectus/kafka-devops
# INFRA
/.github/workflows/ @provectus/kafka-devops


@@ -1,5 +1,8 @@
blank_issues_enabled: false
contact_links:
- name: Report helm issue
url: https://github.com/provectus/kafka-ui-charts
about: Our helm charts are located in another repo. Please raise issues/PRs regarding charts in that repo.
- name: Official documentation
url: https://docs.kafka-ui.provectus.io/
about: Before reaching out for support, please refer to our documentation. Read "FAQ" and "Common problems", and also try using the search there.


@@ -1,92 +0,0 @@
name: "⎈ K8s/Helm problem report"
description: "Report a problem with k8s/helm charts/etc"
labels: ["status/triage", "scope/k8s"]
assignees: []
body:
- type: markdown
attributes:
value: |
Hi, thanks for raising the issue(s), all contributions really matter!
Please note that we'll close the issue without further explanation if you don't follow
this template or don't provide the requested information.
- type: checkboxes
id: terms
attributes:
label: Issue submitter TODO list
description: By checking these boxes, we can be sure you've done the essential things.
options:
- label: I've looked up my issue in [FAQ](https://docs.kafka-ui.provectus.io/faq/common-problems)
required: true
- label: I've searched for already existing issues [here](https://github.com/provectus/kafka-ui/issues)
required: true
- label: I've tried running `master`-labeled docker image and the issue still persists there
required: true
- label: I'm running a supported version of the application which is listed [here](https://github.com/provectus/kafka-ui/blob/master/SECURITY.md)
required: true
- type: textarea
attributes:
label: Describe the bug (actual behavior)
description: A clear and concise description of what the bug is. Use a list if there is more than one problem
validations:
required: true
- type: textarea
attributes:
label: Expected behavior
description: A clear and concise description of what you expected to happen
validations:
required: false
- type: textarea
attributes:
label: Your installation details
description: |
How do you run the app? Please provide as much info as possible:
1. App version (commit hash in the top left corner of the UI)
2. Helm chart version
3. Your application config. Please remove the sensitive info like passwords or API keys.
4. Any IAAC configs
validations:
required: true
- type: textarea
attributes:
label: Steps to reproduce
description: |
Please write down the order of the actions required to reproduce the issue.
For advanced setups or complicated issues, we might need you to provide
a minimal [reproducible example](https://stackoverflow.com/help/minimal-reproducible-example).
validations:
required: true
- type: textarea
attributes:
label: Screenshots
description: |
If applicable, add screenshots to help explain your problem
validations:
required: false
- type: textarea
attributes:
label: Logs
description: |
If applicable, *upload* logs to help explain your problem
validations:
required: false
- type: textarea
attributes:
label: Additional context
description: |
Add any other context about the problem here, e.g.:
1. Are there any alternative scenarios (different data/methods/configuration/setup) you have tried?
Were they successful, or did the same issue occur? Please provide steps as well.
2. Related issues (if there are any).
3. Logs (if available)
4. Is there any serious end-user impact caused by this issue that might otherwise be overlooked?
validations:
required: false


@@ -1,4 +1,4 @@
name: AWS Marketplace Publisher
name: "Infra: Release: AWS Marketplace Publisher"
on:
workflow_dispatch:
inputs:


@@ -1,4 +1,4 @@
name: Backend build and test
name: "Backend: PR/master build & test"
on:
push:
branches:
@@ -8,6 +8,9 @@ on:
paths:
- "kafka-ui-api/**"
- "pom.xml"
permissions:
checks: write
pull-requests: write
jobs:
build-and-test:
runs-on: ubuntu-latest
@@ -29,7 +32,7 @@ jobs:
key: ${{ runner.os }}-sonar
restore-keys: ${{ runner.os }}-sonar
- name: Build and analyze pull request target
if: ${{ github.event_name == 'pull_request_target' }}
if: ${{ github.event_name == 'pull_request' }}
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
SONAR_TOKEN: ${{ secrets.SONAR_TOKEN_BACKEND }}


@@ -1,4 +1,4 @@
name: Pull Request Labels
name: "Infra: PR block merge"
on:
pull_request:
types: [opened, labeled, unlabeled, synchronize]
@@ -6,7 +6,7 @@ jobs:
block_merge:
runs-on: ubuntu-latest
steps:
- uses: mheap/github-action-required-labels@v4
- uses: mheap/github-action-required-labels@v5
with:
mode: exactly
count: 0


@@ -1,4 +1,4 @@
name: Feature testing init
name: "Infra: Feature Testing: Init env"
on:
workflow_dispatch:


@@ -1,4 +1,4 @@
name: Feature testing destroy
name: "Infra: Feature Testing: Destroy env"
on:
workflow_dispatch:
pull_request:


@@ -1,4 +1,4 @@
name: Build Docker image and push
name: "Infra: Image Testing: Deploy"
on:
workflow_dispatch:
pull_request:
@@ -64,14 +64,11 @@ jobs:
JAR_FILE=kafka-ui-api-${{ steps.build.outputs.version }}.jar
cache-from: type=local,src=/tmp/.buildx-cache
cache-to: type=local,dest=/tmp/.buildx-cache
- name: update status check
uses: Sibz/github-status-action@v1.1.6
- name: make comment with private deployment link
uses: peter-evans/create-or-update-comment@v3
with:
authToken: ${{secrets.GITHUB_TOKEN}}
context: "Image published at"
state: "success"
sha: ${{ github.event.pull_request.head.sha || github.sha }}
target_url: "public.ecr.aws/provectus/kafka-ui-custom-build:${{ steps.extract_branch.outputs.tag }}"
issue-number: ${{ github.event.pull_request.number }}
body: |
Image published at public.ecr.aws/provectus/kafka-ui-custom-build:${{ steps.extract_branch.outputs.tag }}
outputs:
tag: ${{ steps.extract_branch.outputs.tag }}


@@ -1,28 +0,0 @@
name: Prepare helm release
on:
repository_dispatch:
types: [prepare-helm-release]
jobs:
change-app-version:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- run: |
git config user.name github-actions
git config user.email github-actions@github.com
- name: Change versions
run: |
git checkout -b release-${{ github.event.client_payload.appversion}}
version=$(cat charts/kafka-ui/Chart.yaml | grep version | awk '{print $2}')
version=${version%.*}.$((${version##*.}+1))
sed -i "s/version:.*/version: ${version}/" charts/kafka-ui/Chart.yaml
sed -i "s/appVersion:.*/appVersion: ${{ github.event.client_payload.appversion}}/" charts/kafka-ui/Chart.yaml
git add charts/kafka-ui/Chart.yaml
git commit -m "release ${version}"
git push --set-upstream origin release-${{ github.event.client_payload.appversion}}
- name: Slack Notification
uses: rtCamp/action-slack-notify@v2
env:
SLACK_TITLE: "release-${{ github.event.client_payload.appversion}}"
SLACK_MESSAGE: "A new release of the helm chart has been prepared. Branch name: release-${{ github.event.client_payload.appversion}}"
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
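The "Change versions" step above bumps the chart's patch version with bash parameter expansion: ${version%.*} strips the shortest trailing ".suffix" (the patch number), while ${version##*.} keeps only that suffix, so version=${version%.*}.$((${version##*.}+1)) turns 0.7.0 into 0.7.1. A minimal Java sketch of the same computation, added here only to make the expansion explicit (the method name is hypothetical):

public class VersionBumpSketch {
  // Equivalent of: version=${version%.*}.$((${version##*.}+1))
  static String bumpPatch(String version) {
    int lastDot = version.lastIndexOf('.');
    String prefix = version.substring(0, lastDot);                 // ${version%.*}
    int patch = Integer.parseInt(version.substring(lastDot + 1));  // ${version##*.}
    return prefix + "." + (patch + 1);
  }

  public static void main(String[] args) {
    System.out.println(bumpPatch("0.7.0")); // prints 0.7.1
  }
}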


@@ -55,7 +55,7 @@ jobs:
cache-to: type=local,dest=/tmp/.buildx-cache
- name: Run CVE checks
uses: aquasecurity/trivy-action@0.10.0
uses: aquasecurity/trivy-action@0.11.2
with:
image-ref: "provectuslabs/kafka-ui:${{ steps.build.outputs.version }}"
format: "table"


@@ -1,4 +1,4 @@
name: Delete Public ECR Image
name: "Infra: Image Testing: Delete"
on:
workflow_dispatch:
pull_request:
@@ -32,9 +32,3 @@ jobs:
--repository-name kafka-ui-custom-build \
--image-ids imageTag=${{ steps.extract_branch.outputs.tag }} \
--region us-east-1
- name: make comment with private deployment link
uses: peter-evans/create-or-update-comment@v3
with:
issue-number: ${{ github.event.pull_request.number }}
body: |
Image tag public.ecr.aws/provectus/kafka-ui-custom-build:${{ steps.extract_branch.outputs.tag }} has been removed


@@ -1,4 +1,4 @@
name: Documentation URLs linter
name: "Infra: Docs: URL linter"
on:
pull_request:
types:


@@ -1,4 +1,4 @@
name: E2E Automation suite
name: "E2E: Automation suite"
on:
workflow_dispatch:
inputs:


@@ -1,4 +1,4 @@
name: E2E PR health check
name: "E2E: PR healthcheck"
on:
pull_request_target:
types: [ "opened", "edited", "reopened", "synchronize" ]
@@ -8,6 +8,8 @@ on:
- "kafka-ui-react-app/**"
- "kafka-ui-e2e-checks/**"
- "pom.xml"
permissions:
statuses: write
jobs:
build-and-test:
runs-on: ubuntu-latest
@@ -18,8 +20,8 @@ jobs:
- name: Configure AWS credentials
uses: aws-actions/configure-aws-credentials@v2
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
aws-access-key-id: ${{ secrets.S3_AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.S3_AWS_SECRET_ACCESS_KEY }}
aws-region: eu-central-1
- name: Set up environment
id: set_env_values


@@ -1,4 +1,4 @@
name: E2E Manual suite
name: "E2E: Manual suite"
on:
workflow_dispatch:
inputs:


@@ -1,4 +1,4 @@
name: E2E Weekly suite
name: "E2E: Weekly suite"
on:
schedule:
- cron: '0 1 * * 1'


@@ -1,4 +1,4 @@
name: Frontend build and test
name: "Frontend: PR/master build & test"
on:
push:
branches:
@@ -8,6 +8,9 @@ on:
paths:
- "kafka-ui-contract/**"
- "kafka-ui-react-app/**"
permissions:
checks: write
pull-requests: write
jobs:
build-and-test:
env:
@@ -24,7 +27,7 @@ jobs:
with:
version: 7.4.0
- name: Install node
uses: actions/setup-node@v3.6.0
uses: actions/setup-node@v3.7.0
with:
node-version: "16.15.0"
cache: "pnpm"


@@ -1,38 +0,0 @@
name: Helm linter
on:
pull_request:
types: ["opened", "edited", "reopened", "synchronize"]
branches:
- 'master'
paths:
- "charts/**"
jobs:
build-and-test:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- name: Helm tool installer
uses: Azure/setup-helm@v3
- name: Setup Kubeval
uses: lra/setup-kubeval@v1.0.1
# check whether the helm version in Chart.yaml was increased
- name: Check version
shell: bash
run: |
helm_version_new=$(cat charts/kafka-ui/Chart.yaml | grep version | awk '{print $2}')
helm_version_old=$(curl -s https://raw.githubusercontent.com/provectus/kafka-ui/master/charts/kafka-ui/Chart.yaml | grep version | awk '{print $2}' )
echo $helm_version_old
echo $helm_version_new
if [[ "$helm_version_new" > "$helm_version_old" ]]; then exit 0 ; else exit 1 ; fi
- name: Run kubeval
shell: bash
run: |
sed -i "s@enabled: false@enabled: true@g" charts/kafka-ui/values.yaml
K8S_VERSIONS=$(git ls-remote --refs --tags https://github.com/kubernetes/kubernetes.git | cut -d/ -f3 | grep -e '^v1\.[0-9]\{2\}\.[0]\{1,2\}$' | grep -v -e '^v1\.1[0-7]\{1\}' | cut -c2-)
echo "NEXT K8S VERSIONS ARE GOING TO BE TESTED: $K8S_VERSIONS"
echo ""
for version in $K8S_VERSIONS
do
echo $version;
helm template --kube-version $version --set ingress.enabled=true charts/kafka-ui -f charts/kafka-ui/values.yaml | kubeval --additional-schema-locations https://raw.githubusercontent.com/yannh/kubernetes-json-schema/master --strict -v $version;
done


@@ -1,4 +1,4 @@
name: Master branch build & deploy
name: "Master: Build & deploy"
on:
workflow_dispatch:
push:
@@ -58,6 +58,7 @@ jobs:
builder: ${{ steps.buildx.outputs.name }}
context: kafka-ui-api
platforms: linux/amd64,linux/arm64
provenance: false
push: true
tags: |
provectuslabs/kafka-ui:${{ steps.build.outputs.version }}


@@ -1,13 +1,14 @@
name: "PR Checklist checked"
name: "PR: Checklist linter"
on:
pull_request_target:
types: [opened, edited, synchronize, reopened]
permissions:
checks: write
jobs:
task-check:
runs-on: ubuntu-latest
steps:
- uses: kentaro-m/task-completed-checker-action@v0.1.1
- uses: kentaro-m/task-completed-checker-action@v0.1.2
with:
repo-token: "${{ secrets.GITHUB_TOKEN }}"
- uses: dekinderfiets/pr-description-enforcer@0.0.1


@@ -1,39 +0,0 @@
name: Release helm
on:
push:
branches:
- master
paths:
- "charts/**"
jobs:
release-helm:
runs-on:
ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 1
- run: |
git config user.name github-actions
git config user.email github-actions@github.com
- uses: azure/setup-helm@v3
- name: add chart # release helm with new version
run: |
VERSION=$(cat charts/kafka-ui/Chart.yaml | grep version | awk '{print $2}')
echo "HELM_VERSION=$(echo ${VERSION})" >> $GITHUB_ENV
MSG=$(helm package charts/kafka-ui)
git fetch origin
git stash
git checkout -b gh-pages origin/gh-pages
git pull
helm repo index .
git add -f ${MSG##*/} index.yaml
git commit -m "release ${VERSION}"
git push
- uses: rickstaa/action-create-tag@v1 #create new tag
with:
tag: "charts/kafka-ui-${{ env.HELM_VERSION }}"


@@ -1,4 +1,4 @@
name: Release serde api
name: "Infra: Release: Serde API"
on: workflow_dispatch
jobs:


@@ -1,4 +1,4 @@
name: Release
name: "Infra: Release"
on:
release:
types: [published]
@@ -34,7 +34,7 @@ jobs:
echo "version=${VERSION}" >> $GITHUB_OUTPUT
- name: Upload files to a GitHub release
uses: svenstaro/upload-release-action@2.5.0
uses: svenstaro/upload-release-action@2.6.1
with:
repo_token: ${{ secrets.GITHUB_TOKEN }}
file: kafka-ui-api/target/kafka-ui-api-${{ steps.build.outputs.version }}.jar


@@ -1,4 +1,4 @@
name: Release Drafter
name: "Infra: Release Drafter run"
on:
push:


@@ -1,4 +1,4 @@
name: Separate environment create
name: "Infra: Feature Testing Public: Init env"
on:
workflow_dispatch:
inputs:


@@ -1,4 +1,4 @@
name: Separate environment remove
name: "Infra: Feature Testing Public: Destroy env"
on:
workflow_dispatch:
inputs:


@@ -1,4 +1,4 @@
name: 'Close stale issues'
name: 'Infra: Close stale issues'
on:
schedule:
- cron: '30 1 * * *'


@@ -1,4 +1,4 @@
name: Terraform deploy
name: "Infra: Terraform deploy"
on:
workflow_dispatch:
inputs:


@@ -1,4 +1,4 @@
name: Add triage label to new issues
name: "Infra: Triage: Apply triage label for issues"
on:
issues:
types:


@@ -1,4 +1,4 @@
name: Add triage label to new PRs
name: "Infra: Triage: Apply triage label for PRs"
on:
pull_request:
types:


@@ -7,7 +7,9 @@ on:
issues:
types:
- opened
permissions:
issues: write
pull-requests: write
jobs:
welcome:
runs-on: ubuntu-latest


@@ -1,4 +1,4 @@
name: "Workflow linter"
name: "Infra: Workflow linter"
on:
pull_request:
types:

.gitignore

@@ -31,6 +31,9 @@ build/
.vscode/
/kafka-ui-api/app/node
### SDKMAN ###
.sdkmanrc
.DS_Store
*.code-workspace


@@ -18,6 +18,10 @@
<a href="https://www.producthunt.com/products/ui-for-apache-kafka/reviews/new">ProductHunt</a>
</p>
<p align="center">
<img src="https://repobeats.axiom.co/api/embed/2e8a7c2d711af9daddd34f9791143e7554c35d0f.svg" />
</p>
#### UI for Apache Kafka is a free, open-source web UI to monitor and manage Apache Kafka clusters.
UI for Apache Kafka is a simple tool that makes your data flows observable, helps you find and troubleshoot issues faster, and delivers optimal performance. Its lightweight dashboard makes it easy to track key metrics of your Kafka clusters: Brokers, Topics, Partitions, Production, and Consumption.


@@ -1,25 +0,0 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/
example/
README.md


@@ -1,7 +0,0 @@
apiVersion: v2
name: kafka-ui
description: A Helm chart for kafka-UI
type: application
version: 0.7.0
appVersion: v0.7.0
icon: https://github.com/provectus/kafka-ui/raw/master/documentation/images/kafka-ui-logo.png


@@ -1 +0,0 @@
Please refer to our [documentation](https://docs.kafka-ui.provectus.io/configuration/helm-charts) to get some info on our helm charts.


@@ -1,3 +0,0 @@
apiVersion: v1
entries: {}
generated: "2021-11-11T12:26:08.479581+03:00"


@@ -1,21 +0,0 @@
1. Get the application URL by running these commands:
{{- if .Values.ingress.enabled }}
{{- range $host := .Values.ingress.hosts }}
{{- range .paths }}
http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ . }}
{{- end }}
{{- end }}
{{- else if contains "NodePort" .Values.service.type }}
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "kafka-ui.fullname" . }})
export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.service.type }}
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
You can watch its status by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "kafka-ui.fullname" . }}'
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "kafka-ui.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
echo http://$SERVICE_IP:{{ .Values.service.port }}
{{- else if contains "ClusterIP" .Values.service.type }}
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "kafka-ui.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
echo "Visit http://127.0.0.1:8080 to use your application"
kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:8080
{{- end }}


@@ -1,84 +0,0 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "kafka-ui.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "kafka-ui.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "kafka-ui.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Common labels
*/}}
{{- define "kafka-ui.labels" -}}
helm.sh/chart: {{ include "kafka-ui.chart" . }}
{{ include "kafka-ui.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}
{{/*
Selector labels
*/}}
{{- define "kafka-ui.selectorLabels" -}}
app.kubernetes.io/name: {{ include "kafka-ui.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
{{/*
Create the name of the service account to use
*/}}
{{- define "kafka-ui.serviceAccountName" -}}
{{- if .Values.serviceAccount.create }}
{{- default (include "kafka-ui.fullname" .) .Values.serviceAccount.name }}
{{- else }}
{{- default "default" .Values.serviceAccount.name }}
{{- end }}
{{- end }}
{{/*
This allows us to check if the registry of the image is specified or not.
*/}}
{{- define "kafka-ui.imageName" -}}
{{- $registryName := .Values.image.registry -}}
{{- if .Values.global }}
{{- if .Values.global.imageRegistry }}
{{- $registryName = .Values.global.imageRegistry -}}
{{- end -}}
{{- end -}}
{{- $repository := .Values.image.repository -}}
{{- $tag := .Values.image.tag | default .Chart.AppVersion -}}
{{- if $registryName }}
{{- printf "%s/%s:%s" $registryName $repository $tag -}}
{{- else }}
{{- printf "%s:%s" $repository $tag -}}
{{- end }}
{{- end -}}


@@ -1,10 +0,0 @@
{{- if .Values.envs.config -}}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ include "kafka-ui.fullname" . }}
labels:
{{- include "kafka-ui.labels" . | nindent 4 }}
data:
{{- toYaml .Values.envs.config | nindent 2 }}
{{- end -}}


@@ -1,11 +0,0 @@
{{- if .Values.yamlApplicationConfig -}}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ include "kafka-ui.fullname" . }}-fromvalues
labels:
{{- include "kafka-ui.labels" . | nindent 4 }}
data:
config.yml: |-
{{- toYaml .Values.yamlApplicationConfig | nindent 4}}
{{ end }}


@@ -1,150 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "kafka-ui.fullname" . }}
labels:
{{- include "kafka-ui.labels" . | nindent 4 }}
{{- with .Values.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
{{- if not .Values.autoscaling.enabled }}
replicas: {{ .Values.replicaCount }}
{{- end }}
selector:
matchLabels:
{{- include "kafka-ui.selectorLabels" . | nindent 6 }}
template:
metadata:
annotations:
{{- with .Values.podAnnotations }}
{{- toYaml . | nindent 8 }}
{{- end }}
checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }}
checksum/configFromValues: {{ include (print $.Template.BasePath "/configmap_fromValues.yaml") . | sha256sum }}
checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }}
labels:
{{- include "kafka-ui.selectorLabels" . | nindent 8 }}
{{- if .Values.podLabels }}
{{- toYaml .Values.podLabels | nindent 8 }}
{{- end }}
spec:
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.initContainers }}
initContainers:
{{- toYaml . | nindent 8 }}
{{- end }}
serviceAccountName: {{ include "kafka-ui.serviceAccountName" . }}
securityContext:
{{- toYaml .Values.podSecurityContext | nindent 8 }}
containers:
- name: {{ .Chart.Name }}
securityContext:
{{- toYaml .Values.securityContext | nindent 12 }}
image: {{ include "kafka-ui.imageName" . }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
{{- if or .Values.env .Values.yamlApplicationConfig .Values.yamlApplicationConfigConfigMap}}
env:
{{- with .Values.env }}
{{- toYaml . | nindent 12 }}
{{- end }}
{{- if or .Values.yamlApplicationConfig .Values.yamlApplicationConfigConfigMap}}
- name: SPRING_CONFIG_ADDITIONAL-LOCATION
{{- if .Values.yamlApplicationConfig }}
value: /kafka-ui/config.yml
{{- else if .Values.yamlApplicationConfigConfigMap }}
value: /kafka-ui/{{ .Values.yamlApplicationConfigConfigMap.keyName | default "config.yml" }}
{{- end }}
{{- end }}
{{- end }}
envFrom:
{{- if .Values.existingConfigMap }}
- configMapRef:
name: {{ .Values.existingConfigMap }}
{{- end }}
{{- if .Values.envs.config }}
- configMapRef:
name: {{ include "kafka-ui.fullname" . }}
{{- end }}
{{- if .Values.existingSecret }}
- secretRef:
name: {{ .Values.existingSecret }}
{{- end }}
{{- if .Values.envs.secret}}
- secretRef:
name: {{ include "kafka-ui.fullname" . }}
{{- end}}
ports:
- name: http
containerPort: 8080
protocol: TCP
livenessProbe:
httpGet:
{{- $contextPath := .Values.envs.config.SERVER_SERVLET_CONTEXT_PATH | default "" | printf "%s/actuator/health" | urlParse }}
path: {{ get $contextPath "path" }}
port: http
{{- if .Values.probes.useHttpsScheme }}
scheme: HTTPS
{{- end }}
initialDelaySeconds: 60
periodSeconds: 30
timeoutSeconds: 10
readinessProbe:
httpGet:
{{- $contextPath := .Values.envs.config.SERVER_SERVLET_CONTEXT_PATH | default "" | printf "%s/actuator/health" | urlParse }}
path: {{ get $contextPath "path" }}
port: http
{{- if .Values.probes.useHttpsScheme }}
scheme: HTTPS
{{- end }}
initialDelaySeconds: 60
periodSeconds: 30
timeoutSeconds: 10
resources:
{{- toYaml .Values.resources | nindent 12 }}
{{- if or .Values.yamlApplicationConfig .Values.volumeMounts .Values.yamlApplicationConfigConfigMap}}
volumeMounts:
{{- with .Values.volumeMounts }}
{{- toYaml . | nindent 12 }}
{{- end }}
{{- if .Values.yamlApplicationConfig }}
- name: kafka-ui-yaml-conf
mountPath: /kafka-ui/
{{- end }}
{{- if .Values.yamlApplicationConfigConfigMap}}
- name: kafka-ui-yaml-conf-configmap
mountPath: /kafka-ui/
{{- end }}
{{- end }}
{{- if or .Values.yamlApplicationConfig .Values.volumes .Values.yamlApplicationConfigConfigMap}}
volumes:
{{- with .Values.volumes }}
{{- toYaml . | nindent 8 }}
{{- end }}
{{- if .Values.yamlApplicationConfig }}
- name: kafka-ui-yaml-conf
configMap:
name: {{ include "kafka-ui.fullname" . }}-fromvalues
{{- end }}
{{- if .Values.yamlApplicationConfigConfigMap}}
- name: kafka-ui-yaml-conf-configmap
configMap:
name: {{ .Values.yamlApplicationConfigConfigMap.name }}
{{- end }}
{{- end }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}


@@ -1,46 +0,0 @@
{{- if .Values.autoscaling.enabled }}
{{- $kubeCapabilityVersion := semver .Capabilities.KubeVersion.Version -}}
{{- $isHigher1p25 := ge (semver "1.25" | $kubeCapabilityVersion.Compare) 0 -}}
{{- if and ($.Capabilities.APIVersions.Has "autoscaling/v2") $isHigher1p25 -}}
apiVersion: autoscaling/v2
{{- else }}
apiVersion: autoscaling/v2beta1
{{- end }}
kind: HorizontalPodAutoscaler
metadata:
name: {{ include "kafka-ui.fullname" . }}
labels:
{{- include "kafka-ui.labels" . | nindent 4 }}
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: {{ include "kafka-ui.fullname" . }}
minReplicas: {{ .Values.autoscaling.minReplicas }}
maxReplicas: {{ .Values.autoscaling.maxReplicas }}
metrics:
{{- if .Values.autoscaling.targetCPUUtilizationPercentage }}
- type: Resource
resource:
name: cpu
{{- if $isHigher1p25 }}
target:
type: Utilization
averageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }}
{{- else }}
targetAverageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }}
{{- end }}
{{- end }}
{{- if .Values.autoscaling.targetMemoryUtilizationPercentage }}
- type: Resource
resource:
name: memory
{{- if $isHigher1p25 }}
target:
type: Utilization
averageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }}
{{- else }}
targetAverageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }}
{{- end }}
{{- end }}
{{- end }}


@@ -1,89 +0,0 @@
{{- if .Values.ingress.enabled -}}
{{- $fullName := include "kafka-ui.fullname" . -}}
{{- $svcPort := .Values.service.port -}}
{{- $kubeCapabilityVersion := semver .Capabilities.KubeVersion.Version -}}
{{- $isHigher1p19 := ge (semver "1.19" | $kubeCapabilityVersion.Compare) 0 -}}
{{- if and ($.Capabilities.APIVersions.Has "networking.k8s.io/v1") $isHigher1p19 -}}
apiVersion: networking.k8s.io/v1
{{- else if $.Capabilities.APIVersions.Has "networking.k8s.io/v1beta1" }}
apiVersion: networking.k8s.io/v1beta1
{{- else }}
apiVersion: extensions/v1beta1
{{- end }}
kind: Ingress
metadata:
name: {{ $fullName }}
labels:
{{- include "kafka-ui.labels" . | nindent 4 }}
{{- with .Values.ingress.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
{{- if .Values.ingress.tls.enabled }}
tls:
- hosts:
- {{ tpl .Values.ingress.host . }}
secretName: {{ .Values.ingress.tls.secretName }}
{{- end }}
{{- if .Values.ingress.ingressClassName }}
ingressClassName: {{ .Values.ingress.ingressClassName }}
{{- end }}
rules:
- http:
paths:
{{- if and ($.Capabilities.APIVersions.Has "networking.k8s.io/v1") $isHigher1p19 -}}
{{- range .Values.ingress.precedingPaths }}
- path: {{ .path }}
pathType: {{ .Values.ingress.pathType }}
backend:
service:
name: {{ .serviceName }}
port:
number: {{ .servicePort }}
{{- end }}
- backend:
service:
name: {{ $fullName }}
port:
number: {{ $svcPort }}
pathType: {{ .Values.ingress.pathType }}
{{- if .Values.ingress.path }}
path: {{ .Values.ingress.path }}
{{- end }}
{{- range .Values.ingress.succeedingPaths }}
- path: {{ .path }}
pathType: {{ .Values.ingress.pathType }}
backend:
service:
name: {{ .serviceName }}
port:
number: {{ .servicePort }}
{{- end }}
{{- if tpl .Values.ingress.host . }}
host: {{tpl .Values.ingress.host . }}
{{- end }}
{{- else -}}
{{- range .Values.ingress.precedingPaths }}
- path: {{ .path }}
backend:
serviceName: {{ .serviceName }}
servicePort: {{ .servicePort }}
{{- end }}
- backend:
serviceName: {{ $fullName }}
servicePort: {{ $svcPort }}
{{- if .Values.ingress.path }}
path: {{ .Values.ingress.path }}
{{- end }}
{{- range .Values.ingress.succeedingPaths }}
- path: {{ .path }}
backend:
serviceName: {{ .serviceName }}
servicePort: {{ .servicePort }}
{{- end }}
{{- if tpl .Values.ingress.host . }}
host: {{ tpl .Values.ingress.host . }}
{{- end }}
{{- end }}
{{- end }}


@@ -1,18 +0,0 @@
{{- if and .Values.networkPolicy.enabled .Values.networkPolicy.egressRules.customRules }}
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: {{ printf "%s-egress" (include "kafka-ui.fullname" .) }}
labels:
{{- include "kafka-ui.labels" . | nindent 4 }}
spec:
podSelector:
matchLabels:
{{- include "kafka-ui.selectorLabels" . | nindent 6 }}
policyTypes:
- Egress
egress:
{{- if .Values.networkPolicy.egressRules.customRules }}
{{- toYaml .Values.networkPolicy.egressRules.customRules | nindent 4 }}
{{- end }}
{{- end }}


@@ -1,18 +0,0 @@
{{- if and .Values.networkPolicy.enabled .Values.networkPolicy.ingressRules.customRules }}
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: {{ printf "%s-ingress" (include "kafka-ui.fullname" .) }}
labels:
{{- include "kafka-ui.labels" . | nindent 4 }}
spec:
podSelector:
matchLabels:
{{- include "kafka-ui.selectorLabels" . | nindent 6 }}
policyTypes:
- Ingress
ingress:
{{- if .Values.networkPolicy.ingressRules.customRules }}
{{- toYaml .Values.networkPolicy.ingressRules.customRules | nindent 4 }}
{{- end }}
{{- end }}


@@ -1,13 +0,0 @@
{{- if .Values.envs.secret -}}
apiVersion: v1
kind: Secret
metadata:
name: {{ include "kafka-ui.fullname" . }}
labels:
{{- include "kafka-ui.labels" . | nindent 4 }}
type: Opaque
data:
{{- range $key, $val := .Values.envs.secret }}
{{ $key }}: {{ $val | b64enc | quote }}
{{- end -}}
{{- end}}


@@ -1,22 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: {{ include "kafka-ui.fullname" . }}
labels:
{{- include "kafka-ui.labels" . | nindent 4 }}
{{- if .Values.service.annotations }}
annotations:
{{ toYaml .Values.service.annotations | nindent 4 }}
{{- end }}
spec:
type: {{ .Values.service.type }}
ports:
- port: {{ .Values.service.port }}
targetPort: http
protocol: TCP
name: http
{{- if (and (eq .Values.service.type "NodePort") .Values.service.nodePort) }}
nodePort: {{ .Values.service.nodePort }}
{{- end }}
selector:
{{- include "kafka-ui.selectorLabels" . | nindent 4 }}


@@ -1,12 +0,0 @@
{{- if .Values.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "kafka-ui.serviceAccountName" . }}
labels:
{{- include "kafka-ui.labels" . | nindent 4 }}
{{- with .Values.serviceAccount.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- end }}


@@ -1,161 +0,0 @@
replicaCount: 1
image:
registry: docker.io
repository: provectuslabs/kafka-ui
pullPolicy: IfNotPresent
# Overrides the image tag whose default is the chart appVersion.
tag: ""
imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""
serviceAccount:
# Specifies whether a service account should be created
create: true
# Annotations to add to the service account
annotations: {}
# The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template
name: ""
existingConfigMap: ""
yamlApplicationConfig:
{}
# kafka:
# clusters:
# - name: yaml
# bootstrapServers: kafka-service:9092
# spring:
# security:
# oauth2:
# auth:
# type: disabled
# management:
# health:
# ldap:
# enabled: false
yamlApplicationConfigConfigMap:
{}
# keyName: config.yml
# name: configMapName
existingSecret: ""
envs:
secret: {}
config: {}
networkPolicy:
enabled: false
egressRules:
## Additional custom egress rules
## e.g:
## customRules:
## - to:
## - namespaceSelector:
## matchLabels:
## label: example
customRules: []
ingressRules:
## Additional custom ingress rules
## e.g:
## customRules:
## - from:
## - namespaceSelector:
## matchLabels:
## label: example
customRules: []
podAnnotations: {}
podLabels: {}
## Annotations to be added to kafka-ui Deployment
##
annotations: {}
## Set the probe scheme to HTTPS for the readiness and liveness probes
##
probes:
useHttpsScheme: false
podSecurityContext:
{}
# fsGroup: 2000
securityContext:
{}
# capabilities:
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# runAsUser: 1000
service:
type: ClusterIP
port: 80
# if you want to force a specific nodePort; must be used with service.type=NodePort
# nodePort:
# Ingress configuration
ingress:
# Enable ingress resource
enabled: false
# Annotations for the Ingress
annotations: {}
# ingressClassName for the Ingress
ingressClassName: ""
# The path for the Ingress
path: "/"
# The path type for the Ingress
pathType: "Prefix"
# The hostname for the Ingress
host: ""
# configs for Ingress TLS
tls:
# Enable TLS termination for the Ingress
enabled: false
# the name of a pre-created Secret containing a TLS private key and certificate
secretName: ""
# HTTP paths to add to the Ingress before the default path
precedingPaths: []
# Http paths to add to the Ingress after the default path
succeedingPaths: []
resources:
{}
# limits:
# cpu: 200m
# memory: 512Mi
# requests:
# cpu: 200m
# memory: 256Mi
autoscaling:
enabled: false
minReplicas: 1
maxReplicas: 100
targetCPUUtilizationPercentage: 80
# targetMemoryUtilizationPercentage: 80
nodeSelector: {}
tolerations: []
affinity: {}
env: {}
initContainers: {}
volumeMounts: {}
volumes: {}


@@ -1,2 +1,2 @@
rules:
- pattern: ".*"
- pattern: ".*"


@@ -20,6 +20,8 @@ services:
KAFKA_CLUSTERS_0_KAFKACONNECT_0_NAME: first
KAFKA_CLUSTERS_0_KAFKACONNECT_0_ADDRESS: http://kafka-connect0:8083
DYNAMIC_CONFIG_ENABLED: 'true' # not necessary, added for tests
KAFKA_CLUSTERS_0_AUDIT_TOPICAUDITENABLED: 'true'
KAFKA_CLUSTERS_0_AUDIT_CONSOLEAUDITENABLED: 'true'
kafka0:
image: confluentinc/cp-kafka:7.2.1.arm64


@@ -91,7 +91,7 @@
<dependency>
<groupId>software.amazon.msk</groupId>
<artifactId>aws-msk-iam-auth</artifactId>
<version>1.1.6</version>
<version>1.1.7</version>
</dependency>
<dependency>
@@ -114,6 +114,11 @@
<artifactId>json</artifactId>
<version>${org.json.version}</version>
</dependency>
<dependency>
<groupId>io.micrometer</groupId>
<artifactId>micrometer-registry-prometheus</artifactId>
<scope>runtime</scope>
</dependency>
<dependency>
<groupId>org.springframework.boot</groupId>
@@ -311,7 +316,7 @@
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-checkstyle-plugin</artifactId>
<version>3.1.2</version>
<version>3.3.0</version>
<dependencies>
<dependency>
<groupId>com.puppycrawl.tools</groupId>


@@ -51,6 +51,7 @@ public class ClustersProperties {
List<Masking> masking;
Long pollingThrottleRate;
TruststoreConfig ssl;
AuditProperties audit;
}
@Data
@@ -143,6 +144,17 @@
}
}
@Data
@NoArgsConstructor
@AllArgsConstructor
public static class AuditProperties {
String topic;
Integer auditTopicsPartitions;
Boolean topicAuditEnabled;
Boolean consoleAuditEnabled;
Map<String, String> auditTopicProperties;
}
@PostConstruct
public void validateAndSetDefaults() {
if (clusters != null) {
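The new AuditProperties fields are what the KAFKA_CLUSTERS_0_AUDIT_* environment variables in the docker-compose change above bind to, via Spring Boot's relaxed binding. A minimal sketch of that mechanism (the property source is constructed inline purely for illustration; the real application binds the actual process environment):

import java.util.Map;
import org.springframework.boot.context.properties.bind.Bindable;
import org.springframework.boot.context.properties.bind.Binder;
import org.springframework.core.env.MapPropertySource;
import org.springframework.core.env.StandardEnvironment;

public class AuditBindingSketch {
  public static void main(String[] args) {
    var env = new StandardEnvironment();
    // KAFKA_CLUSTERS_0_AUDIT_TOPICAUDITENABLED=true relaxes to this property name:
    env.getPropertySources().addFirst(new MapPropertySource("demo",
        Map.<String, Object>of("kafka.clusters[0].audit.topic-audit-enabled", "true")));
    Boolean topicAuditEnabled = Binder.get(env)
        .bind("kafka.clusters[0].audit.topic-audit-enabled", Bindable.of(Boolean.class))
        .get();
    System.out.println(topicAuditEnabled); // true -> AuditProperties.topicAuditEnabled
  }
}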


@@ -1,18 +1,41 @@
package com.provectus.kafka.ui.config;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.http.HttpHeaders;
import org.springframework.http.HttpMethod;
import org.springframework.http.HttpStatus;
import org.springframework.http.server.reactive.ServerHttpRequest;
import org.springframework.http.server.reactive.ServerHttpResponse;
import org.springframework.web.reactive.config.CorsRegistry;
import org.springframework.web.reactive.config.WebFluxConfigurer;
import org.springframework.web.server.ServerWebExchange;
import org.springframework.web.server.WebFilter;
import org.springframework.web.server.WebFilterChain;
import reactor.core.publisher.Mono;
@Configuration
public class CorsGlobalConfiguration implements WebFluxConfigurer {
public class CorsGlobalConfiguration {
@Override
public void addCorsMappings(CorsRegistry registry) {
registry.addMapping("/**")
.allowedOrigins("*")
.allowedMethods("*")
.allowedHeaders("*")
.allowCredentials(false);
@Bean
public WebFilter corsFilter() {
return (final ServerWebExchange ctx, final WebFilterChain chain) -> {
final ServerHttpRequest request = ctx.getRequest();
final ServerHttpResponse response = ctx.getResponse();
final HttpHeaders headers = response.getHeaders();
headers.add("Access-Control-Allow-Origin", "*");
headers.add("Access-Control-Allow-Methods", "GET, PUT, POST, DELETE, OPTIONS");
headers.add("Access-Control-Max-Age", "3600");
headers.add("Access-Control-Allow-Headers", "Content-Type");
if (request.getMethod() == HttpMethod.OPTIONS) {
response.setStatusCode(HttpStatus.OK);
return Mono.empty();
}
return chain.filter(ctx);
};
}
}
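The rewritten configuration swaps the WebFlux CORS mappings for a hand-rolled WebFilter: every response gets the Access-Control-* headers, and an OPTIONS preflight is answered with 200 directly instead of being passed down the chain. A quick sketch of that behavior using Spring's mock server types (a test-style illustration, assuming spring-test is on the classpath):

import org.springframework.http.HttpMethod;
import org.springframework.mock.http.server.reactive.MockServerHttpRequest;
import org.springframework.mock.web.server.MockServerWebExchange;
import reactor.core.publisher.Mono;

public class CorsFilterSketch {
  public static void main(String[] args) {
    var filter = new CorsGlobalConfiguration().corsFilter();
    var exchange = MockServerWebExchange.from(
        MockServerHttpRequest.method(HttpMethod.OPTIONS, "/api/clusters").build());
    // For a preflight request the chain must never run, so fail loudly if it does.
    filter.filter(exchange, e -> Mono.error(new AssertionError("chain should be skipped")))
        .block();
    System.out.println(exchange.getResponse().getStatusCode());  // 200 OK
    System.out.println(exchange.getResponse().getHeaders()
        .getFirst("Access-Control-Allow-Origin"));               // *
  }
}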


@@ -13,6 +13,7 @@ abstract class AbstractAuthSecurityConfig {
"/resources/**",
"/actuator/health/**",
"/actuator/info",
"/actuator/prometheus",
"/auth",
"/login",
"/logout",


@@ -7,12 +7,10 @@ import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.security.config.annotation.web.reactive.EnableWebFluxSecurity;
import org.springframework.security.config.web.server.SecurityWebFiltersOrder;
import org.springframework.security.config.web.server.ServerHttpSecurity;
import org.springframework.security.web.server.SecurityWebFilterChain;
import org.springframework.security.web.server.authentication.RedirectServerAuthenticationSuccessHandler;
import org.springframework.security.web.server.authentication.logout.RedirectServerLogoutSuccessHandler;
import org.springframework.security.web.server.ui.LogoutPageGeneratingWebFilter;
@Configuration
@EnableWebFluxSecurity
@@ -33,15 +31,17 @@ public class BasicAuthSecurityConfig extends AbstractAuthSecurityConfig {
final var logoutSuccessHandler = new RedirectServerLogoutSuccessHandler();
logoutSuccessHandler.setLogoutSuccessUrl(URI.create(LOGOUT_URL));
return http
.addFilterAfter(new LogoutPageGeneratingWebFilter(), SecurityWebFiltersOrder.REACTOR_CONTEXT)
.csrf().disable()
.authorizeExchange()
.pathMatchers(AUTH_WHITELIST).permitAll()
.anyExchange().authenticated()
.and().formLogin().loginPage(LOGIN_URL).authenticationSuccessHandler(authHandler)
.and().logout().logoutSuccessHandler(logoutSuccessHandler)
.and().build();
return http.authorizeExchange(spec -> spec
.pathMatchers(AUTH_WHITELIST)
.permitAll()
.anyExchange()
.authenticated()
)
.formLogin(spec -> spec.loginPage(LOGIN_URL).authenticationSuccessHandler(authHandler))
.logout(spec -> spec.logoutSuccessHandler(logoutSuccessHandler))
.csrf(ServerHttpSecurity.CsrfSpec::disable)
.build();
}
}


@@ -27,10 +27,12 @@ public class DisabledAuthSecurityConfig extends AbstractAuthSecurityConfig {
System.exit(1);
}
log.warn("Authentication is disabled. Access will be unrestricted.");
return http.authorizeExchange()
.anyExchange().permitAll()
.and()
.csrf().disable()
return http.authorizeExchange(spec -> spec
.anyExchange()
.permitAll()
)
.csrf(ServerHttpSecurity.CsrfSpec::disable)
.build();
}


@@ -15,6 +15,8 @@ public class LdapProperties {
private String userFilterSearchBase;
private String userFilterSearchFilter;
private String groupFilterSearchBase;
private String groupFilterSearchFilter;
private String groupRoleAttribute;
@Value("${oauth2.ldap.activeDirectory:false}")
private boolean isActiveDirectory;


@@ -3,14 +3,16 @@ package com.provectus.kafka.ui.config.auth;
import static com.provectus.kafka.ui.config.auth.AbstractAuthSecurityConfig.AUTH_WHITELIST;
import com.provectus.kafka.ui.service.rbac.AccessControlService;
import com.provectus.kafka.ui.service.rbac.extractor.RbacLdapAuthoritiesExtractor;
import java.util.Collection;
import java.util.List;
import javax.annotation.Nullable;
import java.util.Optional;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.boot.autoconfigure.ldap.LdapAutoConfiguration;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.context.ApplicationContext;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.annotation.Import;
@@ -22,6 +24,7 @@ import org.springframework.security.authentication.AuthenticationManager;
import org.springframework.security.authentication.ProviderManager;
import org.springframework.security.authentication.ReactiveAuthenticationManager;
import org.springframework.security.authentication.ReactiveAuthenticationManagerAdapter;
import org.springframework.security.config.Customizer;
import org.springframework.security.config.annotation.web.reactive.EnableWebFluxSecurity;
import org.springframework.security.config.web.server.ServerHttpSecurity;
import org.springframework.security.core.GrantedAuthority;
@@ -50,9 +53,9 @@ public class LdapSecurityConfig {
@Bean
public ReactiveAuthenticationManager authenticationManager(BaseLdapPathContextSource contextSource,
LdapAuthoritiesPopulator ldapAuthoritiesPopulator,
@Nullable AccessControlService acs) {
var rbacEnabled = acs != null && acs.isRbacEnabled();
LdapAuthoritiesPopulator authoritiesExtractor,
AccessControlService acs) {
var rbacEnabled = acs.isRbacEnabled();
BindAuthenticator ba = new BindAuthenticator(contextSource);
if (props.getBase() != null) {
ba.setUserDnPatterns(new String[] {props.getBase()});
@@ -67,7 +70,7 @@
AbstractLdapAuthenticationProvider authenticationProvider;
if (!props.isActiveDirectory()) {
authenticationProvider = rbacEnabled
? new LdapAuthenticationProvider(ba, ldapAuthoritiesPopulator)
? new LdapAuthenticationProvider(ba, authoritiesExtractor)
: new LdapAuthenticationProvider(ba);
} else {
authenticationProvider = new ActiveDirectoryLdapAuthenticationProvider(props.getActiveDirectoryDomain(),
@@ -97,11 +100,24 @@
@Bean
@Primary
public LdapAuthoritiesPopulator ldapAuthoritiesPopulator(BaseLdapPathContextSource contextSource) {
var authoritiesPopulator = new DefaultLdapAuthoritiesPopulator(contextSource, props.getGroupFilterSearchBase());
authoritiesPopulator.setRolePrefix("");
authoritiesPopulator.setConvertToUpperCase(false);
return authoritiesPopulator;
public DefaultLdapAuthoritiesPopulator ldapAuthoritiesExtractor(ApplicationContext context,
BaseLdapPathContextSource contextSource,
AccessControlService acs) {
var rbacEnabled = acs != null && acs.isRbacEnabled();
DefaultLdapAuthoritiesPopulator extractor;
if (rbacEnabled) {
extractor = new RbacLdapAuthoritiesExtractor(context, contextSource, props.getGroupFilterSearchBase());
} else {
extractor = new DefaultLdapAuthoritiesPopulator(contextSource, props.getGroupFilterSearchBase());
}
Optional.ofNullable(props.getGroupFilterSearchFilter()).ifPresent(extractor::setGroupSearchFilter);
extractor.setRolePrefix("");
extractor.setConvertToUpperCase(false);
extractor.setSearchSubtree(true);
return extractor;
}
@Bean
@@ -111,21 +127,15 @@
log.info("Active Directory support for LDAP has been enabled.");
}
return http
.authorizeExchange()
.pathMatchers(AUTH_WHITELIST)
.permitAll()
.anyExchange()
.authenticated()
.and()
.formLogin()
.and()
.logout()
.and()
.csrf().disable()
return http.authorizeExchange(spec -> spec
.pathMatchers(AUTH_WHITELIST)
.permitAll()
.anyExchange()
.authenticated()
)
.formLogin(Customizer.withDefaults())
.logout(Customizer.withDefaults())
.csrf(ServerHttpSecurity.CsrfSpec::disable)
.build();
}


@@ -12,10 +12,11 @@ import lombok.extern.log4j.Log4j2;
import org.jetbrains.annotations.Nullable;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.boot.autoconfigure.security.oauth2.client.OAuth2ClientProperties;
import org.springframework.boot.autoconfigure.security.oauth2.client.OAuth2ClientPropertiesRegistrationAdapter;
import org.springframework.boot.autoconfigure.security.oauth2.client.OAuth2ClientPropertiesMapper;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.security.config.Customizer;
import org.springframework.security.config.annotation.method.configuration.EnableReactiveMethodSecurity;
import org.springframework.security.config.annotation.web.reactive.EnableWebFluxSecurity;
import org.springframework.security.config.web.server.ServerHttpSecurity;
@@ -49,21 +50,15 @@
public SecurityWebFilterChain configure(ServerHttpSecurity http, OAuthLogoutSuccessHandler logoutHandler) {
log.info("Configuring OAUTH2 authentication.");
return http.authorizeExchange()
.pathMatchers(AUTH_WHITELIST)
.permitAll()
.anyExchange()
.authenticated()
.and()
.oauth2Login()
.and()
.logout()
.logoutSuccessHandler(logoutHandler)
.and()
.csrf().disable()
return http.authorizeExchange(spec -> spec
.pathMatchers(AUTH_WHITELIST)
.permitAll()
.anyExchange()
.authenticated()
)
.oauth2Login(Customizer.withDefaults())
.logout(spec -> spec.logoutSuccessHandler(logoutHandler))
.csrf(ServerHttpSecurity.CsrfSpec::disable)
.build();
}
@@ -103,7 +98,10 @@
public InMemoryReactiveClientRegistrationRepository clientRegistrationRepository() {
final OAuth2ClientProperties props = OAuthPropertiesConverter.convertProperties(properties);
final List<ClientRegistration> registrations =
new ArrayList<>(OAuth2ClientPropertiesRegistrationAdapter.getClientRegistrations(props).values());
new ArrayList<>(new OAuth2ClientPropertiesMapper(props).asClientRegistrations().values());
if (registrations.isEmpty()) {
throw new IllegalArgumentException("OAuth2 authentication is enabled but no providers specified.");
}
return new InMemoryReactiveClientRegistrationRepository(registrations);
}


@@ -1,13 +1,14 @@
package com.provectus.kafka.ui.config.auth.condition;
import com.provectus.kafka.ui.service.rbac.AbstractProviderCondition;
import org.jetbrains.annotations.NotNull;
import org.springframework.context.annotation.Condition;
import org.springframework.context.annotation.ConditionContext;
import org.springframework.core.type.AnnotatedTypeMetadata;
public class CognitoCondition extends AbstractProviderCondition implements Condition {
@Override
public boolean matches(final ConditionContext context, final AnnotatedTypeMetadata metadata) {
public boolean matches(final ConditionContext context, final @NotNull AnnotatedTypeMetadata metadata) {
return getRegisteredProvidersTypes(context.getEnvironment()).stream().anyMatch(a -> a.equalsIgnoreCase("cognito"));
}
}
}


@@ -2,12 +2,16 @@ package com.provectus.kafka.ui.controller;
import com.provectus.kafka.ui.api.AclsApi;
import com.provectus.kafka.ui.mapper.ClusterMapper;
import com.provectus.kafka.ui.model.CreateConsumerAclDTO;
import com.provectus.kafka.ui.model.CreateProducerAclDTO;
import com.provectus.kafka.ui.model.CreateStreamAppAclDTO;
import com.provectus.kafka.ui.model.KafkaAclDTO;
import com.provectus.kafka.ui.model.KafkaAclNamePatternTypeDTO;
import com.provectus.kafka.ui.model.KafkaAclResourceTypeDTO;
import com.provectus.kafka.ui.model.rbac.AccessContext;
import com.provectus.kafka.ui.model.rbac.permission.AclAction;
import com.provectus.kafka.ui.service.acl.AclsService;
import com.provectus.kafka.ui.service.audit.AuditService;
import com.provectus.kafka.ui.service.rbac.AccessControlService;
import java.util.Optional;
import lombok.RequiredArgsConstructor;
@@ -26,6 +30,7 @@ public class AclsController extends AbstractController implements AclsApi {
private final AclsService aclsService;
private final AccessControlService accessControlService;
private final AuditService auditService;
@Override
public Mono<ResponseEntity<Void>> createAcl(String clusterName, Mono<KafkaAclDTO> kafkaAclDto,
@@ -33,12 +38,14 @@
AccessContext context = AccessContext.builder()
.cluster(clusterName)
.aclActions(AclAction.EDIT)
.operationName("createAcl")
.build();
return accessControlService.validateAccess(context)
.then(kafkaAclDto)
.map(ClusterMapper::toAclBinding)
.flatMap(binding -> aclsService.createAcl(getCluster(clusterName), binding))
.doOnEach(sig -> auditService.audit(context, sig))
.thenReturn(ResponseEntity.ok().build());
}
@@ -48,12 +55,14 @@
AccessContext context = AccessContext.builder()
.cluster(clusterName)
.aclActions(AclAction.EDIT)
.operationName("deleteAcl")
.build();
return accessControlService.validateAccess(context)
.then(kafkaAclDto)
.map(ClusterMapper::toAclBinding)
.flatMap(binding -> aclsService.deleteAcl(getCluster(clusterName), binding))
.doOnEach(sig -> auditService.audit(context, sig))
.thenReturn(ResponseEntity.ok().build());
}
@@ -66,6 +75,7 @@
AccessContext context = AccessContext.builder()
.cluster(clusterName)
.aclActions(AclAction.VIEW)
.operationName("listAcls")
.build();
var resourceType = Optional.ofNullable(resourceTypeDto)
@@ -83,7 +93,7 @@
ResponseEntity.ok(
aclsService.listAcls(getCluster(clusterName), filter)
.map(ClusterMapper::toKafkaAclDto)))
);
).doOnEach(sig -> auditService.audit(context, sig));
}
@Override
@@ -91,12 +101,14 @@
AccessContext context = AccessContext.builder()
.cluster(clusterName)
.aclActions(AclAction.VIEW)
.operationName("getAclAsCsv")
.build();
return accessControlService.validateAccess(context).then(
aclsService.getAclAsCsvString(getCluster(clusterName))
.map(ResponseEntity::ok)
.flatMap(Mono::just)
.doOnEach(sig -> auditService.audit(context, sig))
);
}
@@ -105,11 +117,64 @@
AccessContext context = AccessContext.builder()
.cluster(clusterName)
.aclActions(AclAction.EDIT)
.operationName("syncAclsCsv")
.build();
return accessControlService.validateAccess(context)
.then(csvMono)
.flatMap(csv -> aclsService.syncAclWithAclCsv(getCluster(clusterName), csv))
.doOnEach(sig -> auditService.audit(context, sig))
.thenReturn(ResponseEntity.ok().build());
}
@Override
public Mono<ResponseEntity<Void>> createConsumerAcl(String clusterName,
Mono<CreateConsumerAclDTO> createConsumerAclDto,
ServerWebExchange exchange) {
AccessContext context = AccessContext.builder()
.cluster(clusterName)
.aclActions(AclAction.EDIT)
.operationName("createConsumerAcl")
.build();
return accessControlService.validateAccess(context)
.then(createConsumerAclDto)
.flatMap(req -> aclsService.createConsumerAcl(getCluster(clusterName), req))
.doOnEach(sig -> auditService.audit(context, sig))
.thenReturn(ResponseEntity.ok().build());
}
@Override
public Mono<ResponseEntity<Void>> createProducerAcl(String clusterName,
Mono<CreateProducerAclDTO> createProducerAclDto,
ServerWebExchange exchange) {
AccessContext context = AccessContext.builder()
.cluster(clusterName)
.aclActions(AclAction.EDIT)
.operationName("createProducerAcl")
.build();
return accessControlService.validateAccess(context)
.then(createProducerAclDto)
.flatMap(req -> aclsService.createProducerAcl(getCluster(clusterName), req))
.doOnEach(sig -> auditService.audit(context, sig))
.thenReturn(ResponseEntity.ok().build());
}
@Override
public Mono<ResponseEntity<Void>> createStreamAppAcl(String clusterName,
Mono<CreateStreamAppAclDTO> createStreamAppAclDto,
ServerWebExchange exchange) {
AccessContext context = AccessContext.builder()
.cluster(clusterName)
.aclActions(AclAction.EDIT)
.operationName("createStreamAppAcl")
.build();
return accessControlService.validateAccess(context)
.then(createStreamAppAclDto)
.flatMap(req -> aclsService.createStreamAppAcl(getCluster(clusterName), req))
.doOnEach(sig -> auditService.audit(context, sig))
.thenReturn(ResponseEntity.ok().build());
}
}
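Each endpoint above now ends with .doOnEach(sig -> auditService.audit(context, sig)). doOnEach hands the hook a reactor Signal for every event, so a single callback can record successes and failures alike without altering the pipeline's outcome. A minimal standalone sketch of that pattern (auditLog is a hypothetical stand-in for AuditService.audit):

import reactor.core.publisher.Mono;
import reactor.core.publisher.Signal;

public class AuditHookSketch {
  static void auditLog(String operation, Signal<?> sig) {
    if (sig.isOnError()) {
      System.out.println(operation + " FAILED: " + sig.getThrowable());
    } else if (sig.isOnComplete()) {
      System.out.println(operation + " SUCCEEDED");
    }
  }

  public static void main(String[] args) {
    Mono.just("acl-binding")
        .doOnEach(sig -> auditLog("createAcl", sig))
        .block();                                  // logs: createAcl SUCCEEDED

    Mono.error(new IllegalStateException("boom"))
        .doOnEach(sig -> auditLog("createAcl", sig))
        .onErrorResume(e -> Mono.empty())
        .block();                                  // logs: createAcl FAILED: ...
  }
}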


@@ -15,6 +15,7 @@ import com.provectus.kafka.ui.model.UploadedFileInfoDTO;
import com.provectus.kafka.ui.model.rbac.AccessContext;
import com.provectus.kafka.ui.service.ApplicationInfoService;
import com.provectus.kafka.ui.service.KafkaClusterFactory;
import com.provectus.kafka.ui.service.audit.AuditService;
import com.provectus.kafka.ui.service.rbac.AccessControlService;
import com.provectus.kafka.ui.util.ApplicationRestarter;
import com.provectus.kafka.ui.util.DynamicConfigOperations;
@@ -55,6 +56,7 @@ public class ApplicationConfigController implements ApplicationConfigApi {
private final ApplicationRestarter restarter;
private final KafkaClusterFactory kafkaClusterFactory;
private final ApplicationInfoService applicationInfoService;
private final AuditService auditService;
@Override
public Mono<ResponseEntity<ApplicationInfoDTO>> getApplicationInfo(ServerWebExchange exchange) {
@@ -63,62 +65,68 @@
@Override
public Mono<ResponseEntity<ApplicationConfigDTO>> getCurrentConfig(ServerWebExchange exchange) {
return accessControlService
.validateAccess(
AccessContext.builder()
.applicationConfigActions(VIEW)
.build()
)
var context = AccessContext.builder()
.applicationConfigActions(VIEW)
.operationName("getCurrentConfig")
.build();
return accessControlService.validateAccess(context)
.then(Mono.fromSupplier(() -> ResponseEntity.ok(
new ApplicationConfigDTO()
.properties(MAPPER.toDto(dynamicConfigOperations.getCurrentProperties()))
)));
)))
.doOnEach(sig -> auditService.audit(context, sig));
}
@Override
public Mono<ResponseEntity<Void>> restartWithConfig(Mono<RestartRequestDTO> restartRequestDto,
ServerWebExchange exchange) {
return accessControlService
.validateAccess(
AccessContext.builder()
.applicationConfigActions(EDIT)
.build()
)
var context = AccessContext.builder()
.applicationConfigActions(EDIT)
.operationName("restartWithConfig")
.build();
return accessControlService.validateAccess(context)
.then(restartRequestDto)
.map(dto -> {
.<ResponseEntity<Void>>map(dto -> {
dynamicConfigOperations.persist(MAPPER.fromDto(dto.getConfig().getProperties()));
restarter.requestRestart();
return ResponseEntity.ok().build();
});
})
.doOnEach(sig -> auditService.audit(context, sig));
}
@Override
public Mono<ResponseEntity<UploadedFileInfoDTO>> uploadConfigRelatedFile(Flux<Part> fileFlux,
ServerWebExchange exchange) {
return accessControlService
.validateAccess(
AccessContext.builder()
.applicationConfigActions(EDIT)
.build()
)
var context = AccessContext.builder()
.applicationConfigActions(EDIT)
.operationName("uploadConfigRelatedFile")
.build();
return accessControlService.validateAccess(context)
.then(fileFlux.single())
.flatMap(file ->
dynamicConfigOperations.uploadConfigRelatedFile((FilePart) file)
.map(path -> new UploadedFileInfoDTO().location(path.toString()))
.map(ResponseEntity::ok));
.map(ResponseEntity::ok))
.doOnEach(sig -> auditService.audit(context, sig));
}
@Override
public Mono<ResponseEntity<ApplicationConfigValidationDTO>> validateConfig(Mono<ApplicationConfigDTO> configDto,
ServerWebExchange exchange) {
return configDto
var context = AccessContext.builder()
.applicationConfigActions(EDIT)
.operationName("validateConfig")
.build();
return accessControlService.validateAccess(context)
.then(configDto)
.flatMap(config -> {
PropertiesStructure propertiesStructure = MAPPER.fromDto(config.getProperties());
ClustersProperties clustersProperties = propertiesStructure.getKafka();
return validateClustersConfig(clustersProperties)
.map(validations -> new ApplicationConfigValidationDTO().clusters(validations));
})
.map(ResponseEntity::ok);
.map(ResponseEntity::ok)
.doOnEach(sig -> auditService.audit(context, sig));
}
private Mono<Map<String, ClusterConfigValidationDTO>> validateClustersConfig(

BrokersController.java
View file
@ -11,8 +11,11 @@ import com.provectus.kafka.ui.model.BrokersLogdirsDTO;
import com.provectus.kafka.ui.model.rbac.AccessContext;
import com.provectus.kafka.ui.model.rbac.permission.ClusterConfigAction;
import com.provectus.kafka.ui.service.BrokerService;
import com.provectus.kafka.ui.service.audit.AuditService;
import com.provectus.kafka.ui.service.rbac.AccessControlService;
import java.util.List;
import java.util.Map;
import javax.annotation.Nullable;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.springframework.http.ResponseEntity;
@ -27,61 +30,78 @@ import reactor.core.publisher.Mono;
public class BrokersController extends AbstractController implements BrokersApi {
private final BrokerService brokerService;
private final ClusterMapper clusterMapper;
private final AuditService auditService;
private final AccessControlService accessControlService;
@Override
public Mono<ResponseEntity<Flux<BrokerDTO>>> getBrokers(String clusterName,
ServerWebExchange exchange) {
Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
var context = AccessContext.builder()
.cluster(clusterName)
.build());
.operationName("getBrokers")
.build();
var job = brokerService.getBrokers(getCluster(clusterName)).map(clusterMapper::toBrokerDto);
return validateAccess.thenReturn(ResponseEntity.ok(job));
return accessControlService.validateAccess(context)
.thenReturn(ResponseEntity.ok(job))
.doOnEach(sig -> auditService.audit(context, sig));
}
@Override
public Mono<ResponseEntity<BrokerMetricsDTO>> getBrokersMetrics(String clusterName, Integer id,
ServerWebExchange exchange) {
Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
var context = AccessContext.builder()
.cluster(clusterName)
.build());
.operationName("getBrokersMetrics")
.operationParams(Map.of("id", id))
.build();
return validateAccess.then(
brokerService.getBrokerMetrics(getCluster(clusterName), id)
.map(clusterMapper::toBrokerMetrics)
.map(ResponseEntity::ok)
.onErrorReturn(ResponseEntity.notFound().build())
);
return accessControlService.validateAccess(context)
.then(
brokerService.getBrokerMetrics(getCluster(clusterName), id)
.map(clusterMapper::toBrokerMetrics)
.map(ResponseEntity::ok)
.onErrorReturn(ResponseEntity.notFound().build())
)
.doOnEach(sig -> auditService.audit(context, sig));
}
@Override
public Mono<ResponseEntity<Flux<BrokersLogdirsDTO>>> getAllBrokersLogdirs(String clusterName,
List<Integer> brokers,
@Nullable List<Integer> brokers,
ServerWebExchange exchange) {
Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
.cluster(clusterName)
.build());
return validateAccess.thenReturn(ResponseEntity.ok(
brokerService.getAllBrokersLogdirs(getCluster(clusterName), brokers)));
List<Integer> brokerIds = brokers == null ? List.of() : brokers;
var context = AccessContext.builder()
.cluster(clusterName)
.operationName("getAllBrokersLogdirs")
.operationParams(Map.of("brokerIds", brokerIds))
.build();
return accessControlService.validateAccess(context)
.thenReturn(ResponseEntity.ok(
brokerService.getAllBrokersLogdirs(getCluster(clusterName), brokerIds)))
.doOnEach(sig -> auditService.audit(context, sig));
}
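The null check above is not cosmetic: Map.of rejects null keys and values outright, so an absent brokers query parameter has to be normalized before it can become an audit parameter. A small demonstration in plain Java, with no project types:

import java.util.List;
import java.util.Map;

// Map.of throws NullPointerException on null values, so nullable request
// parameters must be replaced with a sentinel before building audit params.
class NullableParamsDemo {
  public static void main(String[] args) {
    List<Integer> brokers = null;                        // query param omitted
    List<Integer> brokerIds = brokers == null ? List.of() : brokers;
    System.out.println(Map.of("brokerIds", brokerIds));  // {brokerIds=[]}
    // Map.of("brokerIds", brokers) would throw NullPointerException here.
  }
}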
@Override
public Mono<ResponseEntity<Flux<BrokerConfigDTO>>> getBrokerConfig(String clusterName,
Integer id,
ServerWebExchange exchange) {
Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
var context = AccessContext.builder()
.cluster(clusterName)
.clusterConfigActions(ClusterConfigAction.VIEW)
.build());
.operationName("getBrokerConfig")
.operationParams(Map.of("brokerId", id))
.build();
return validateAccess.thenReturn(
return accessControlService.validateAccess(context).thenReturn(
ResponseEntity.ok(
brokerService.getBrokerConfig(getCluster(clusterName), id)
.map(clusterMapper::toBrokerConfig))
);
).doOnEach(sig -> auditService.audit(context, sig));
}
@Override
@ -89,16 +109,18 @@ public class BrokersController extends AbstractController implements BrokersApi
Integer id,
Mono<BrokerLogdirUpdateDTO> brokerLogdir,
ServerWebExchange exchange) {
Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
var context = AccessContext.builder()
.cluster(clusterName)
.clusterConfigActions(ClusterConfigAction.VIEW, ClusterConfigAction.EDIT)
.build());
.operationName("updateBrokerTopicPartitionLogDir")
.operationParams(Map.of("brokerId", id))
.build();
return validateAccess.then(
return accessControlService.validateAccess(context).then(
brokerLogdir
.flatMap(bld -> brokerService.updateBrokerLogDir(getCluster(clusterName), id, bld))
.map(ResponseEntity::ok)
);
).doOnEach(sig -> auditService.audit(context, sig));
}
@Override
@ -107,16 +129,18 @@ public class BrokersController extends AbstractController implements BrokersApi
String name,
Mono<BrokerConfigItemDTO> brokerConfig,
ServerWebExchange exchange) {
Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
var context = AccessContext.builder()
.cluster(clusterName)
.clusterConfigActions(ClusterConfigAction.VIEW, ClusterConfigAction.EDIT)
.build());
.operationName("updateBrokerConfigByName")
.operationParams(Map.of("brokerId", id))
.build();
return validateAccess.then(
return accessControlService.validateAccess(context).then(
brokerConfig
.flatMap(bci -> brokerService.updateBrokerConfigByName(
getCluster(clusterName), id, name, bci.getValue()))
.map(ResponseEntity::ok)
);
).doOnEach(sig -> auditService.audit(context, sig));
}
}

ClustersController.java
View file
@ -6,6 +6,7 @@ import com.provectus.kafka.ui.model.ClusterMetricsDTO;
import com.provectus.kafka.ui.model.ClusterStatsDTO;
import com.provectus.kafka.ui.model.rbac.AccessContext;
import com.provectus.kafka.ui.service.ClusterService;
import com.provectus.kafka.ui.service.audit.AuditService;
import com.provectus.kafka.ui.service.rbac.AccessControlService;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
@ -21,6 +22,7 @@ import reactor.core.publisher.Mono;
public class ClustersController extends AbstractController implements ClustersApi {
private final ClusterService clusterService;
private final AccessControlService accessControlService;
private final AuditService auditService;
@Override
public Mono<ResponseEntity<Flux<ClusterDTO>>> getClusters(ServerWebExchange exchange) {
@ -35,6 +37,7 @@ public class ClustersController extends AbstractController implements ClustersAp
ServerWebExchange exchange) {
AccessContext context = AccessContext.builder()
.cluster(clusterName)
.operationName("getClusterMetrics")
.build();
return accessControlService.validateAccess(context)
@ -42,7 +45,8 @@ public class ClustersController extends AbstractController implements ClustersAp
clusterService.getClusterMetrics(getCluster(clusterName))
.map(ResponseEntity::ok)
.onErrorReturn(ResponseEntity.notFound().build())
);
)
.doOnEach(sig -> auditService.audit(context, sig));
}
@Override
@ -50,6 +54,7 @@ public class ClustersController extends AbstractController implements ClustersAp
ServerWebExchange exchange) {
AccessContext context = AccessContext.builder()
.cluster(clusterName)
.operationName("getClusterStats")
.build();
return accessControlService.validateAccess(context)
@ -57,7 +62,8 @@ public class ClustersController extends AbstractController implements ClustersAp
clusterService.getClusterStats(getCluster(clusterName))
.map(ResponseEntity::ok)
.onErrorReturn(ResponseEntity.notFound().build())
);
)
.doOnEach(sig -> auditService.audit(context, sig));
}
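Operator order matters for what the audit hook records here: onErrorReturn sits inside then(...), so it converts a failed stats lookup into a normal 404 value before doOnEach runs. The audit entry therefore sees a successful signal carrying the fallback, not the error. A sketch, assuming reactor-core and spring-web on the classpath:

import org.springframework.http.ResponseEntity;
import reactor.core.publisher.Mono;

// onErrorReturn swallows the failure, so a downstream audit hook observes
// an onNext signal with the 404 fallback instead of onError.
class ErrorMappedBeforeAuditSketch {
  public static void main(String[] args) {
    Mono.<ResponseEntity<String>>error(new IllegalStateException("stats unavailable"))
        .onErrorReturn(ResponseEntity.notFound().build())
        .doOnEach(sig -> System.out.println("audit sees: " + sig.getType()))
        .subscribe(resp -> System.out.println(resp.getStatusCode()));
    // Prints "audit sees: onNext", "404 NOT_FOUND", "audit sees: onComplete".
  }
}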
@Override
@ -66,11 +72,11 @@ public class ClustersController extends AbstractController implements ClustersAp
AccessContext context = AccessContext.builder()
.cluster(clusterName)
.operationName("updateClusterInfo")
.build();
return accessControlService.validateAccess(context)
.then(
clusterService.updateCluster(getCluster(clusterName)).map(ResponseEntity::ok)
);
.then(clusterService.updateCluster(getCluster(clusterName)).map(ResponseEntity::ok))
.doOnEach(sig -> auditService.audit(context, sig));
}
}

ConsumerGroupsController.java
View file
@ -19,6 +19,7 @@ import com.provectus.kafka.ui.model.rbac.AccessContext;
import com.provectus.kafka.ui.model.rbac.permission.TopicAction;
import com.provectus.kafka.ui.service.ConsumerGroupService;
import com.provectus.kafka.ui.service.OffsetsResetService;
import com.provectus.kafka.ui.service.audit.AuditService;
import com.provectus.kafka.ui.service.rbac.AccessControlService;
import java.util.Map;
import java.util.Optional;
@ -42,6 +43,7 @@ public class ConsumerGroupsController extends AbstractController implements Cons
private final ConsumerGroupService consumerGroupService;
private final OffsetsResetService offsetsResetService;
private final AccessControlService accessControlService;
private final AuditService auditService;
@Value("${consumer.groups.page.size:25}")
private int defaultConsumerGroupsPageSize;
@ -50,44 +52,47 @@ public class ConsumerGroupsController extends AbstractController implements Cons
public Mono<ResponseEntity<Void>> deleteConsumerGroup(String clusterName,
String id,
ServerWebExchange exchange) {
Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
var context = AccessContext.builder()
.cluster(clusterName)
.consumerGroup(id)
.consumerGroupActions(DELETE)
.build());
.operationName("deleteConsumerGroup")
.build();
return validateAccess.then(
consumerGroupService.deleteConsumerGroupById(getCluster(clusterName), id)
.thenReturn(ResponseEntity.ok().build())
);
return accessControlService.validateAccess(context)
.then(consumerGroupService.deleteConsumerGroupById(getCluster(clusterName), id))
.doOnEach(sig -> auditService.audit(context, sig))
.thenReturn(ResponseEntity.ok().build());
}
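Note where the hook lands in deleteConsumerGroup: after the delete but before thenReturn. With a Mono<Void> source the audit consumer receives no value, only completion or error, and attaching it before thenReturn keeps the synthetic ResponseEntity out of the audited stream. A sketch, assuming reactor-core:

import reactor.core.publisher.Mono;

// For Mono<Void> operations the audit hook sees only completion or error;
// placing it before thenReturn audits the delete itself, not the
// ResponseEntity built afterwards.
class VoidAuditOrderSketch {
  public static void main(String[] args) {
    Mono<Void> delete = Mono.empty();  // stands in for deleteConsumerGroupById
    delete
        .doOnEach(sig -> System.out.println("audit: " + sig.getType())) // audit: onComplete
        .thenReturn("200 OK")
        .subscribe(System.out::println);                                // 200 OK
  }
}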
@Override
public Mono<ResponseEntity<ConsumerGroupDetailsDTO>> getConsumerGroup(String clusterName,
String consumerGroupId,
ServerWebExchange exchange) {
Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
var context = AccessContext.builder()
.cluster(clusterName)
.consumerGroup(consumerGroupId)
.consumerGroupActions(VIEW)
.build());
.operationName("getConsumerGroup")
.build();
return validateAccess.then(
consumerGroupService.getConsumerGroupDetail(getCluster(clusterName), consumerGroupId)
return accessControlService.validateAccess(context)
.then(consumerGroupService.getConsumerGroupDetail(getCluster(clusterName), consumerGroupId)
.map(ConsumerGroupMapper::toDetailsDto)
.map(ResponseEntity::ok)
);
.map(ResponseEntity::ok))
.doOnEach(sig -> auditService.audit(context, sig));
}
@Override
public Mono<ResponseEntity<Flux<ConsumerGroupDTO>>> getTopicConsumerGroups(String clusterName,
String topicName,
ServerWebExchange exchange) {
Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
var context = AccessContext.builder()
.cluster(clusterName)
.topic(topicName)
.topicActions(TopicAction.VIEW)
.build());
.operationName("getTopicConsumerGroups")
.build();
Mono<ResponseEntity<Flux<ConsumerGroupDTO>>> job =
consumerGroupService.getConsumerGroupsForTopic(getCluster(clusterName), topicName)
@ -99,7 +104,9 @@ public class ConsumerGroupsController extends AbstractController implements Cons
.map(ResponseEntity::ok)
.switchIfEmpty(Mono.just(ResponseEntity.notFound().build()));
return validateAccess.then(job);
return accessControlService.validateAccess(context)
.then(job)
.doOnEach(sig -> auditService.audit(context, sig));
}
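Building `job` before validation is safe only because assembling a reactive chain executes nothing; the lookup starts once validateAccess.then(job) is subscribed. A sketch of that laziness, assuming reactor-core:

import reactor.core.publisher.Mono;

// Assembling a Mono does not run it: `job` is only a description here,
// so constructing it before access validation performs no work.
class LazyAssemblySketch {
  public static void main(String[] args) {
    Mono<String> job = Mono.fromSupplier(() -> {
      System.out.println("fetching consumer groups");
      return "groups";
    });                                  // nothing printed yet
    Mono<Void> validate = Mono.empty();
    validate.then(job).subscribe(System.out::println);
    // Prints "fetching consumer groups", then "groups", only on subscribe.
  }
}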
@Override
@ -112,12 +119,13 @@ public class ConsumerGroupsController extends AbstractController implements Cons
SortOrderDTO sortOrderDto,
ServerWebExchange exchange) {
Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
var context = AccessContext.builder()
.cluster(clusterName)
// consumer group access validation is within the service
.build());
.operationName("getConsumerGroupsPage")
.build();
return validateAccess.then(
return accessControlService.validateAccess(context).then(
consumerGroupService.getConsumerGroupsPage(
getCluster(clusterName),
Optional.ofNullable(page).filter(i -> i > 0).orElse(1),
@ -128,7 +136,7 @@ public class ConsumerGroupsController extends AbstractController implements Cons
)
.map(this::convertPage)
.map(ResponseEntity::ok)
);
).doOnEach(sig -> auditService.audit(context, sig));
}
@Override
@ -137,12 +145,13 @@ public class ConsumerGroupsController extends AbstractController implements Cons
Mono<ConsumerGroupOffsetsResetDTO> resetDto,
ServerWebExchange exchange) {
return resetDto.flatMap(reset -> {
Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
var context = AccessContext.builder()
.cluster(clusterName)
.topic(reset.getTopic())
.topicActions(TopicAction.VIEW)
.consumerGroupActions(RESET_OFFSETS)
.build());
.operationName("resetConsumerGroupOffsets")
.build();
Supplier<Mono<Void>> mono = () -> {
var cluster = getCluster(clusterName);
@ -182,7 +191,9 @@ public class ConsumerGroupsController extends AbstractController implements Cons
}
};
return validateAccess.then(mono.get());
return accessControlService.validateAccess(context)
.then(mono.get())
.doOnEach(sig -> auditService.audit(context, sig));
}).thenReturn(ResponseEntity.ok().build());
}

KafkaConnectController.java
View file
@ -18,6 +18,7 @@ import com.provectus.kafka.ui.model.TaskDTO;
import com.provectus.kafka.ui.model.rbac.AccessContext;
import com.provectus.kafka.ui.model.rbac.permission.ConnectAction;
import com.provectus.kafka.ui.service.KafkaConnectService;
import com.provectus.kafka.ui.service.audit.AuditService;
import com.provectus.kafka.ui.service.rbac.AccessControlService;
import java.util.Comparator;
import java.util.Map;
@ -37,8 +38,10 @@ import reactor.core.publisher.Mono;
public class KafkaConnectController extends AbstractController implements KafkaConnectApi {
private static final Set<ConnectorActionDTO> RESTART_ACTIONS
= Set.of(RESTART, RESTART_FAILED_TASKS, RESTART_ALL_TASKS);
private final KafkaConnectService kafkaConnectService;
private final AccessControlService accessControlService;
private final AuditService auditService;
@Override
public Mono<ResponseEntity<Flux<ConnectDTO>>> getConnects(String clusterName,
@ -54,15 +57,16 @@ public class KafkaConnectController extends AbstractController implements KafkaC
public Mono<ResponseEntity<Flux<String>>> getConnectors(String clusterName, String connectName,
ServerWebExchange exchange) {
Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
var context = AccessContext.builder()
.cluster(clusterName)
.connect(connectName)
.connectActions(ConnectAction.VIEW)
.build());
.operationName("getConnectors")
.build();
return validateAccess.thenReturn(
ResponseEntity.ok(kafkaConnectService.getConnectorNames(getCluster(clusterName), connectName))
);
return accessControlService.validateAccess(context)
.thenReturn(ResponseEntity.ok(kafkaConnectService.getConnectorNames(getCluster(clusterName), connectName)))
.doOnEach(sig -> auditService.audit(context, sig));
}
@Override
@ -70,16 +74,17 @@ public class KafkaConnectController extends AbstractController implements KafkaC
@Valid Mono<NewConnectorDTO> connector,
ServerWebExchange exchange) {
Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
var context = AccessContext.builder()
.cluster(clusterName)
.connect(connectName)
.connectActions(ConnectAction.VIEW, ConnectAction.CREATE)
.build());
.operationName("createConnector")
.build();
return validateAccess.then(
return accessControlService.validateAccess(context).then(
kafkaConnectService.createConnector(getCluster(clusterName), connectName, connector)
.map(ResponseEntity::ok)
);
).doOnEach(sig -> auditService.audit(context, sig));
}
@Override
@ -87,17 +92,18 @@ public class KafkaConnectController extends AbstractController implements KafkaC
String connectorName,
ServerWebExchange exchange) {
Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
var context = AccessContext.builder()
.cluster(clusterName)
.connect(connectName)
.connectActions(ConnectAction.VIEW)
.connector(connectorName)
.build());
.operationName("getConnector")
.build();
return validateAccess.then(
return accessControlService.validateAccess(context).then(
kafkaConnectService.getConnector(getCluster(clusterName), connectName, connectorName)
.map(ResponseEntity::ok)
);
).doOnEach(sig -> auditService.audit(context, sig));
}
@Override
@ -105,16 +111,18 @@ public class KafkaConnectController extends AbstractController implements KafkaC
String connectorName,
ServerWebExchange exchange) {
Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
var context = AccessContext.builder()
.cluster(clusterName)
.connect(connectName)
.connectActions(ConnectAction.VIEW, ConnectAction.EDIT)
.build());
.operationName("deleteConnector")
.operationParams(Map.of("connectorName", connectName))
.build();
return validateAccess.then(
return accessControlService.validateAccess(context).then(
kafkaConnectService.deleteConnector(getCluster(clusterName), connectName, connectorName)
.map(ResponseEntity::ok)
);
).doOnEach(sig -> auditService.audit(context, sig));
}
@ -126,14 +134,23 @@ public class KafkaConnectController extends AbstractController implements KafkaC
SortOrderDTO sortOrder,
ServerWebExchange exchange
) {
var context = AccessContext.builder()
.cluster(clusterName)
.connectActions(ConnectAction.VIEW, ConnectAction.EDIT)
.operationName("getAllConnectors")
.build();
var comparator = sortOrder == null || sortOrder.equals(SortOrderDTO.ASC)
? getConnectorsComparator(orderBy)
: getConnectorsComparator(orderBy).reversed();
Flux<FullConnectorInfoDTO> job = kafkaConnectService.getAllConnectors(getCluster(clusterName), search)
.filterWhen(dto -> accessControlService.isConnectAccessible(dto.getConnect(), clusterName))
.filterWhen(dto -> accessControlService.isConnectorAccessible(dto.getConnect(), dto.getName(), clusterName));
.filterWhen(dto -> accessControlService.isConnectorAccessible(dto.getConnect(), dto.getName(), clusterName))
.sort(comparator);
return Mono.just(ResponseEntity.ok(job.sort(comparator)));
return Mono.just(ResponseEntity.ok(job))
.doOnEach(sig -> auditService.audit(context, sig));
}
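getAllConnectors spans many resources, so there is no single context to validate up front; instead each element is checked in-stream. filterWhen keeps an element only when its asynchronous predicate emits true. A sketch with illustrative names (isAccessible is not the project's API):

import java.util.List;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;

// filterWhen applies an async predicate per element, mirroring the
// per-connect and per-connector RBAC checks above.
class FilterWhenSketch {
  static Mono<Boolean> isAccessible(String connector) {
    return Mono.just(!connector.startsWith("secret-"));
  }

  public static void main(String[] args) {
    Flux.fromIterable(List.of("sink-a", "secret-sink", "source-b"))
        .filterWhen(FilterWhenSketch::isAccessible)
        .collectList()
        .subscribe(System.out::println); // [sink-a, source-b]
  }
}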
@Override
@ -142,17 +159,18 @@ public class KafkaConnectController extends AbstractController implements KafkaC
String connectorName,
ServerWebExchange exchange) {
Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
var context = AccessContext.builder()
.cluster(clusterName)
.connect(connectName)
.connectActions(ConnectAction.VIEW)
.build());
.operationName("getConnectorConfig")
.build();
return validateAccess.then(
return accessControlService.validateAccess(context).then(
kafkaConnectService
.getConnectorConfig(getCluster(clusterName), connectName, connectorName)
.map(ResponseEntity::ok)
);
).doOnEach(sig -> auditService.audit(context, sig));
}
@Override
@ -161,16 +179,19 @@ public class KafkaConnectController extends AbstractController implements KafkaC
Mono<Map<String, Object>> requestBody,
ServerWebExchange exchange) {
Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
var context = AccessContext.builder()
.cluster(clusterName)
.connect(connectName)
.connectActions(ConnectAction.VIEW, ConnectAction.EDIT)
.build());
.operationName("setConnectorConfig")
.operationParams(Map.of("connectorName", connectorName))
.build();
return validateAccess.then(
kafkaConnectService
.setConnectorConfig(getCluster(clusterName), connectName, connectorName, requestBody)
.map(ResponseEntity::ok));
return accessControlService.validateAccess(context).then(
kafkaConnectService
.setConnectorConfig(getCluster(clusterName), connectName, connectorName, requestBody)
.map(ResponseEntity::ok))
.doOnEach(sig -> auditService.audit(context, sig));
}
@Override
@ -178,7 +199,6 @@ public class KafkaConnectController extends AbstractController implements KafkaC
String connectorName,
ConnectorActionDTO action,
ServerWebExchange exchange) {
ConnectAction[] connectActions;
if (RESTART_ACTIONS.contains(action)) {
connectActions = new ConnectAction[] {ConnectAction.VIEW, ConnectAction.RESTART};
@ -186,17 +206,19 @@ public class KafkaConnectController extends AbstractController implements KafkaC
connectActions = new ConnectAction[] {ConnectAction.VIEW, ConnectAction.EDIT};
}
Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
var context = AccessContext.builder()
.cluster(clusterName)
.connect(connectName)
.connectActions(connectActions)
.build());
.operationName("updateConnectorState")
.operationParams(Map.of("connectorName", connectorName))
.build();
return validateAccess.then(
return accessControlService.validateAccess(context).then(
kafkaConnectService
.updateConnectorState(getCluster(clusterName), connectName, connectorName, action)
.map(ResponseEntity::ok)
);
).doOnEach(sig -> auditService.audit(context, sig));
}
@Override
@ -204,17 +226,19 @@ public class KafkaConnectController extends AbstractController implements KafkaC
String connectName,
String connectorName,
ServerWebExchange exchange) {
Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
var context = AccessContext.builder()
.cluster(clusterName)
.connect(connectName)
.connectActions(ConnectAction.VIEW)
.build());
.operationName("getConnectorTasks")
.operationParams(Map.of("connectorName", connectorName))
.build();
return validateAccess.thenReturn(
return accessControlService.validateAccess(context).thenReturn(
ResponseEntity
.ok(kafkaConnectService
.getConnectorTasks(getCluster(clusterName), connectName, connectorName))
);
).doOnEach(sig -> auditService.audit(context, sig));
}
@Override
@ -222,34 +246,37 @@ public class KafkaConnectController extends AbstractController implements KafkaC
String connectorName, Integer taskId,
ServerWebExchange exchange) {
Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
var context = AccessContext.builder()
.cluster(clusterName)
.connect(connectName)
.connectActions(ConnectAction.VIEW, ConnectAction.RESTART)
.build());
.operationName("restartConnectorTask")
.operationParams(Map.of("connectorName", connectorName))
.build();
return validateAccess.then(
return accessControlService.validateAccess(context).then(
kafkaConnectService
.restartConnectorTask(getCluster(clusterName), connectName, connectorName, taskId)
.map(ResponseEntity::ok)
);
).doOnEach(sig -> auditService.audit(context, sig));
}
@Override
public Mono<ResponseEntity<Flux<ConnectorPluginDTO>>> getConnectorPlugins(
String clusterName, String connectName, ServerWebExchange exchange) {
Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
var context = AccessContext.builder()
.cluster(clusterName)
.connect(connectName)
.connectActions(ConnectAction.VIEW)
.build());
.operationName("getConnectorPlugins")
.build();
return validateAccess.then(
return accessControlService.validateAccess(context).then(
Mono.just(
ResponseEntity.ok(
kafkaConnectService.getConnectorPlugins(getCluster(clusterName), connectName)))
);
).doOnEach(sig -> auditService.audit(context, sig));
}
@Override

KsqlController.java
View file
@ -9,6 +9,7 @@ import com.provectus.kafka.ui.model.KsqlTableDescriptionDTO;
import com.provectus.kafka.ui.model.KsqlTableResponseDTO;
import com.provectus.kafka.ui.model.rbac.AccessContext;
import com.provectus.kafka.ui.model.rbac.permission.KsqlAction;
import com.provectus.kafka.ui.service.audit.AuditService;
import com.provectus.kafka.ui.service.ksql.KsqlServiceV2;
import com.provectus.kafka.ui.service.rbac.AccessControlService;
import java.util.List;
@ -29,38 +30,43 @@ public class KsqlController extends AbstractController implements KsqlApi {
private final KsqlServiceV2 ksqlServiceV2;
private final AccessControlService accessControlService;
private final AuditService auditService;
@Override
public Mono<ResponseEntity<KsqlCommandV2ResponseDTO>> executeKsql(String clusterName,
Mono<KsqlCommandV2DTO>
ksqlCommand2Dto,
Mono<KsqlCommandV2DTO> ksqlCmdDo,
ServerWebExchange exchange) {
Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
.cluster(clusterName)
.ksqlActions(KsqlAction.EXECUTE)
.build());
return validateAccess.then(
ksqlCommand2Dto.map(dto -> {
var id = ksqlServiceV2.registerCommand(
getCluster(clusterName),
dto.getKsql(),
Optional.ofNullable(dto.getStreamsProperties()).orElse(Map.of()));
return new KsqlCommandV2ResponseDTO().pipeId(id);
}).map(ResponseEntity::ok)
);
return ksqlCmdDo.flatMap(
command -> {
var context = AccessContext.builder()
.cluster(clusterName)
.ksqlActions(KsqlAction.EXECUTE)
.operationName("executeKsql")
.operationParams(command)
.build();
return accessControlService.validateAccess(context).thenReturn(
new KsqlCommandV2ResponseDTO().pipeId(
ksqlServiceV2.registerCommand(
getCluster(clusterName),
command.getKsql(),
Optional.ofNullable(command.getStreamsProperties()).orElse(Map.of()))))
.doOnEach(sig -> auditService.audit(context, sig));
}
)
.map(ResponseEntity::ok);
}
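executeKsql differs from the other endpoints: its audit parameters come from the request body, so the context can only be built inside flatMap, once the command has arrived. A sketch with stand-in types (Ctx is not the project's AccessContext):

import java.util.Map;
import reactor.core.publisher.Mono;
import reactor.core.publisher.Signal;

// The context depends on the request body, so it is created per request
// inside flatMap rather than once at assembly time.
class PerRequestContextSketch {
  record Ctx(String op, Map<String, String> params) {}

  static void audit(Ctx ctx, Signal<?> sig) {
    System.out.println(ctx.op() + " -> " + sig.getType());
  }

  static Mono<String> executeKsql(Mono<Map<String, String>> body, Mono<Void> validate) {
    return body.flatMap(cmd -> {
      var ctx = new Ctx("executeKsql", cmd);   // needs the parsed body
      return validate.thenReturn("pipe-id")
          .doOnEach(sig -> audit(ctx, sig));
    });
  }
}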
@Override
public Mono<ResponseEntity<Flux<KsqlResponseDTO>>> openKsqlResponsePipe(String clusterName,
String pipeId,
ServerWebExchange exchange) {
Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
var context = AccessContext.builder()
.cluster(clusterName)
.ksqlActions(KsqlAction.EXECUTE)
.build());
.operationName("openKsqlResponsePipe")
.build();
return validateAccess.thenReturn(
return accessControlService.validateAccess(context).thenReturn(
ResponseEntity.ok(ksqlServiceV2.execute(pipeId)
.map(table -> new KsqlResponseDTO()
.table(
@ -74,22 +80,28 @@ public class KsqlController extends AbstractController implements KsqlApi {
@Override
public Mono<ResponseEntity<Flux<KsqlStreamDescriptionDTO>>> listStreams(String clusterName,
ServerWebExchange exchange) {
Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
var context = AccessContext.builder()
.cluster(clusterName)
.ksqlActions(KsqlAction.EXECUTE)
.build());
.operationName("listStreams")
.build();
return validateAccess.thenReturn(ResponseEntity.ok(ksqlServiceV2.listStreams(getCluster(clusterName))));
return accessControlService.validateAccess(context)
.thenReturn(ResponseEntity.ok(ksqlServiceV2.listStreams(getCluster(clusterName))))
.doOnEach(sig -> auditService.audit(context, sig));
}
@Override
public Mono<ResponseEntity<Flux<KsqlTableDescriptionDTO>>> listTables(String clusterName,
ServerWebExchange exchange) {
Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
var context = AccessContext.builder()
.cluster(clusterName)
.ksqlActions(KsqlAction.EXECUTE)
.build());
.operationName("listTables")
.build();
return validateAccess.thenReturn(ResponseEntity.ok(ksqlServiceV2.listTables(getCluster(clusterName))));
return accessControlService.validateAccess(context)
.thenReturn(ResponseEntity.ok(ksqlServiceV2.listTables(getCluster(clusterName))))
.doOnEach(sig -> auditService.audit(context, sig));
}
}

MessagesController.java
View file
@ -15,12 +15,16 @@ import com.provectus.kafka.ui.model.MessageFilterTypeDTO;
import com.provectus.kafka.ui.model.SeekDirectionDTO;
import com.provectus.kafka.ui.model.SeekTypeDTO;
import com.provectus.kafka.ui.model.SerdeUsageDTO;
import com.provectus.kafka.ui.model.SmartFilterTestExecutionDTO;
import com.provectus.kafka.ui.model.SmartFilterTestExecutionResultDTO;
import com.provectus.kafka.ui.model.TopicMessageEventDTO;
import com.provectus.kafka.ui.model.TopicSerdeSuggestionDTO;
import com.provectus.kafka.ui.model.rbac.AccessContext;
import com.provectus.kafka.ui.model.rbac.permission.AuditAction;
import com.provectus.kafka.ui.model.rbac.permission.TopicAction;
import com.provectus.kafka.ui.service.DeserializationService;
import com.provectus.kafka.ui.service.MessagesService;
import com.provectus.kafka.ui.service.audit.AuditService;
import com.provectus.kafka.ui.service.rbac.AccessControlService;
import java.util.List;
import java.util.Map;
@ -46,25 +50,34 @@ public class MessagesController extends AbstractController implements MessagesAp
private final MessagesService messagesService;
private final DeserializationService deserializationService;
private final AccessControlService accessControlService;
private final AuditService auditService;
@Override
public Mono<ResponseEntity<Void>> deleteTopicMessages(
String clusterName, String topicName, @Valid List<Integer> partitions,
ServerWebExchange exchange) {
Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
var context = AccessContext.builder()
.cluster(clusterName)
.topic(topicName)
.topicActions(MESSAGES_DELETE)
.build());
.build();
return validateAccess.then(
return accessControlService.validateAccess(context).<ResponseEntity<Void>>then(
messagesService.deleteTopicMessages(
getCluster(clusterName),
topicName,
Optional.ofNullable(partitions).orElse(List.of())
).thenReturn(ResponseEntity.ok().build())
);
).doOnEach(sig -> auditService.audit(context, sig));
}
@Override
public Mono<ResponseEntity<SmartFilterTestExecutionResultDTO>> executeSmartFilterTest(
Mono<SmartFilterTestExecutionDTO> smartFilterTestExecutionDto, ServerWebExchange exchange) {
return smartFilterTestExecutionDto
.map(MessagesService::execSmartFilterTest)
.map(ResponseEntity::ok);
}
@Override
@ -79,11 +92,15 @@ public class MessagesController extends AbstractController implements MessagesAp
String keySerde,
String valueSerde,
ServerWebExchange exchange) {
final Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
var contextBuilder = AccessContext.builder()
.cluster(clusterName)
.topic(topicName)
.topicActions(MESSAGES_READ)
.build());
.operationName("getTopicMessages");
if (auditService.isAuditTopic(getCluster(clusterName), topicName)) {
contextBuilder.auditActions(AuditAction.VIEW);
}
seekType = seekType != null ? seekType : SeekTypeDTO.BEGINNING;
seekDirection = seekDirection != null ? seekDirection : SeekDirectionDTO.FORWARD;
@ -102,7 +119,10 @@ public class MessagesController extends AbstractController implements MessagesAp
)
);
return validateAccess.then(job);
var context = contextBuilder.build();
return accessControlService.validateAccess(context)
.then(job)
.doOnEach(sig -> auditService.audit(context, sig));
}
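The builder is deliberately left open in getTopicMessages: reading the audit topic itself requires an extra permission, so AuditAction.VIEW is added only when isAuditTopic matches. A simplified sketch; the topic name and the string-based builder are illustrative, not the project's defaults:

import java.util.ArrayList;
import java.util.List;

// Conditionally widening the required actions before sealing the context.
// "__kui-audit-log" is an assumed audit-topic name, for illustration only.
class ConditionalActionSketch {
  record Ctx(List<String> requiredActions) {}

  static Ctx buildContext(String topicName) {
    var actions = new ArrayList<>(List.of("MESSAGES_READ"));
    if ("__kui-audit-log".equals(topicName)) {  // mirrors isAuditTopic(...)
      actions.add("AUDIT_VIEW");                // extra permission required
    }
    return new Ctx(List.copyOf(actions));
  }

  public static void main(String[] args) {
    System.out.println(buildContext("orders"));          // read action only
    System.out.println(buildContext("__kui-audit-log")); // read + audit view
  }
}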
@Override
@ -110,17 +130,18 @@ public class MessagesController extends AbstractController implements MessagesAp
String clusterName, String topicName, @Valid Mono<CreateTopicMessageDTO> createTopicMessage,
ServerWebExchange exchange) {
Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
var context = AccessContext.builder()
.cluster(clusterName)
.topic(topicName)
.topicActions(MESSAGES_PRODUCE)
.build());
.operationName("sendTopicMessages")
.build();
return validateAccess.then(
return accessControlService.validateAccess(context).then(
createTopicMessage.flatMap(msg ->
messagesService.sendMessage(getCluster(clusterName), topicName, msg).then()
).map(ResponseEntity::ok)
);
).doOnEach(sig -> auditService.audit(context, sig));
}
/**
@ -156,12 +177,12 @@ public class MessagesController extends AbstractController implements MessagesAp
String topicName,
SerdeUsageDTO use,
ServerWebExchange exchange) {
Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
var context = AccessContext.builder()
.cluster(clusterName)
.topic(topicName)
.topicActions(TopicAction.VIEW)
.build());
.operationName("getSerdes")
.build();
TopicSerdeSuggestionDTO dto = new TopicSerdeSuggestionDTO()
.key(use == SerdeUsageDTO.SERIALIZE
@ -171,10 +192,14 @@ public class MessagesController extends AbstractController implements MessagesAp
? deserializationService.getSerdesForSerialize(getCluster(clusterName), topicName, VALUE)
: deserializationService.getSerdesForDeserialize(getCluster(clusterName), topicName, VALUE));
return validateAccess.then(
return accessControlService.validateAccess(context).then(
Mono.just(dto)
.subscribeOn(Schedulers.boundedElastic())
.map(ResponseEntity::ok)
);
}
}

SchemasController.java
View file
@ -13,8 +13,10 @@ import com.provectus.kafka.ui.model.SchemaSubjectsResponseDTO;
import com.provectus.kafka.ui.model.rbac.AccessContext;
import com.provectus.kafka.ui.model.rbac.permission.SchemaAction;
import com.provectus.kafka.ui.service.SchemaRegistryService;
import com.provectus.kafka.ui.service.audit.AuditService;
import com.provectus.kafka.ui.service.rbac.AccessControlService;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import javax.validation.Valid;
import lombok.RequiredArgsConstructor;
@ -37,6 +39,7 @@ public class SchemasController extends AbstractController implements SchemasApi
private final SchemaRegistryService schemaRegistryService;
private final AccessControlService accessControlService;
private final AuditService auditService;
@Override
protected KafkaCluster getCluster(String clusterName) {
@ -51,13 +54,14 @@ public class SchemasController extends AbstractController implements SchemasApi
public Mono<ResponseEntity<CompatibilityCheckResponseDTO>> checkSchemaCompatibility(
String clusterName, String subject, @Valid Mono<NewSchemaSubjectDTO> newSchemaSubjectMono,
ServerWebExchange exchange) {
Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
var context = AccessContext.builder()
.cluster(clusterName)
.schema(subject)
.schemaActions(SchemaAction.VIEW)
.build());
.operationName("checkSchemaCompatibility")
.build();
return validateAccess.then(
return accessControlService.validateAccess(context).then(
newSchemaSubjectMono.flatMap(subjectDTO ->
schemaRegistryService.checksSchemaCompatibility(
getCluster(clusterName),
@ -66,19 +70,20 @@ public class SchemasController extends AbstractController implements SchemasApi
))
.map(kafkaSrMapper::toDto)
.map(ResponseEntity::ok)
);
).doOnEach(sig -> auditService.audit(context, sig));
}
@Override
public Mono<ResponseEntity<SchemaSubjectDTO>> createNewSchema(
String clusterName, @Valid Mono<NewSchemaSubjectDTO> newSchemaSubjectMono,
ServerWebExchange exchange) {
Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
var context = AccessContext.builder()
.cluster(clusterName)
.schemaActions(SchemaAction.CREATE)
.build());
.operationName("createNewSchema")
.build();
return validateAccess.then(
return accessControlService.validateAccess(context).then(
newSchemaSubjectMono.flatMap(newSubject ->
schemaRegistryService.registerNewSchema(
getCluster(clusterName),
@ -87,20 +92,22 @@ public class SchemasController extends AbstractController implements SchemasApi
)
).map(kafkaSrMapper::toDto)
.map(ResponseEntity::ok)
);
).doOnEach(sig -> auditService.audit(context, sig));
}
@Override
public Mono<ResponseEntity<Void>> deleteLatestSchema(
String clusterName, String subject, ServerWebExchange exchange) {
Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
var context = AccessContext.builder()
.cluster(clusterName)
.schema(subject)
.schemaActions(SchemaAction.DELETE)
.build());
.operationName("deleteLatestSchema")
.build();
return validateAccess.then(
return accessControlService.validateAccess(context).then(
schemaRegistryService.deleteLatestSchemaSubject(getCluster(clusterName), subject)
.doOnEach(sig -> auditService.audit(context, sig))
.thenReturn(ResponseEntity.ok().build())
);
}
@ -108,14 +115,16 @@ public class SchemasController extends AbstractController implements SchemasApi
@Override
public Mono<ResponseEntity<Void>> deleteSchema(
String clusterName, String subject, ServerWebExchange exchange) {
Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
var context = AccessContext.builder()
.cluster(clusterName)
.schema(subject)
.schemaActions(SchemaAction.DELETE)
.build());
.operationName("deleteSchema")
.build();
return validateAccess.then(
return accessControlService.validateAccess(context).then(
schemaRegistryService.deleteSchemaSubjectEntirely(getCluster(clusterName), subject)
.doOnEach(sig -> auditService.audit(context, sig))
.thenReturn(ResponseEntity.ok().build())
);
}
@ -123,14 +132,16 @@ public class SchemasController extends AbstractController implements SchemasApi
@Override
public Mono<ResponseEntity<Void>> deleteSchemaByVersion(
String clusterName, String subjectName, Integer version, ServerWebExchange exchange) {
Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
var context = AccessContext.builder()
.cluster(clusterName)
.schema(subjectName)
.schemaActions(SchemaAction.DELETE)
.build());
.operationName("deleteSchemaByVersion")
.build();
return validateAccess.then(
return accessControlService.validateAccess(context).then(
schemaRegistryService.deleteSchemaSubjectByVersion(getCluster(clusterName), subjectName, version)
.doOnEach(sig -> auditService.audit(context, sig))
.thenReturn(ResponseEntity.ok().build())
);
}
@ -138,16 +149,20 @@ public class SchemasController extends AbstractController implements SchemasApi
@Override
public Mono<ResponseEntity<Flux<SchemaSubjectDTO>>> getAllVersionsBySubject(
String clusterName, String subjectName, ServerWebExchange exchange) {
Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
var context = AccessContext.builder()
.cluster(clusterName)
.schema(subjectName)
.schemaActions(SchemaAction.VIEW)
.build());
.operationName("getAllVersionsBySubject")
.build();
Flux<SchemaSubjectDTO> schemas =
schemaRegistryService.getAllVersionsBySubject(getCluster(clusterName), subjectName)
.map(kafkaSrMapper::toDto);
return validateAccess.thenReturn(ResponseEntity.ok(schemas));
return accessControlService.validateAccess(context)
.thenReturn(ResponseEntity.ok(schemas))
.doOnEach(sig -> auditService.audit(context, sig));
}
@Override
@ -163,34 +178,37 @@ public class SchemasController extends AbstractController implements SchemasApi
public Mono<ResponseEntity<SchemaSubjectDTO>> getLatestSchema(String clusterName,
String subject,
ServerWebExchange exchange) {
Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
var context = AccessContext.builder()
.cluster(clusterName)
.schema(subject)
.schemaActions(SchemaAction.VIEW)
.build());
.operationName("getLatestSchema")
.build();
return validateAccess.then(
return accessControlService.validateAccess(context).then(
schemaRegistryService.getLatestSchemaVersionBySubject(getCluster(clusterName), subject)
.map(kafkaSrMapper::toDto)
.map(ResponseEntity::ok)
);
).doOnEach(sig -> auditService.audit(context, sig));
}
@Override
public Mono<ResponseEntity<SchemaSubjectDTO>> getSchemaByVersion(
String clusterName, String subject, Integer version, ServerWebExchange exchange) {
Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
var context = AccessContext.builder()
.cluster(clusterName)
.schema(subject)
.schemaActions(SchemaAction.VIEW)
.build());
.operationName("getSchemaByVersion")
.operationParams(Map.of("subject", subject, "version", version))
.build();
return validateAccess.then(
return accessControlService.validateAccess(context).then(
schemaRegistryService.getSchemaSubjectByVersion(
getCluster(clusterName), subject, version)
.map(kafkaSrMapper::toDto)
.map(ResponseEntity::ok)
);
).doOnEach(sig -> auditService.audit(context, sig));
}
@Override
@ -199,6 +217,11 @@ public class SchemasController extends AbstractController implements SchemasApi
@Valid Integer perPage,
@Valid String search,
ServerWebExchange serverWebExchange) {
var context = AccessContext.builder()
.cluster(clusterName)
.operationName("getSchemas")
.build();
return schemaRegistryService
.getAllSubjectNames(getCluster(clusterName))
.flatMapIterable(l -> l)
@ -220,25 +243,28 @@ public class SchemasController extends AbstractController implements SchemasApi
return schemaRegistryService.getAllLatestVersionSchemas(getCluster(clusterName), subjectsToRender)
.map(subjs -> subjs.stream().map(kafkaSrMapper::toDto).toList())
.map(subjs -> new SchemaSubjectsResponseDTO().pageCount(totalPages).schemas(subjs));
}).map(ResponseEntity::ok);
}).map(ResponseEntity::ok)
.doOnEach(sig -> auditService.audit(context, sig));
}
@Override
public Mono<ResponseEntity<Void>> updateGlobalSchemaCompatibilityLevel(
String clusterName, @Valid Mono<CompatibilityLevelDTO> compatibilityLevelMono,
ServerWebExchange exchange) {
Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
var context = AccessContext.builder()
.cluster(clusterName)
.schemaActions(SchemaAction.MODIFY_GLOBAL_COMPATIBILITY)
.build());
.operationName("updateGlobalSchemaCompatibilityLevel")
.build();
return validateAccess.then(
return accessControlService.validateAccess(context).then(
compatibilityLevelMono
.flatMap(compatibilityLevelDTO ->
schemaRegistryService.updateGlobalSchemaCompatibility(
getCluster(clusterName),
kafkaSrMapper.fromDto(compatibilityLevelDTO.getCompatibility())
))
.doOnEach(sig -> auditService.audit(context, sig))
.thenReturn(ResponseEntity.ok().build())
);
}
@ -247,12 +273,14 @@ public class SchemasController extends AbstractController implements SchemasApi
public Mono<ResponseEntity<Void>> updateSchemaCompatibilityLevel(
String clusterName, String subject, @Valid Mono<CompatibilityLevelDTO> compatibilityLevelMono,
ServerWebExchange exchange) {
Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
var context = AccessContext.builder()
.cluster(clusterName)
.schemaActions(SchemaAction.EDIT)
.build());
.operationName("updateSchemaCompatibilityLevel")
.operationParams(Map.of("subject", subject))
.build();
return validateAccess.then(
return accessControlService.validateAccess(context).then(
compatibilityLevelMono
.flatMap(compatibilityLevelDTO ->
schemaRegistryService.updateSchemaCompatibility(
@ -260,6 +288,7 @@ public class SchemasController extends AbstractController implements SchemasApi
subject,
kafkaSrMapper.fromDto(compatibilityLevelDTO.getCompatibility())
))
.doOnEach(sig -> auditService.audit(context, sig))
.thenReturn(ResponseEntity.ok().build())
);
}

TopicsController.java
View file
@ -27,9 +27,11 @@ import com.provectus.kafka.ui.model.TopicsResponseDTO;
import com.provectus.kafka.ui.model.rbac.AccessContext;
import com.provectus.kafka.ui.service.TopicsService;
import com.provectus.kafka.ui.service.analyze.TopicAnalysisService;
import com.provectus.kafka.ui.service.audit.AuditService;
import com.provectus.kafka.ui.service.rbac.AccessControlService;
import java.util.Comparator;
import java.util.List;
import java.util.Map;
import javax.validation.Valid;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
@ -52,69 +54,80 @@ public class TopicsController extends AbstractController implements TopicsApi {
private final TopicAnalysisService topicAnalysisService;
private final ClusterMapper clusterMapper;
private final AccessControlService accessControlService;
private final AuditService auditService;
@Override
public Mono<ResponseEntity<TopicDTO>> createTopic(
String clusterName, @Valid Mono<TopicCreationDTO> topicCreation, ServerWebExchange exchange) {
String clusterName, @Valid Mono<TopicCreationDTO> topicCreationMono, ServerWebExchange exchange) {
return topicCreationMono.flatMap(topicCreation -> {
var context = AccessContext.builder()
.cluster(clusterName)
.topicActions(CREATE)
.operationName("createTopic")
.operationParams(topicCreation)
.build();
Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
.cluster(clusterName)
.topicActions(CREATE)
.build());
return validateAccess.then(
topicsService.createTopic(getCluster(clusterName), topicCreation)
.map(clusterMapper::toTopic)
.map(s -> new ResponseEntity<>(s, HttpStatus.OK))
.switchIfEmpty(Mono.just(ResponseEntity.notFound().build()))
);
return accessControlService.validateAccess(context)
.then(topicsService.createTopic(getCluster(clusterName), topicCreation))
.map(clusterMapper::toTopic)
.map(s -> new ResponseEntity<>(s, HttpStatus.OK))
.switchIfEmpty(Mono.just(ResponseEntity.notFound().build()))
.doOnEach(sig -> auditService.audit(context, sig));
});
}
@Override
public Mono<ResponseEntity<TopicDTO>> recreateTopic(String clusterName,
String topicName, ServerWebExchange exchange) {
Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
var context = AccessContext.builder()
.cluster(clusterName)
.topic(topicName)
.topicActions(VIEW, CREATE, DELETE)
.build());
.operationName("recreateTopic")
.build();
return validateAccess.then(
return accessControlService.validateAccess(context).then(
topicsService.recreateTopic(getCluster(clusterName), topicName)
.map(clusterMapper::toTopic)
.map(s -> new ResponseEntity<>(s, HttpStatus.CREATED))
);
).doOnEach(sig -> auditService.audit(context, sig));
}
@Override
public Mono<ResponseEntity<TopicDTO>> cloneTopic(
String clusterName, String topicName, String newTopicName, ServerWebExchange exchange) {
Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
var context = AccessContext.builder()
.cluster(clusterName)
.topic(topicName)
.topicActions(VIEW, CREATE)
.build());
.operationName("cloneTopic")
.operationParams(Map.of("newTopicName", newTopicName))
.build();
return validateAccess.then(topicsService.cloneTopic(getCluster(clusterName), topicName, newTopicName)
.map(clusterMapper::toTopic)
.map(s -> new ResponseEntity<>(s, HttpStatus.CREATED))
);
return accessControlService.validateAccess(context)
.then(topicsService.cloneTopic(getCluster(clusterName), topicName, newTopicName)
.map(clusterMapper::toTopic)
.map(s -> new ResponseEntity<>(s, HttpStatus.CREATED))
).doOnEach(sig -> auditService.audit(context, sig));
}
@Override
public Mono<ResponseEntity<Void>> deleteTopic(
String clusterName, String topicName, ServerWebExchange exchange) {
Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
var context = AccessContext.builder()
.cluster(clusterName)
.topic(topicName)
.topicActions(DELETE)
.build());
.operationName("deleteTopic")
.build();
return validateAccess.then(
topicsService.deleteTopic(getCluster(clusterName), topicName).map(ResponseEntity::ok)
);
return accessControlService.validateAccess(context)
.then(
topicsService.deleteTopic(getCluster(clusterName), topicName)
.thenReturn(ResponseEntity.ok().<Void>build())
).doOnEach(sig -> auditService.audit(context, sig));
}
@ -122,13 +135,14 @@ public class TopicsController extends AbstractController implements TopicsApi {
public Mono<ResponseEntity<Flux<TopicConfigDTO>>> getTopicConfigs(
String clusterName, String topicName, ServerWebExchange exchange) {
Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
var context = AccessContext.builder()
.cluster(clusterName)
.topic(topicName)
.topicActions(VIEW)
.build());
.operationName("getTopicConfigs")
.build();
return validateAccess.then(
return accessControlService.validateAccess(context).then(
topicsService.getTopicConfigs(getCluster(clusterName), topicName)
.map(lst -> lst.stream()
.map(InternalTopicConfig::from)
@ -136,24 +150,25 @@ public class TopicsController extends AbstractController implements TopicsApi {
.collect(toList()))
.map(Flux::fromIterable)
.map(ResponseEntity::ok)
);
).doOnEach(sig -> auditService.audit(context, sig));
}
@Override
public Mono<ResponseEntity<TopicDetailsDTO>> getTopicDetails(
String clusterName, String topicName, ServerWebExchange exchange) {
Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
var context = AccessContext.builder()
.cluster(clusterName)
.topic(topicName)
.topicActions(VIEW)
.build());
.operationName("getTopicDetails")
.build();
return validateAccess.then(
return accessControlService.validateAccess(context).then(
topicsService.getTopicDetails(getCluster(clusterName), topicName)
.map(clusterMapper::toTopicDetails)
.map(ResponseEntity::ok)
);
).doOnEach(sig -> auditService.audit(context, sig));
}
@Override
@ -166,13 +181,19 @@ public class TopicsController extends AbstractController implements TopicsApi {
@Valid SortOrderDTO sortOrder,
ServerWebExchange exchange) {
AccessContext context = AccessContext.builder()
.cluster(clusterName)
.operationName("getTopics")
.build();
return topicsService.getTopicsForPagination(getCluster(clusterName))
.flatMap(existingTopics -> {
.flatMap(topics -> accessControlService.filterViewableTopics(topics, clusterName))
.flatMap(topics -> {
int pageSize = perPage != null && perPage > 0 ? perPage : DEFAULT_PAGE_SIZE;
var topicsToSkip = ((page != null && page > 0 ? page : 1) - 1) * pageSize;
var comparator = sortOrder == null || !sortOrder.equals(SortOrderDTO.DESC)
? getComparatorForTopic(orderBy) : getComparatorForTopic(orderBy).reversed();
List<InternalTopic> filtered = existingTopics.stream()
List<InternalTopic> filtered = topics.stream()
.filter(topic -> !topic.isInternal()
|| showInternal != null && showInternal)
.filter(topic -> search == null || StringUtils.containsIgnoreCase(topic.getName(), search))
@ -188,15 +209,13 @@ public class TopicsController extends AbstractController implements TopicsApi {
.collect(toList());
return topicsService.loadTopics(getCluster(clusterName), topicsPage)
.flatMapMany(Flux::fromIterable)
.filterWhen(dto -> accessControlService.isTopicAccessible(dto, clusterName))
.collectList()
.map(topicsToRender ->
new TopicsResponseDTO()
.topics(topicsToRender.stream().map(clusterMapper::toTopic).collect(toList()))
.pageCount(totalPages));
})
.map(ResponseEntity::ok);
.map(ResponseEntity::ok)
.doOnEach(sig -> auditService.audit(context, sig));
}
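Moving filterViewableTopics ahead of the pagination arithmetic (and dropping the per-page isTopicAccessible check) means page counts are computed over topics the caller can actually see; filtering after the page was cut could return short pages. A sketch of the difference, assuming reactor-core; filterViewable is a stand-in for the real RBAC check:

import java.util.List;
import reactor.core.publisher.Mono;

// Filter first, paginate second: page sizes stay consistent because
// hidden topics never enter the page math.
class FilterBeforePaginateSketch {
  static Mono<List<String>> filterViewable(List<String> topics) {
    return Mono.just(topics.stream().filter(t -> !t.startsWith("_hidden")).toList());
  }

  public static void main(String[] args) {
    var all = List.of("orders", "_hidden.audit", "payments", "users");
    filterViewable(all)
        .map(visible -> visible.subList(0, Math.min(2, visible.size()))) // page 1, size 2
        .subscribe(System.out::println); // [orders, payments] -- a full page
  }
}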
@Override
@ -204,18 +223,19 @@ public class TopicsController extends AbstractController implements TopicsApi {
String clusterName, String topicName, @Valid Mono<TopicUpdateDTO> topicUpdate,
ServerWebExchange exchange) {
Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
var context = AccessContext.builder()
.cluster(clusterName)
.topic(topicName)
.topicActions(VIEW, EDIT)
.build());
.operationName("updateTopic")
.build();
return validateAccess.then(
return accessControlService.validateAccess(context).then(
topicsService
.updateTopic(getCluster(clusterName), topicName, topicUpdate)
.map(clusterMapper::toTopic)
.map(ResponseEntity::ok)
);
).doOnEach(sig -> auditService.audit(context, sig));
}
@Override
@ -224,17 +244,17 @@ public class TopicsController extends AbstractController implements TopicsApi {
Mono<PartitionsIncreaseDTO> partitionsIncrease,
ServerWebExchange exchange) {
Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
var context = AccessContext.builder()
.cluster(clusterName)
.topic(topicName)
.topicActions(VIEW, EDIT)
.build());
.build();
return validateAccess.then(
return accessControlService.validateAccess(context).then(
partitionsIncrease.flatMap(partitions ->
topicsService.increaseTopicPartitions(getCluster(clusterName), topicName, partitions)
).map(ResponseEntity::ok)
);
).doOnEach(sig -> auditService.audit(context, sig));
}
@Override
@ -243,31 +263,34 @@ public class TopicsController extends AbstractController implements TopicsApi {
Mono<ReplicationFactorChangeDTO> replicationFactorChange,
ServerWebExchange exchange) {
Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
var context = AccessContext.builder()
.cluster(clusterName)
.topic(topicName)
.topicActions(VIEW, EDIT)
.build());
.operationName("changeReplicationFactor")
.build();
return validateAccess.then(
return accessControlService.validateAccess(context).then(
replicationFactorChange
.flatMap(rfc ->
topicsService.changeReplicationFactor(getCluster(clusterName), topicName, rfc))
.map(ResponseEntity::ok)
);
).doOnEach(sig -> auditService.audit(context, sig));
}
@Override
public Mono<ResponseEntity<Void>> analyzeTopic(String clusterName, String topicName, ServerWebExchange exchange) {
Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
var context = AccessContext.builder()
.cluster(clusterName)
.topic(topicName)
.topicActions(MESSAGES_READ)
.build());
.operationName("analyzeTopic")
.build();
return validateAccess.then(
return accessControlService.validateAccess(context).then(
topicAnalysisService.analyze(getCluster(clusterName), topicName)
.doOnEach(sig -> auditService.audit(context, sig))
.thenReturn(ResponseEntity.ok().build())
);
}
@ -275,15 +298,17 @@ public class TopicsController extends AbstractController implements TopicsApi {
@Override
public Mono<ResponseEntity<Void>> cancelTopicAnalysis(String clusterName, String topicName,
ServerWebExchange exchange) {
Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
var context = AccessContext.builder()
.cluster(clusterName)
.topic(topicName)
.topicActions(MESSAGES_READ)
.build());
.operationName("cancelTopicAnalysis")
.build();
topicAnalysisService.cancelAnalysis(getCluster(clusterName), topicName);
return validateAccess.thenReturn(ResponseEntity.ok().build());
return accessControlService.validateAccess(context)
.then(Mono.fromRunnable(() -> topicAnalysisService.cancelAnalysis(getCluster(clusterName), topicName)))
.doOnEach(sig -> auditService.audit(context, sig))
.thenReturn(ResponseEntity.ok().build());
}
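The rewrite of cancelTopicAnalysis fixes an ordering problem: the old code invoked cancelAnalysis while the pipeline was being assembled, before access validation ever ran. Wrapping it in Mono.fromRunnable defers the side effect until validation succeeds. A sketch, assuming reactor-core:

import reactor.core.publisher.Mono;

// The runnable executes only if upstream validation completes; on error
// the side effect is skipped entirely.
class DeferredSideEffectSketch {
  public static void main(String[] args) {
    Mono<Void> validate = Mono.error(new SecurityException("denied"));
    validate
        .then(Mono.fromRunnable(() -> System.out.println("analysis cancelled")))
        .subscribe(v -> { }, err -> System.out.println("blocked: " + err.getMessage()));
    // Prints only "blocked: denied"; the runnable never runs.
  }
}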
@ -292,15 +317,18 @@ public class TopicsController extends AbstractController implements TopicsApi {
String topicName,
ServerWebExchange exchange) {
Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
var context = AccessContext.builder()
.cluster(clusterName)
.topic(topicName)
.topicActions(MESSAGES_READ)
.build());
.operationName("getTopicAnalysis")
.build();
return validateAccess.thenReturn(topicAnalysisService.getTopicAnalysis(getCluster(clusterName), topicName)
.map(ResponseEntity::ok)
.orElseGet(() -> ResponseEntity.notFound().build()));
return accessControlService.validateAccess(context)
.thenReturn(topicAnalysisService.getTopicAnalysis(getCluster(clusterName), topicName)
.map(ResponseEntity::ok)
.orElseGet(() -> ResponseEntity.notFound().build()))
.doOnEach(sig -> auditService.audit(context, sig));
}
private Comparator<InternalTopic> getComparatorForTopic(

AbstractEmitter.java
View file
@ -2,37 +2,28 @@ package com.provectus.kafka.ui.emitter;
import com.provectus.kafka.ui.model.TopicMessageEventDTO;
import java.time.Duration;
import java.time.Instant;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.common.utils.Bytes;
import reactor.core.publisher.FluxSink;
public abstract class AbstractEmitter {
private final MessagesProcessing messagesProcessing;
private final PollingThrottler throttler;
protected final PollingSettings pollingSettings;
protected AbstractEmitter(MessagesProcessing messagesProcessing, PollingSettings pollingSettings) {
this.messagesProcessing = messagesProcessing;
this.pollingSettings = pollingSettings;
this.throttler = pollingSettings.getPollingThrottler();
}
protected ConsumerRecords<Bytes, Bytes> poll(
FluxSink<TopicMessageEventDTO> sink, Consumer<Bytes, Bytes> consumer) {
protected PolledRecords poll(
FluxSink<TopicMessageEventDTO> sink, EnhancedConsumer consumer) {
return poll(sink, consumer, pollingSettings.getPollTimeout());
}
protected ConsumerRecords<Bytes, Bytes> poll(
FluxSink<TopicMessageEventDTO> sink, Consumer<Bytes, Bytes> consumer, Duration timeout) {
Instant start = Instant.now();
ConsumerRecords<Bytes, Bytes> records = consumer.poll(timeout);
Instant finish = Instant.now();
int polledBytes = sendConsuming(sink, records, Duration.between(start, finish).toMillis());
throttler.throttleAfterPoll(polledBytes);
protected PolledRecords poll(FluxSink<TopicMessageEventDTO> sink, EnhancedConsumer consumer, Duration timeout) {
var records = consumer.pollEnhanced(timeout);
sendConsuming(sink, records);
return records;
}
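The emitter refactor replaces raw ConsumerRecords plus hand-measured elapsed time and the explicit throttler call with a single value object returned by EnhancedConsumer.pollEnhanced. Its exact shape is not shown in this diff; judging by the call sites (count(), bytes(), elapsed()), something like the following record would fit, though the real PolledRecords may differ, for example by carrying the consumer records themselves:

import java.time.Duration;

// A guess at the PolledRecords shape implied by the call sites
// (count(), bytes(), elapsed()); the real class may differ.
record PolledRecordsSketch(int count, long bytes, Duration elapsed) {

  static PolledRecordsSketch measure(int count, long bytes, long startNanos) {
    return new PolledRecordsSketch(count, bytes,
        Duration.ofNanos(System.nanoTime() - startNanos));
  }
}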
@ -49,10 +40,8 @@ public abstract class AbstractEmitter {
messagesProcessing.sendPhase(sink, name);
}
protected int sendConsuming(FluxSink<TopicMessageEventDTO> sink,
ConsumerRecords<Bytes, Bytes> records,
long elapsed) {
return messagesProcessing.sentConsumingInfo(sink, records, elapsed);
protected void sendConsuming(FluxSink<TopicMessageEventDTO> sink, PolledRecords records) {
messagesProcessing.sentConsumingInfo(sink, records);
}
protected void sendFinishStatsAndCompleteSink(FluxSink<TopicMessageEventDTO> sink) {

BackwardRecordEmitter.java
View file
@ -9,9 +9,7 @@ import java.util.List;
import java.util.TreeMap;
import java.util.function.Supplier;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.errors.InterruptException;
import org.apache.kafka.common.utils.Bytes;
@ -22,12 +20,12 @@ public class BackwardRecordEmitter
extends AbstractEmitter
implements java.util.function.Consumer<FluxSink<TopicMessageEventDTO>> {
private final Supplier<KafkaConsumer<Bytes, Bytes>> consumerSupplier;
private final Supplier<EnhancedConsumer> consumerSupplier;
private final ConsumerPosition consumerPosition;
private final int messagesPerPage;
public BackwardRecordEmitter(
Supplier<KafkaConsumer<Bytes, Bytes>> consumerSupplier,
Supplier<EnhancedConsumer> consumerSupplier,
ConsumerPosition consumerPosition,
int messagesPerPage,
MessagesProcessing messagesProcessing,
@ -41,7 +39,7 @@ public class BackwardRecordEmitter
@Override
public void accept(FluxSink<TopicMessageEventDTO> sink) {
log.debug("Starting backward polling for {}", consumerPosition);
try (KafkaConsumer<Bytes, Bytes> consumer = consumerSupplier.get()) {
try (EnhancedConsumer consumer = consumerSupplier.get()) {
sendPhase(sink, "Created consumer");
var seekOperations = SeekOperations.create(consumer, consumerPosition);
@ -91,7 +89,7 @@ public class BackwardRecordEmitter
TopicPartition tp,
long fromOffset,
long toOffset,
Consumer<Bytes, Bytes> consumer,
EnhancedConsumer consumer,
FluxSink<TopicMessageEventDTO> sink
) {
consumer.assign(Collections.singleton(tp));
@ -101,13 +99,13 @@ public class BackwardRecordEmitter
var recordsToSend = new ArrayList<ConsumerRecord<Bytes, Bytes>>();
EmptyPollsCounter emptyPolls = pollingSettings.createEmptyPollsCounter();
while (!sink.isCancelled()
&& !sendLimitReached()
&& recordsToSend.size() < desiredMsgsToPoll
&& !emptyPolls.noDataEmptyPollsReached()) {
var polledRecords = poll(sink, consumer, pollingSettings.getPartitionPollTimeout());
emptyPolls.count(polledRecords);
emptyPolls.count(polledRecords.count());
log.debug("{} records polled from {}", polledRecords.count(), tp);
@ -115,7 +113,7 @@ public class BackwardRecordEmitter
.filter(r -> r.offset() < toOffset)
.toList();
if (!polledRecords.isEmpty() && filteredRecords.isEmpty()) {
if (polledRecords.count() > 0 && filteredRecords.isEmpty()) {
// we already read all messages in target offsets interval
break;
}

View file

@ -2,9 +2,6 @@ package com.provectus.kafka.ui.emitter;
import com.provectus.kafka.ui.model.TopicMessageConsumingDTO;
import com.provectus.kafka.ui.model.TopicMessageEventDTO;
import com.provectus.kafka.ui.util.ConsumerRecordsUtil;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.common.utils.Bytes;
import reactor.core.publisher.FluxSink;
class ConsumingStats {
@ -13,23 +10,17 @@ class ConsumingStats {
private int records = 0;
private long elapsed = 0;
/**
* returns bytes polled.
*/
int sendConsumingEvt(FluxSink<TopicMessageEventDTO> sink,
ConsumerRecords<Bytes, Bytes> polledRecords,
long elapsed,
void sendConsumingEvt(FluxSink<TopicMessageEventDTO> sink,
PolledRecords polledRecords,
int filterApplyErrors) {
int polledBytes = ConsumerRecordsUtil.calculatePolledSize(polledRecords);
bytes += polledBytes;
bytes += polledRecords.bytes();
this.records += polledRecords.count();
this.elapsed += elapsed;
this.elapsed += polledRecords.elapsed().toMillis();
sink.next(
new TopicMessageEventDTO()
.type(TopicMessageEventDTO.TypeEnum.CONSUMING)
.consuming(createConsumingStats(sink, filterApplyErrors))
);
return polledBytes;
}
void sendFinishEvent(FluxSink<TopicMessageEventDTO> sink, int filterApplyErrors) {

View file

@ -17,8 +17,8 @@ public class EmptyPollsCounter {
this.maxEmptyPolls = maxEmptyPolls;
}
public void count(ConsumerRecords<?, ?> polled) {
emptyPolls = polled.isEmpty() ? emptyPolls + 1 : 0;
public void count(int polledCount) {
emptyPolls = polledCount == 0 ? emptyPolls + 1 : 0;
}
public boolean noDataEmptyPollsReached() {

View file
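The counter now takes a plain int instead of ConsumerRecords, matching the new PolledRecords.count() call sites above. Its contract is small enough to restate as a runnable sketch; the count() body is verbatim from this diff, while the >= threshold check is an assumption:

class EmptyPollsCounterSketch {
  private final int maxEmptyPolls;
  private int emptyPolls = 0;

  EmptyPollsCounterSketch(int maxEmptyPolls) {
    this.maxEmptyPolls = maxEmptyPolls;
  }

  // consecutive empty polls accumulate; any poll with data resets the streak
  void count(int polledCount) {
    emptyPolls = polledCount == 0 ? emptyPolls + 1 : 0;
  }

  boolean noDataEmptyPollsReached() {
    return emptyPolls >= maxEmptyPolls; // assumed comparison
  }

  public static void main(String[] args) {
    var counter = new EmptyPollsCounterSketch(3);
    for (int polled : new int[] {5, 0, 0, 0}) {
      counter.count(polled);
    }
    System.out.println(counter.noDataEmptyPollsReached()); // true: three trailing empty polls
  }
}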

@ -0,0 +1,82 @@
package com.provectus.kafka.ui.emitter;
import com.google.common.base.Preconditions;
import com.google.common.base.Stopwatch;
import com.provectus.kafka.ui.util.ApplicationMetrics;
import java.time.Duration;
import java.util.Collection;
import java.util.List;
import java.util.Properties;
import java.util.Set;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
import lombok.RequiredArgsConstructor;
import lombok.experimental.Delegate;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.BytesDeserializer;
import org.apache.kafka.common.utils.Bytes;
public class EnhancedConsumer extends KafkaConsumer<Bytes, Bytes> {
private final PollingThrottler throttler;
private final ApplicationMetrics metrics;
private String pollingTopic;
public EnhancedConsumer(Properties properties,
PollingThrottler throttler,
ApplicationMetrics metrics) {
super(properties, new BytesDeserializer(), new BytesDeserializer());
this.throttler = throttler;
this.metrics = metrics;
metrics.activeConsumers().incrementAndGet();
}
public PolledRecords pollEnhanced(Duration dur) {
var stopwatch = Stopwatch.createStarted();
ConsumerRecords<Bytes, Bytes> polled = poll(dur);
PolledRecords polledEnhanced = PolledRecords.create(polled, stopwatch.elapsed());
var throttled = throttler.throttleAfterPoll(polledEnhanced.bytes());
metrics.meterPolledRecords(pollingTopic, polledEnhanced, throttled);
return polledEnhanced;
}
@Override
public void assign(Collection<TopicPartition> partitions) {
super.assign(partitions);
Set<String> assignedTopics = partitions.stream().map(TopicPartition::topic).collect(Collectors.toSet());
Preconditions.checkState(assignedTopics.size() == 1);
this.pollingTopic = assignedTopics.iterator().next();
}
@Override
public void subscribe(Pattern pattern) {
throw new UnsupportedOperationException();
}
@Override
public void subscribe(Collection<String> topics) {
throw new UnsupportedOperationException();
}
@Override
public void subscribe(Pattern pattern, ConsumerRebalanceListener listener) {
throw new UnsupportedOperationException();
}
@Override
public void subscribe(Collection<String> topics, ConsumerRebalanceListener listener) {
throw new UnsupportedOperationException();
}
@Override
public void close(Duration timeout) {
metrics.activeConsumers().decrementAndGet();
super.close(timeout);
}
}

View file
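EnhancedConsumer keeps the full KafkaConsumer API but concentrates the cross-cutting concerns (timing, byte accounting, throttling, metrics) in pollEnhanced, which is why the emitters above could shed that logic. A hedged wiring sketch; the ApplicationMetrics instance and the topic/partition values are placeholders, and same-package access to the emitter classes is assumed:

import java.time.Duration;
import java.util.List;
import java.util.Properties;
import org.apache.kafka.common.TopicPartition;
import com.provectus.kafka.ui.util.ApplicationMetrics;

class EnhancedConsumerSketch {
  static void preview(Properties props, ApplicationMetrics metrics) {
    // props must carry bootstrap.servers etc.; metrics construction is not shown in this diff
    try (var consumer = new EnhancedConsumer(props, PollingThrottler.noop(), metrics)) {
      consumer.assign(List.of(new TopicPartition("orders", 0))); // the override enforces exactly one topic
      PolledRecords polled = consumer.pollEnhanced(Duration.ofSeconds(1));
      System.out.printf("%d records / %d bytes in %s%n",
          polled.count(), polled.bytes(), polled.elapsed());
    } // close() delegates to close(Duration), decrementing the active-consumers gauge
  }
}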

@ -5,8 +5,6 @@ import com.provectus.kafka.ui.model.TopicMessageEventDTO;
import java.util.function.Supplier;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.errors.InterruptException;
import org.apache.kafka.common.utils.Bytes;
import reactor.core.publisher.FluxSink;
@ -16,11 +14,11 @@ public class ForwardRecordEmitter
extends AbstractEmitter
implements java.util.function.Consumer<FluxSink<TopicMessageEventDTO>> {
private final Supplier<KafkaConsumer<Bytes, Bytes>> consumerSupplier;
private final Supplier<EnhancedConsumer> consumerSupplier;
private final ConsumerPosition position;
public ForwardRecordEmitter(
Supplier<KafkaConsumer<Bytes, Bytes>> consumerSupplier,
Supplier<EnhancedConsumer> consumerSupplier,
ConsumerPosition position,
MessagesProcessing messagesProcessing,
PollingSettings pollingSettings) {
@ -32,7 +30,7 @@ public class ForwardRecordEmitter
@Override
public void accept(FluxSink<TopicMessageEventDTO> sink) {
log.debug("Starting forward polling for {}", position);
try (KafkaConsumer<Bytes, Bytes> consumer = consumerSupplier.get()) {
try (EnhancedConsumer consumer = consumerSupplier.get()) {
sendPhase(sink, "Assigning partitions");
var seekOperations = SeekOperations.create(consumer, position);
seekOperations.assignAndSeekNonEmptyPartitions();
@ -44,8 +42,8 @@ public class ForwardRecordEmitter
&& !emptyPolls.noDataEmptyPollsReached()) {
sendPhase(sink, "Polling");
ConsumerRecords<Bytes, Bytes> records = poll(sink, consumer);
emptyPolls.count(records);
var records = poll(sink, consumer);
emptyPolls.count(records.count());
log.debug("{} records polled", records.count());

View file

@ -8,7 +8,6 @@ import java.util.function.Predicate;
import javax.annotation.Nullable;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.common.utils.Bytes;
import reactor.core.publisher.FluxSink;
@ -54,13 +53,10 @@ public class MessagesProcessing {
}
}
int sentConsumingInfo(FluxSink<TopicMessageEventDTO> sink,
ConsumerRecords<Bytes, Bytes> polledRecords,
long elapsed) {
void sentConsumingInfo(FluxSink<TopicMessageEventDTO> sink, PolledRecords polledRecords) {
if (!sink.isCancelled()) {
return consumingStats.sendConsumingEvt(sink, polledRecords, elapsed, filterApplyErrors);
consumingStats.sendConsumingEvt(sink, polledRecords, filterApplyErrors);
}
return 0;
}
void sendFinishEvent(FluxSink<TopicMessageEventDTO> sink) {

View file

@ -0,0 +1,48 @@
package com.provectus.kafka.ui.emitter;
import java.time.Duration;
import java.util.Iterator;
import java.util.List;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.header.Header;
import org.apache.kafka.common.utils.Bytes;
public record PolledRecords(int count,
int bytes,
Duration elapsed,
ConsumerRecords<Bytes, Bytes> records) implements Iterable<ConsumerRecord<Bytes, Bytes>> {
static PolledRecords create(ConsumerRecords<Bytes, Bytes> polled, Duration pollDuration) {
return new PolledRecords(
polled.count(),
calculatePolledRecSize(polled),
pollDuration,
polled
);
}
public List<ConsumerRecord<Bytes, Bytes>> records(TopicPartition tp) {
return records.records(tp);
}
@Override
public Iterator<ConsumerRecord<Bytes, Bytes>> iterator() {
return records.iterator();
}
private static int calculatePolledRecSize(Iterable<ConsumerRecord<Bytes, Bytes>> recs) {
int polledBytes = 0;
for (ConsumerRecord<Bytes, Bytes> rec : recs) {
for (Header header : rec.headers()) {
polledBytes +=
(header.key() != null ? header.key().getBytes().length : 0)
+ (header.value() != null ? header.value().length : 0);
}
polledBytes += rec.key() == null ? 0 : rec.serializedKeySize();
polledBytes += rec.value() == null ? 0 : rec.serializedValueSize();
}
return polledBytes;
}
}

View file
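PolledRecords carries the poll result together with precomputed count, byte size, and elapsed time, so emitters and stats never re-walk the records for accounting; the byte size sums header keys/values plus the serialized key/value sizes. A runnable sketch over an empty poll (create() is package-private, so same-package placement is assumed):

import java.time.Duration;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.common.utils.Bytes;

class PolledRecordsSketch {
  public static void main(String[] args) {
    PolledRecords empty = PolledRecords.create(ConsumerRecords.empty(), Duration.ofMillis(5));
    System.out.println(empty.count() + " records / " + empty.bytes() + " bytes in " + empty.elapsed());
    for (ConsumerRecord<Bytes, Bytes> rec : empty) {
      // Iterable pass-through to the wrapped ConsumerRecords; nothing is copied
    }
  }
}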

@ -3,11 +3,8 @@ package com.provectus.kafka.ui.emitter;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.util.concurrent.RateLimiter;
import com.provectus.kafka.ui.config.ClustersProperties;
import com.provectus.kafka.ui.util.ConsumerRecordsUtil;
import java.util.function.Supplier;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.common.utils.Bytes;
@Slf4j
public class PollingThrottler {
@ -36,18 +33,17 @@ public class PollingThrottler {
return new PollingThrottler("noop", RateLimiter.create(Long.MAX_VALUE));
}
public void throttleAfterPoll(int polledBytes) {
//returns true if polling was throttled
public boolean throttleAfterPoll(int polledBytes) {
if (polledBytes > 0) {
double sleptSeconds = rateLimiter.acquire(polledBytes);
if (!throttled && sleptSeconds > 0.0) {
throttled = true;
log.debug("Polling throttling enabled for cluster {} at rate {} bytes/sec", clusterName, rateLimiter.getRate());
return true;
}
}
}
public void throttleAfterPoll(ConsumerRecords<Bytes, Bytes> polled) {
throttleAfterPoll(ConsumerRecordsUtil.calculatePolledSize(polled));
return false;
}
}

View file
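throttleAfterPoll now reports whether throttling actually happened, which EnhancedConsumer feeds into the polling metrics. Under the hood it is Guava's RateLimiter with bytes as permits; a self-contained sketch of that mechanic (the 1 MiB/s cap is an arbitrary example value):

import com.google.common.util.concurrent.RateLimiter;

class ThrottleSketch {
  public static void main(String[] args) {
    RateLimiter limiter = RateLimiter.create(1024 * 1024); // permits per second == bytes per second
    for (int i = 0; i < 3; i++) {
      double slept = limiter.acquire(700 * 1024); // blocks once the byte budget is exhausted
      System.out.println("throttled=" + (slept > 0.0)); // the first acquire is typically free
    }
  }
}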

@ -5,19 +5,17 @@ import com.provectus.kafka.ui.model.TopicMessageEventDTO;
import java.util.HashMap;
import java.util.function.Supplier;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.errors.InterruptException;
import org.apache.kafka.common.utils.Bytes;
import reactor.core.publisher.FluxSink;
@Slf4j
public class TailingEmitter extends AbstractEmitter
implements java.util.function.Consumer<FluxSink<TopicMessageEventDTO>> {
private final Supplier<KafkaConsumer<Bytes, Bytes>> consumerSupplier;
private final Supplier<EnhancedConsumer> consumerSupplier;
private final ConsumerPosition consumerPosition;
public TailingEmitter(Supplier<KafkaConsumer<Bytes, Bytes>> consumerSupplier,
public TailingEmitter(Supplier<EnhancedConsumer> consumerSupplier,
ConsumerPosition consumerPosition,
MessagesProcessing messagesProcessing,
PollingSettings pollingSettings) {
@ -29,7 +27,7 @@ public class TailingEmitter extends AbstractEmitter
@Override
public void accept(FluxSink<TopicMessageEventDTO> sink) {
log.debug("Starting tailing polling for {}", consumerPosition);
try (KafkaConsumer<Bytes, Bytes> consumer = consumerSupplier.get()) {
try (EnhancedConsumer consumer = consumerSupplier.get()) {
assignAndSeek(consumer);
while (!sink.isCancelled()) {
sendPhase(sink, "Polling");
@ -47,7 +45,7 @@ public class TailingEmitter extends AbstractEmitter
}
}
private void assignAndSeek(KafkaConsumer<Bytes, Bytes> consumer) {
private void assignAndSeek(EnhancedConsumer consumer) {
var seekOperations = SeekOperations.create(consumer, consumerPosition);
var seekOffsets = new HashMap<>(seekOperations.getEndOffsets()); // defaulting offsets to topic end
seekOffsets.putAll(seekOperations.getOffsetsForSeek()); // this will only set non-empty partitions

View file

@ -0,0 +1,7 @@
package com.provectus.kafka.ui.exception;
public class JsonAvroConversionException extends ValidationException {
public JsonAvroConversionException(String message) {
super(message);
}
}

View file

@ -1,7 +0,0 @@
package com.provectus.kafka.ui.exception;
public class JsonToAvroConversionException extends ValidationException {
public JsonToAvroConversionException(String message) {
super(message);
}
}

View file

@ -34,7 +34,7 @@ public interface KafkaConnectMapper {
com.provectus.kafka.ui.connect.model.ConnectorPluginConfigValidationResponse
connectorPluginConfigValidationResponse);
default FullConnectorInfoDTO fullConnectorInfoFromTuple(InternalConnectInfo connectInfo) {
default FullConnectorInfoDTO fullConnectorInfo(InternalConnectInfo connectInfo) {
ConnectorDTO connector = connectInfo.getConnector();
List<TaskDTO> tasks = connectInfo.getTasks();
int failedTasksCount = (int) tasks.stream()

View file

@ -3,18 +3,21 @@ package com.provectus.kafka.ui.mapper;
import com.provectus.kafka.ui.model.CompatibilityCheckResponseDTO;
import com.provectus.kafka.ui.model.CompatibilityLevelDTO;
import com.provectus.kafka.ui.model.NewSchemaSubjectDTO;
import com.provectus.kafka.ui.model.SchemaReferenceDTO;
import com.provectus.kafka.ui.model.SchemaSubjectDTO;
import com.provectus.kafka.ui.model.SchemaTypeDTO;
import com.provectus.kafka.ui.service.SchemaRegistryService;
import com.provectus.kafka.ui.sr.model.Compatibility;
import com.provectus.kafka.ui.sr.model.CompatibilityCheckResponse;
import com.provectus.kafka.ui.sr.model.NewSubject;
import com.provectus.kafka.ui.sr.model.SchemaReference;
import com.provectus.kafka.ui.sr.model.SchemaType;
import java.util.List;
import java.util.Optional;
import org.mapstruct.Mapper;
@Mapper(componentModel = "spring")
@Mapper
public interface KafkaSrMapper {
default SchemaSubjectDTO toDto(SchemaRegistryService.SubjectWithCompatibilityLevel s) {
@ -24,9 +27,12 @@ public interface KafkaSrMapper {
.subject(s.getSubject())
.schema(s.getSchema())
.schemaType(SchemaTypeDTO.fromValue(Optional.ofNullable(s.getSchemaType()).orElse(SchemaType.AVRO).getValue()))
.references(toDto(s.getReferences()))
.compatibilityLevel(s.getCompatibility().toString());
}
List<SchemaReferenceDTO> toDto(List<SchemaReference> references);
CompatibilityCheckResponseDTO toDto(CompatibilityCheckResponse ccr);
CompatibilityLevelDTO.CompatibilityEnum toDto(Compatibility compatibility);

View file
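Besides wiring schema references into the DTO, the new List<SchemaReferenceDTO> toDto(List<SchemaReference>) declaration is all MapStruct needs to generate an element-by-element list mapping, since the element types' properties line up. With componentModel = "spring" removed, the generated implementation is no longer a Spring bean; how this codebase obtains it is not shown in this diff, but the default-model route is the Mappers factory (an assumption):

import org.mapstruct.factory.Mappers;

class SrMapperSketch {
  static final KafkaSrMapper MAPPER = Mappers.getMapper(KafkaSrMapper.class);
}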

@ -2,6 +2,7 @@ package com.provectus.kafka.ui.model.rbac;
import com.provectus.kafka.ui.model.rbac.permission.AclAction;
import com.provectus.kafka.ui.model.rbac.permission.ApplicationConfigAction;
import com.provectus.kafka.ui.model.rbac.permission.AuditAction;
import com.provectus.kafka.ui.model.rbac.permission.ClusterConfigAction;
import com.provectus.kafka.ui.model.rbac.permission.ConnectAction;
import com.provectus.kafka.ui.model.rbac.permission.ConsumerGroupAction;
@ -11,6 +12,7 @@ import com.provectus.kafka.ui.model.rbac.permission.TopicAction;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import lombok.Value;
import org.springframework.util.Assert;
@ -40,6 +42,11 @@ public class AccessContext {
Collection<AclAction> aclActions;
Collection<AuditAction> auditAction;
String operationName;
Object operationParams;
public static AccessContextBuilder builder() {
return new AccessContextBuilder();
}
@ -59,6 +66,10 @@ public class AccessContext {
private Collection<SchemaAction> schemaActions = Collections.emptySet();
private Collection<KsqlAction> ksqlActions = Collections.emptySet();
private Collection<AclAction> aclActions = Collections.emptySet();
private Collection<AuditAction> auditActions = Collections.emptySet();
private String operationName;
private Object operationParams;
private AccessContextBuilder() {
}
@ -141,6 +152,27 @@ public class AccessContext {
return this;
}
public AccessContextBuilder auditActions(AuditAction... actions) {
Assert.isTrue(actions.length > 0, "actions not present");
this.auditActions = List.of(actions);
return this;
}
public AccessContextBuilder operationName(String operationName) {
this.operationName = operationName;
return this;
}
public AccessContextBuilder operationParams(Object operationParams) {
this.operationParams = operationParams;
return this;
}
public AccessContextBuilder operationParams(Map<String, Object> paramsMap) {
this.operationParams = paramsMap;
return this;
}
public AccessContext build() {
return new AccessContext(
applicationConfigActions,
@ -150,7 +182,8 @@ public class AccessContext {
connect, connectActions,
connector,
schema, schemaActions,
ksqlActions, aclActions);
ksqlActions, aclActions, auditActions,
operationName, operationParams);
}
}
}

View file
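With operationName and operationParams on the context, one object now drives both the permission check and the audit record. A usage sketch mirroring the controller change earlier in this diff; the params map content is illustrative:

import java.util.Map;
import com.provectus.kafka.ui.model.rbac.permission.TopicAction;

class AccessContextSketch {
  static AccessContext topicAnalysisContext(String clusterName, String topicName) {
    return AccessContext.builder()
        .cluster(clusterName)
        .topic(topicName)
        .topicActions(TopicAction.MESSAGES_READ)
        .operationName("getTopicAnalysis")
        .operationParams(Map.of("topicName", topicName)) // Map overload added in this change
        .build();
  }
}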

@ -2,11 +2,13 @@ package com.provectus.kafka.ui.model.rbac;
import static com.provectus.kafka.ui.model.rbac.Resource.ACL;
import static com.provectus.kafka.ui.model.rbac.Resource.APPLICATIONCONFIG;
import static com.provectus.kafka.ui.model.rbac.Resource.AUDIT;
import static com.provectus.kafka.ui.model.rbac.Resource.CLUSTERCONFIG;
import static com.provectus.kafka.ui.model.rbac.Resource.KSQL;
import com.provectus.kafka.ui.model.rbac.permission.AclAction;
import com.provectus.kafka.ui.model.rbac.permission.ApplicationConfigAction;
import com.provectus.kafka.ui.model.rbac.permission.AuditAction;
import com.provectus.kafka.ui.model.rbac.permission.ClusterConfigAction;
import com.provectus.kafka.ui.model.rbac.permission.ConnectAction;
import com.provectus.kafka.ui.model.rbac.permission.ConsumerGroupAction;
@ -14,6 +16,7 @@ import com.provectus.kafka.ui.model.rbac.permission.KsqlAction;
import com.provectus.kafka.ui.model.rbac.permission.SchemaAction;
import com.provectus.kafka.ui.model.rbac.permission.TopicAction;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.regex.Pattern;
import javax.annotation.Nullable;
@ -28,7 +31,8 @@ import org.springframework.util.Assert;
@EqualsAndHashCode
public class Permission {
private static final List<Resource> RBAC_ACTION_EXEMPT_LIST = List.of(KSQL, CLUSTERCONFIG, APPLICATIONCONFIG, ACL);
private static final List<Resource> RBAC_ACTION_EXEMPT_LIST =
List.of(KSQL, CLUSTERCONFIG, APPLICATIONCONFIG, ACL, AUDIT);
Resource resource;
List<String> actions;
@ -70,6 +74,10 @@ public class Permission {
}
private List<String> getAllActionValues() {
if (resource == null) {
return Collections.emptyList();
}
return switch (this.resource) {
case APPLICATIONCONFIG -> Arrays.stream(ApplicationConfigAction.values()).map(Enum::toString).toList();
case CLUSTERCONFIG -> Arrays.stream(ClusterConfigAction.values()).map(Enum::toString).toList();
@ -79,6 +87,7 @@ public class Permission {
case CONNECT -> Arrays.stream(ConnectAction.values()).map(Enum::toString).toList();
case KSQL -> Arrays.stream(KsqlAction.values()).map(Enum::toString).toList();
case ACL -> Arrays.stream(AclAction.values()).map(Enum::toString).toList();
case AUDIT -> Arrays.stream(AuditAction.values()).map(Enum::toString).toList();
};
}

View file

@ -12,7 +12,8 @@ public enum Resource {
SCHEMA,
CONNECT,
KSQL,
ACL;
ACL,
AUDIT;
@Nullable
public static Resource fromString(String name) {

View file

@ -0,0 +1,14 @@
package com.provectus.kafka.ui.model.rbac.permission;
import org.apache.commons.lang3.EnumUtils;
import org.jetbrains.annotations.Nullable;
public enum AuditAction implements PermissibleAction {
VIEW;
@Nullable
public static AuditAction fromString(String name) {
return EnumUtils.getEnum(AuditAction.class, name);
}
}

View file

@ -1,4 +1,8 @@
package com.provectus.kafka.ui.model.rbac.permission;
public interface PermissibleAction {
public sealed interface PermissibleAction permits
AclAction, ApplicationConfigAction,
ConsumerGroupAction, SchemaAction,
ConnectAction, ClusterConfigAction,
KsqlAction, TopicAction, AuditAction {
}

View file
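Sealing the interface turns the permission types into a closed list the compiler knows about: a new action type must be added to the permits clause, forcing an explicit decision wherever the hierarchy is consumed. The closed set is also inspectable at runtime (Java 17+):

class SealedSketch {
  public static void main(String[] args) {
    // prints the permitted implementations: AclAction, ApplicationConfigAction, ...
    for (Class<?> permitted : PermissibleAction.class.getPermittedSubclasses()) {
      System.out.println(permitted.getSimpleName());
    }
  }
}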

@ -11,6 +11,8 @@ import com.provectus.kafka.ui.serde.api.PropertyResolver;
import com.provectus.kafka.ui.serde.api.Serde;
import com.provectus.kafka.ui.serdes.builtin.AvroEmbeddedSerde;
import com.provectus.kafka.ui.serdes.builtin.Base64Serde;
import com.provectus.kafka.ui.serdes.builtin.ConsumerOffsetsSerde;
import com.provectus.kafka.ui.serdes.builtin.HexSerde;
import com.provectus.kafka.ui.serdes.builtin.Int32Serde;
import com.provectus.kafka.ui.serdes.builtin.Int64Serde;
import com.provectus.kafka.ui.serdes.builtin.ProtobufFileSerde;
@ -46,6 +48,7 @@ public class SerdesInitializer {
.put(UInt64Serde.name(), UInt64Serde.class)
.put(AvroEmbeddedSerde.name(), AvroEmbeddedSerde.class)
.put(Base64Serde.name(), Base64Serde.class)
.put(HexSerde.name(), HexSerde.class)
.put(UuidBinarySerde.name(), UuidBinarySerde.class)
.build(),
new CustomSerdeLoader()
@ -118,6 +121,8 @@ public class SerdesInitializer {
}
});
registerTopicRelatedSerde(registeredSerdes);
return new ClusterSerdes(
registeredSerdes,
Optional.ofNullable(clusterProperties.getDefaultKeySerde())
@ -132,6 +137,27 @@ public class SerdesInitializer {
);
}
/**
* Registers serdes that should only be used for specific (hard-coded) topics, like ConsumerOffsetsSerde.
*/
private void registerTopicRelatedSerde(Map<String, SerdeInstance> serdes) {
registerConsumerOffsetsSerde(serdes);
}
private void registerConsumerOffsetsSerde(Map<String, SerdeInstance> serdes) {
var pattern = Pattern.compile(ConsumerOffsetsSerde.TOPIC);
serdes.put(
ConsumerOffsetsSerde.name(),
new SerdeInstance(
ConsumerOffsetsSerde.name(),
new ConsumerOffsetsSerde(),
pattern,
pattern,
null
)
);
}
private SerdeInstance createFallbackSerde() {
StringSerde serde = new StringSerde();
serde.configure(PropertyResolverImpl.empty(), PropertyResolverImpl.empty(), PropertyResolverImpl.empty());

View file

@ -19,12 +19,6 @@ public class AvroEmbeddedSerde implements BuiltInSerde {
return "Avro (Embedded)";
}
@Override
public void configure(PropertyResolver serdeProperties,
PropertyResolver kafkaClusterProperties,
PropertyResolver globalProperties) {
}
@Override
public Optional<String> getDescription() {
return Optional.empty();

View file

@ -1,8 +1,6 @@
package com.provectus.kafka.ui.serdes.builtin;
import com.provectus.kafka.ui.serde.api.DeserializeResult;
import com.provectus.kafka.ui.serde.api.PropertyResolver;
import com.provectus.kafka.ui.serde.api.RecordHeaders;
import com.provectus.kafka.ui.serde.api.SchemaDescription;
import com.provectus.kafka.ui.serdes.BuiltInSerde;
import java.util.Base64;
@ -16,12 +14,6 @@ public class Base64Serde implements BuiltInSerde {
return "Base64";
}
@Override
public void configure(PropertyResolver serdeProperties,
PropertyResolver kafkaClusterProperties,
PropertyResolver globalProperties) {
}
@Override
public Optional<String> getDescription() {
return Optional.empty();
@ -44,31 +36,25 @@ public class Base64Serde implements BuiltInSerde {
@Override
public Serializer serializer(String topic, Target type) {
return new Serializer() {
@Override
public byte[] serialize(String input) {
input = input.trim();
// it is actually a hack to provide the ability to send an empty array as a key/value
if (input.length() == 0) {
return new byte[]{};
}
return Base64.getDecoder().decode(input);
var decoder = Base64.getDecoder();
return inputString -> {
inputString = inputString.trim();
// it is actually a hack to provide the ability to send an empty array as a key/value
if (inputString.length() == 0) {
return new byte[] {};
}
return decoder.decode(inputString);
};
}
@Override
public Deserializer deserializer(String topic, Target type) {
var encoder = Base64.getEncoder();
return new Deserializer() {
@Override
public DeserializeResult deserialize(RecordHeaders headers, byte[] data) {
return new DeserializeResult(
return (headers, data) ->
new DeserializeResult(
encoder.encodeToString(data),
DeserializeResult.Type.STRING,
Map.of()
);
}
};
}
}

View file
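The rewrite above works because Serializer and Deserializer are single-method interfaces in the serde API, so the anonymous classes collapse into lambdas, and the Base64 codec is hoisted out so it is created once per serializer/deserializer rather than per call. The same shape in isolation, using only the JDK:

import java.util.Base64;
import java.util.function.Function;

class Base64LambdaSketch {
  public static void main(String[] args) {
    var decoder = Base64.getDecoder(); // captured once, reused on every call
    Function<String, byte[]> serialize =
        input -> input.trim().isEmpty() ? new byte[] {} : decoder.decode(input.trim());
    Function<byte[], String> deserialize = data -> Base64.getEncoder().encodeToString(data);

    System.out.println(deserialize.apply(serialize.apply("aGVsbG8="))); // aGVsbG8=
  }
}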

@ -0,0 +1,294 @@
package com.provectus.kafka.ui.serdes.builtin;
import com.fasterxml.jackson.core.JsonGenerator;
import com.fasterxml.jackson.databind.JsonSerializer;
import com.fasterxml.jackson.databind.SerializerProvider;
import com.fasterxml.jackson.databind.json.JsonMapper;
import com.fasterxml.jackson.databind.module.SimpleModule;
import com.provectus.kafka.ui.serde.api.DeserializeResult;
import com.provectus.kafka.ui.serde.api.SchemaDescription;
import com.provectus.kafka.ui.serdes.BuiltInSerde;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.Map;
import java.util.Optional;
import lombok.SneakyThrows;
import org.apache.kafka.common.protocol.types.ArrayOf;
import org.apache.kafka.common.protocol.types.BoundField;
import org.apache.kafka.common.protocol.types.CompactArrayOf;
import org.apache.kafka.common.protocol.types.Field;
import org.apache.kafka.common.protocol.types.Schema;
import org.apache.kafka.common.protocol.types.Struct;
import org.apache.kafka.common.protocol.types.Type;
// Deserialization logic and message schemas can be found in
// kafka.coordinator.group.GroupMetadataManager (readMessageKey, readOffsetMessageValue, readGroupMessageValue)
public class ConsumerOffsetsSerde implements BuiltInSerde {
private static final JsonMapper JSON_MAPPER = createMapper();
public static final String TOPIC = "__consumer_offsets";
public static String name() {
return "__consumer_offsets";
}
private static JsonMapper createMapper() {
var module = new SimpleModule();
module.addSerializer(Struct.class, new JsonSerializer<>() {
@Override
public void serialize(Struct value, JsonGenerator gen, SerializerProvider serializers) throws IOException {
gen.writeStartObject();
for (BoundField field : value.schema().fields()) {
var fieldVal = value.get(field);
gen.writeObjectField(field.def.name, fieldVal);
}
gen.writeEndObject();
}
});
var mapper = new JsonMapper();
mapper.registerModule(module);
return mapper;
}
@Override
public Optional<String> getDescription() {
return Optional.empty();
}
@Override
public Optional<SchemaDescription> getSchema(String topic, Target type) {
return Optional.empty();
}
@Override
public boolean canDeserialize(String topic, Target type) {
return topic.equals(TOPIC);
}
@Override
public boolean canSerialize(String topic, Target type) {
return false;
}
@Override
public Serializer serializer(String topic, Target type) {
throw new UnsupportedOperationException();
}
@Override
public Deserializer deserializer(String topic, Target type) {
return switch (type) {
case KEY -> keyDeserializer();
case VALUE -> valueDeserializer();
};
}
private Deserializer keyDeserializer() {
final Schema commitKeySchema = new Schema(
new Field("group", Type.STRING, ""),
new Field("topic", Type.STRING, ""),
new Field("partition", Type.INT32, "")
);
final Schema groupMetadataSchema = new Schema(
new Field("group", Type.STRING, "")
);
return (headers, data) -> {
var bb = ByteBuffer.wrap(data);
short version = bb.getShort();
return new DeserializeResult(
toJson(
switch (version) {
case 0, 1 -> commitKeySchema.read(bb);
case 2 -> groupMetadataSchema.read(bb);
default -> throw new IllegalStateException("Unknown group metadata message version: " + version);
}
),
DeserializeResult.Type.JSON,
Map.of()
);
};
}
private Deserializer valueDeserializer() {
final Schema commitOffsetSchemaV0 =
new Schema(
new Field("offset", Type.INT64, ""),
new Field("metadata", Type.STRING, ""),
new Field("commit_timestamp", Type.INT64, "")
);
final Schema commitOffsetSchemaV1 =
new Schema(
new Field("offset", Type.INT64, ""),
new Field("metadata", Type.STRING, ""),
new Field("commit_timestamp", Type.INT64, ""),
new Field("expire_timestamp", Type.INT64, "")
);
final Schema commitOffsetSchemaV2 =
new Schema(
new Field("offset", Type.INT64, ""),
new Field("metadata", Type.STRING, ""),
new Field("commit_timestamp", Type.INT64, "")
);
final Schema commitOffsetSchemaV3 =
new Schema(
new Field("offset", Type.INT64, ""),
new Field("leader_epoch", Type.INT32, ""),
new Field("metadata", Type.STRING, ""),
new Field("commit_timestamp", Type.INT64, "")
);
final Schema commitOffsetSchemaV4 = new Schema(
new Field("offset", Type.INT64, ""),
new Field("leader_epoch", Type.INT32, ""),
new Field("metadata", Type.COMPACT_STRING, ""),
new Field("commit_timestamp", Type.INT64, ""),
Field.TaggedFieldsSection.of()
);
final Schema metadataSchema0 =
new Schema(
new Field("protocol_type", Type.STRING, ""),
new Field("generation", Type.INT32, ""),
new Field("protocol", Type.NULLABLE_STRING, ""),
new Field("leader", Type.NULLABLE_STRING, ""),
new Field("members", new ArrayOf(new Schema(
new Field("member_id", Type.STRING, ""),
new Field("client_id", Type.STRING, ""),
new Field("client_host", Type.STRING, ""),
new Field("session_timeout", Type.INT32, ""),
new Field("subscription", Type.BYTES, ""),
new Field("assignment", Type.BYTES, "")
)), "")
);
final Schema metadataSchema1 =
new Schema(
new Field("protocol_type", Type.STRING, ""),
new Field("generation", Type.INT32, ""),
new Field("protocol", Type.NULLABLE_STRING, ""),
new Field("leader", Type.NULLABLE_STRING, ""),
new Field("members", new ArrayOf(new Schema(
new Field("member_id", Type.STRING, ""),
new Field("client_id", Type.STRING, ""),
new Field("client_host", Type.STRING, ""),
new Field("rebalance_timeout", Type.INT32, ""),
new Field("session_timeout", Type.INT32, ""),
new Field("subscription", Type.BYTES, ""),
new Field("assignment", Type.BYTES, "")
)), "")
);
final Schema metadataSchema2 =
new Schema(
new Field("protocol_type", Type.STRING, ""),
new Field("generation", Type.INT32, ""),
new Field("protocol", Type.NULLABLE_STRING, ""),
new Field("leader", Type.NULLABLE_STRING, ""),
new Field("current_state_timestamp", Type.INT64, ""),
new Field("members", new ArrayOf(new Schema(
new Field("member_id", Type.STRING, ""),
new Field("client_id", Type.STRING, ""),
new Field("client_host", Type.STRING, ""),
new Field("rebalance_timeout", Type.INT32, ""),
new Field("session_timeout", Type.INT32, ""),
new Field("subscription", Type.BYTES, ""),
new Field("assignment", Type.BYTES, "")
)), "")
);
final Schema metadataSchema3 =
new Schema(
new Field("protocol_type", Type.STRING, ""),
new Field("generation", Type.INT32, ""),
new Field("protocol", Type.NULLABLE_STRING, ""),
new Field("leader", Type.NULLABLE_STRING, ""),
new Field("current_state_timestamp", Type.INT64, ""),
new Field("members", new ArrayOf(new Schema(
new Field("member_id", Type.STRING, ""),
new Field("group_instance_id", Type.NULLABLE_STRING, ""),
new Field("client_id", Type.STRING, ""),
new Field("client_host", Type.STRING, ""),
new Field("rebalance_timeout", Type.INT32, ""),
new Field("session_timeout", Type.INT32, ""),
new Field("subscription", Type.BYTES, ""),
new Field("assignment", Type.BYTES, "")
)), "")
);
final Schema metadataSchema4 =
new Schema(
new Field("protocol_type", Type.COMPACT_STRING, ""),
new Field("generation", Type.INT32, ""),
new Field("protocol", Type.COMPACT_NULLABLE_STRING, ""),
new Field("leader", Type.COMPACT_NULLABLE_STRING, ""),
new Field("current_state_timestamp", Type.INT64, ""),
new Field("members", new CompactArrayOf(new Schema(
new Field("member_id", Type.COMPACT_STRING, ""),
new Field("group_instance_id", Type.COMPACT_NULLABLE_STRING, ""),
new Field("client_id", Type.COMPACT_STRING, ""),
new Field("client_host", Type.COMPACT_STRING, ""),
new Field("rebalance_timeout", Type.INT32, ""),
new Field("session_timeout", Type.INT32, ""),
new Field("subscription", Type.COMPACT_BYTES, ""),
new Field("assignment", Type.COMPACT_BYTES, ""),
Field.TaggedFieldsSection.of()
)), ""),
Field.TaggedFieldsSection.of()
);
return (headers, data) -> {
String result;
var bb = ByteBuffer.wrap(data);
short version = bb.getShort();
// ideally, we should distinguish whether the value is a commit or group metadata
// by checking the record's key, but our current serde structure doesn't allow that,
// so we try to parse it as group metadata first and then as a commit message
try {
result = toJson(
switch (version) {
case 0 -> metadataSchema0.read(bb);
case 1 -> metadataSchema1.read(bb);
case 2 -> metadataSchema2.read(bb);
case 3 -> metadataSchema3.read(bb);
case 4 -> metadataSchema4.read(bb);
default -> throw new IllegalArgumentException("Unrecognized version: " + version);
}
);
} catch (Throwable e) {
bb = bb.rewind();
bb.getShort(); // skipping version
result = toJson(
switch (version) {
case 0 -> commitOffsetSchemaV0.read(bb);
case 1 -> commitOffsetSchemaV1.read(bb);
case 2 -> commitOffsetSchemaV2.read(bb);
case 3 -> commitOffsetSchemaV3.read(bb);
case 4 -> commitOffsetSchemaV4.read(bb);
default -> throw new IllegalArgumentException("Unrecognized version: " + version);
}
);
}
if (bb.remaining() != 0) {
throw new IllegalArgumentException(
"Message buffer was not read to the end, which likely means the message is unrecognized");
}
return new DeserializeResult(
result,
DeserializeResult.Type.JSON,
Map.of()
);
};
}
@SneakyThrows
private String toJson(Struct s) {
return JSON_MAPPER.writeValueAsString(s);
}
}

View file
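The schemas above are the broker's own wire formats for __consumer_offsets. A self-contained round-trip for a version-1 offset-commit key, built with the same kafka-clients protocol types the serde reads with (the group/topic values are examples):

import java.nio.ByteBuffer;
import org.apache.kafka.common.protocol.types.Field;
import org.apache.kafka.common.protocol.types.Schema;
import org.apache.kafka.common.protocol.types.Struct;
import org.apache.kafka.common.protocol.types.Type;

class OffsetsKeySketch {
  public static void main(String[] args) {
    Schema commitKeySchema = new Schema(
        new Field("group", Type.STRING, ""),
        new Field("topic", Type.STRING, ""),
        new Field("partition", Type.INT32, ""));

    Struct key = new Struct(commitKeySchema)
        .set("group", "my-group")
        .set("topic", "orders")
        .set("partition", 0);

    ByteBuffer bb = ByteBuffer.allocate(2 + commitKeySchema.sizeOf(key));
    bb.putShort((short) 1);        // version prefix, read first by keyDeserializer()
    commitKeySchema.write(bb, key);
    bb.flip();

    short version = bb.getShort(); // 0/1 -> commit key schema, 2 -> group metadata schema
    System.out.println("v" + version + ": " + commitKeySchema.read(bb));
  }
}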

@ -0,0 +1,89 @@
package com.provectus.kafka.ui.serdes.builtin;
import com.provectus.kafka.ui.serde.api.DeserializeResult;
import com.provectus.kafka.ui.serde.api.PropertyResolver;
import com.provectus.kafka.ui.serde.api.SchemaDescription;
import com.provectus.kafka.ui.serdes.BuiltInSerde;
import java.util.HexFormat;
import java.util.Map;
import java.util.Optional;
public class HexSerde implements BuiltInSerde {
private HexFormat deserializeHexFormat;
public static String name() {
return "Hex";
}
@Override
public void autoConfigure(PropertyResolver kafkaClusterProperties, PropertyResolver globalProperties) {
configure(" ", true);
}
@Override
public void configure(PropertyResolver serdeProperties,
PropertyResolver kafkaClusterProperties,
PropertyResolver globalProperties) {
String delim = serdeProperties.getProperty("delimiter", String.class).orElse(" ");
boolean uppercase = serdeProperties.getProperty("uppercase", Boolean.class).orElse(true);
configure(delim, uppercase);
}
private void configure(String delim, boolean uppercase) {
deserializeHexFormat = HexFormat.ofDelimiter(delim);
if (uppercase) {
deserializeHexFormat = deserializeHexFormat.withUpperCase();
}
}
@Override
public Optional<String> getDescription() {
return Optional.empty();
}
@Override
public Optional<SchemaDescription> getSchema(String topic, Target type) {
return Optional.empty();
}
@Override
public boolean canDeserialize(String topic, Target type) {
return true;
}
@Override
public boolean canSerialize(String topic, Target type) {
return true;
}
@Override
public Serializer serializer(String topic, Target type) {
return input -> {
input = input.trim();
// it is a hack to provide the ability to send an empty array as a key/value
if (input.length() == 0) {
return new byte[] {};
}
return HexFormat.of().parseHex(prepareInputForParse(input));
};
}
// removes the most common delimiters and prefixes
private static String prepareInputForParse(String input) {
return input
.replaceAll(" ", "")
.replaceAll("#", "")
.replaceAll(":", "");
}
@Override
public Deserializer deserializer(String topic, Target type) {
return (headers, data) ->
new DeserializeResult(
deserializeHexFormat.formatHex(data),
DeserializeResult.Type.STRING,
Map.of()
);
}
}

View file
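HexSerde leans entirely on java.util.HexFormat (JDK 17+): deserialization formats bytes with the configured delimiter and case, while serialization strips common separators and parses plain hex. Its defaults in isolation:

import java.util.Arrays;
import java.util.HexFormat;

class HexSketch {
  public static void main(String[] args) {
    // mirrors the serde defaults set in autoConfigure above: space delimiter, uppercase
    HexFormat fmt = HexFormat.ofDelimiter(" ").withUpperCase();
    byte[] raw = {0x0A, (byte) 0xFF, 0x10};
    System.out.println(fmt.formatHex(raw));                                 // 0A FF 10
    System.out.println(Arrays.toString(HexFormat.of().parseHex("0AFF10"))); // [10, -1, 16]
  }
}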

@ -55,15 +55,11 @@ public class Int64Serde implements BuiltInSerde {
@Override
public Deserializer deserializer(String topic, Target type) {
return new Deserializer() {
@Override
public DeserializeResult deserialize(RecordHeaders headers, byte[] data) {
return new DeserializeResult(
return (headers, data) ->
new DeserializeResult(
String.valueOf(Longs.fromByteArray(data)),
DeserializeResult.Type.JSON,
Map.of()
);
}
};
}
}

View file

@ -50,7 +50,6 @@ import io.confluent.kafka.schemaregistry.protobuf.ProtobufSchemaUtils;
import java.io.ByteArrayInputStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
@ -204,17 +203,13 @@ public class ProtobufFileSerde implements BuiltInSerde {
Map<String, Descriptor> keyMessageDescriptorMap) {
static boolean canBeAutoConfigured(PropertyResolver kafkaClusterProperties) {
Optional<String> protobufFile = kafkaClusterProperties.getProperty("protobufFile", String.class);
Optional<List<String>> protobufFiles = kafkaClusterProperties.getListProperty("protobufFiles", String.class);
Optional<String> protobufFilesDir = kafkaClusterProperties.getProperty("protobufFilesDir", String.class);
return protobufFilesDir.isPresent()
|| protobufFile.isPresent()
|| protobufFiles.filter(files -> !files.isEmpty()).isPresent();
return protobufFilesDir.isPresent() || protobufFiles.filter(files -> !files.isEmpty()).isPresent();
}
static Configuration create(PropertyResolver properties) {
var protobufSchemas = loadSchemas(
properties.getProperty("protobufFile", String.class),
properties.getListProperty("protobufFiles", String.class),
properties.getProperty("protobufFilesDir", String.class)
);
@ -272,12 +267,11 @@ public class ProtobufFileSerde implements BuiltInSerde {
}
@VisibleForTesting
static Map<Path, ProtobufSchema> loadSchemas(Optional<String> protobufFile,
Optional<List<String>> protobufFiles,
static Map<Path, ProtobufSchema> loadSchemas(Optional<List<String>> protobufFiles,
Optional<String> protobufFilesDir) {
if (protobufFilesDir.isPresent()) {
if (protobufFile.isPresent() || protobufFiles.isPresent()) {
log.warn("protobufFile and protobufFiles properties will be ignored, since protobufFilesDir provided");
if (protobufFiles.isPresent()) {
log.warn("protobufFiles properties will be ignored, since protobufFilesDir provided");
}
List<ProtoFile> loadedFiles = new ProtoSchemaLoader(protobufFilesDir.get()).load();
Map<String, ProtoFileElement> allPaths = loadedFiles.stream()
@ -288,10 +282,8 @@ public class ProtobufFileSerde implements BuiltInSerde {
f -> new ProtobufSchema(f.toElement(), List.of(), allPaths)));
}
// Supported for backward compatibility; normally, the protobufFilesDir setting should be used
return Stream.concat(
protobufFile.stream(),
protobufFiles.stream().flatMap(Collection::stream)
)
return protobufFiles.stream()
.flatMap(Collection::stream)
.distinct()
.map(Path::of)
.collect(Collectors.toMap(path -> path, path -> new ProtobufSchema(readFileAsString(path))));

Some files were not shown because too many files have changed in this diff